Fix vCD attach failure
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implements the vimconn abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import shutil
33 import subprocess
34 import tempfile
35 import traceback
36 import itertools
37 import requests
38 import ssl
39 import atexit
40
41 from pyVmomi import vim, vmodl
42 from pyVim.connect import SmartConnect, Disconnect
43
44 from xml.etree import ElementTree as XmlElementTree
45 from lxml import etree as lxmlElementTree
46
47 import yaml
48 from pyvcloud.vcd.client import BasicLoginCredentials,Client,VcdTaskException
49 from pyvcloud.vcd.vdc import VDC
50 from pyvcloud.vcd.org import Org
51 import re
52 from pyvcloud.vcd.vapp import VApp
53 from xml.sax.saxutils import escape
54 import logging
55 import json
56 import time
57 import uuid
58 import httplib
59 #For python3
60 #import http.client
61 import hashlib
62 import socket
63 import struct
64 import netaddr
65 import random
66
67 # global variable for vcd connector type
68 STANDALONE = 'standalone'
69
70 # key for flavor dicts
71 FLAVOR_RAM_KEY = 'ram'
72 FLAVOR_VCPUS_KEY = 'vcpus'
73 FLAVOR_DISK_KEY = 'disk'
74 DEFAULT_IP_PROFILE = {'dhcp_count':50,
75 'dhcp_enabled':True,
76 'ip_version':"IPv4"
77 }
78 # global variable for wait time
79 INTERVAL_TIME = 5
80 MAX_WAIT_TIME = 1800
81
82 API_VERSION = '27.0'
83
84 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
85 __date__ = "$09-Mar-2018 11:09:29$"
86 __version__ = '0.2'
87
88 # -1: "Could not be created",
89 # 0: "Unresolved",
90 # 1: "Resolved",
91 # 2: "Deployed",
92 # 3: "Suspended",
93 # 4: "Powered on",
94 # 5: "Waiting for user input",
95 # 6: "Unknown state",
96 # 7: "Unrecognized state",
97 # 8: "Powered off",
98 # 9: "Inconsistent state",
99 # 10: "Children do not all have the same status",
100 # 11: "Upload initiated, OVF descriptor pending",
101 # 12: "Upload initiated, copying contents",
102 # 13: "Upload initiated , disk contents pending",
103 # 14: "Upload has been quarantined",
104 # 15: "Upload quarantine period has expired"
105
106 # mapping vCD status to MANO
107 vcdStatusCode2manoFormat = {4: 'ACTIVE',
108 7: 'PAUSED',
109 3: 'SUSPENDED',
110 8: 'INACTIVE',
111 12: 'BUILD',
112 -1: 'ERROR',
113 14: 'DELETED'}
114
115 #
116 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
117 'ERROR': 'ERROR', 'DELETED': 'DELETED'
118 }
119
120 class vimconnector(vimconn.vimconnector):
121 # dict used to store flavor in memory
122 flavorlist = {}
123
124 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
125 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
126 """
127 Constructor creates a VMware connector to vCloud director.
128
129 By default the constructor doesn't validate the connection state, so the client can create the object with None arguments.
130 If the client specifies username, password, host and VDC name, the connector initializes the other missing attributes.
131
132 a) It initializes the organization UUID
133 b) It initializes tenant_id/VDC ID (this information is derived from the tenant name)
134
135 Args:
136 uuid - organization uuid.
137 name - organization name that must be present in vCloud director.
138 tenant_id - VDC uuid; it must be present in vCloud director
139 tenant_name - VDC name.
140 url - hostname or IP address of vCloud director
141 url_admin - same as above.
142 user - user that administers the organization. Caller must make sure that
143 the username has the right privileges.
144
145 password - password for the user.
146
147 The VMware connector also requires PVDC administrative privileges and a separate account.
148 These variables must be passed via the config argument, a dict containing the keys
149
150 dict['admin_username']
151 dict['admin_password']
152 config - provides NSX and vCenter information
153
154 Returns:
155 Nothing.
156 """
157
158 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
159 url_admin, user, passwd, log_level, config)
160
161 self.logger = logging.getLogger('openmano.vim.vmware')
162 self.logger.setLevel(10)
163 self.persistent_info = persistent_info
164
165 self.name = name
166 self.id = uuid
167 self.url = url
168 self.url_admin = url_admin
169 self.tenant_id = tenant_id
170 self.tenant_name = tenant_name
171 self.user = user
172 self.passwd = passwd
173 self.config = config
174 self.admin_password = None
175 self.admin_user = None
176 self.org_name = ""
177 self.nsx_manager = None
178 self.nsx_user = None
179 self.nsx_password = None
180 self.availability_zone = None
181
182 # Disable warnings from self-signed certificates.
183 requests.packages.urllib3.disable_warnings()
184
185 if tenant_name is not None:
186 orgnameandtenant = tenant_name.split(":")
187 if len(orgnameandtenant) == 2:
188 self.tenant_name = orgnameandtenant[1]
189 self.org_name = orgnameandtenant[0]
190 else:
191 self.tenant_name = tenant_name
192 if "orgname" in config:
193 self.org_name = config['orgname']
194
195 if log_level:
196 self.logger.setLevel(getattr(logging, log_level))
197
198 try:
199 self.admin_user = config['admin_username']
200 self.admin_password = config['admin_password']
201 except KeyError:
202 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
203
204 try:
205 self.nsx_manager = config['nsx_manager']
206 self.nsx_user = config['nsx_user']
207 self.nsx_password = config['nsx_password']
208 except KeyError:
209 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
210
211 self.vcenter_ip = config.get("vcenter_ip", None)
212 self.vcenter_port = config.get("vcenter_port", None)
213 self.vcenter_user = config.get("vcenter_user", None)
214 self.vcenter_password = config.get("vcenter_password", None)
215
216 #Set availability zone for Affinity rules
217 self.availability_zone = self.set_availability_zones()
218
219 # ############# Stub code for SRIOV #################
220 # try:
221 # self.dvs_name = config['dv_switch_name']
222 # except KeyError:
223 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
224 #
225 # self.vlanID_range = config.get("vlanID_range", None)
226
227 self.org_uuid = None
228 self.client = None
229
230 if not url:
231 raise vimconn.vimconnException('url param can not be NoneType')
232
233 if not self.url_admin: # try to use normal url
234 self.url_admin = self.url
235
236 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
237 self.tenant_id, self.tenant_name))
238 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
239 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
240
241 # initialize organization
242 if self.user is not None and self.passwd is not None and self.url:
243 self.init_organization()
244
245 def __getitem__(self, index):
246 if index == 'name':
247 return self.name
248 if index == 'tenant_id':
249 return self.tenant_id
250 if index == 'tenant_name':
251 return self.tenant_name
252 elif index == 'id':
253 return self.id
254 elif index == 'org_name':
255 return self.org_name
256 elif index == 'org_uuid':
257 return self.org_uuid
258 elif index == 'user':
259 return self.user
260 elif index == 'passwd':
261 return self.passwd
262 elif index == 'url':
263 return self.url
264 elif index == 'url_admin':
265 return self.url_admin
266 elif index == "config":
267 return self.config
268 else:
269 raise KeyError("Invalid key '%s'" % str(index))
270
271 def __setitem__(self, index, value):
272 if index == 'name':
273 self.name = value
274 if index == 'tenant_id':
275 self.tenant_id = value
276 if index == 'tenant_name':
277 self.tenant_name = value
278 elif index == 'id':
279 self.id = value
280 elif index == 'org_name':
281 self.org_name = value
282 elif index == 'org_uuid':
283 self.org_uuid = value
284 elif index == 'user':
285 self.user = value
286 elif index == 'passwd':
287 self.passwd = value
288 elif index == 'url':
289 self.url = value
290 elif index == 'url_admin':
291 self.url_admin = value
292 else:
293 raise KeyError("Invalid key '%s'" % str(index))
294
295 def connect_as_admin(self):
296 """ Method connect as pvdc admin user to vCloud director.
297 There are certain action that can be done only by provider vdc admin user.
298 Organization creation / provider network creation etc.
299
300 Returns:
301 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
302 """
303 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
304
305 try:
306 host = self.url
307 org = 'System'
308 client_as_admin = Client(host, verify_ssl_certs=False)
309 client_as_admin.set_highest_supported_version()
310 client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
311 except Exception as e:
312 raise vimconn.vimconnException(
313 "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
314
315 return client_as_admin
316
317 def connect(self):
318 """ Method connect as normal user to vCloud director.
319
320 Returns:
321 The return client object that latter can be used to connect to vCloud director as admin for VDC
322 """
323 try:
324 self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
325 self.user,
326 self.org_name))
327 host = self.url
328 client = Client(host, verify_ssl_certs=False)
329 client.set_highest_supported_version()
330 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
331 except:
332 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
333 "{} as user: {}".format(self.org_name, self.user))
334
335 return client
336
337 def init_organization(self):
338 """ Method initialize organization UUID and VDC parameters.
339
340 At bare minimum client must provide organization name that present in vCloud director and VDC.
341
342 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
343 The Org - UUID will be initialized at the run time if data center present in vCloud director.
344
345 Returns:
346 The return vca object that letter can be used to connect to vcloud direct as admin
347 """
348 client = self.connect()
349 if not client:
350 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
351
352 self.client = client
353 try:
354 if self.org_uuid is None:
355 org_list = client.get_org_list()
356 for org in org_list.Org:
357 # we set org UUID at the init phase but we can do it only when we have valid credential.
358 if org.get('name') == self.org_name:
359 self.org_uuid = org.get('href').split('/')[-1]
360 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
361 break
362 else:
363 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
364
365 # if all went well, request the org details
366 org_details_dict = self.get_org(org_uuid=self.org_uuid)
367
368 # there are two cases when initializing the VDC ID or VDC name at run time
369 # tenant_name provided but no tenant id
370 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
371 vdcs_dict = org_details_dict['vdcs']
372 for vdc in vdcs_dict:
373 if vdcs_dict[vdc] == self.tenant_name:
374 self.tenant_id = vdc
375 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
376 self.org_name))
377 break
378 else:
379 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
380 # case two we have tenant_id but we don't have tenant name so we find and set it.
381 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
382 vdcs_dict = org_details_dict['vdcs']
383 for vdc in vdcs_dict:
384 if vdc == self.tenant_id:
385 self.tenant_name = vdcs_dict[vdc]
386 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
387 self.org_name))
388 break
389 else:
390 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
391 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
392 except:
393 self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
394 self.logger.debug(traceback.format_exc())
395 self.org_uuid = None
396
397 def new_tenant(self, tenant_name=None, tenant_description=None):
398 """ Method adds a new tenant to VIM with this name.
399 This action requires access to create VDC action in vCloud director.
400
401 Args:
402 tenant_name is tenant_name to be created.
403 tenant_description not used for this call
404
405 Return:
406 returns the tenant identifier in UUID format.
407 If action is failed method will throw vimconn.vimconnException method
408 """
409 vdc_task = self.create_vdc(vdc_name=tenant_name)
410 if vdc_task is not None:
411 vdc_uuid, value = vdc_task.popitem()
412 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
413 return vdc_uuid
414 else:
415 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
416
417 def delete_tenant(self, tenant_id=None):
418 """ Delete a tenant from VIM
419 Args:
420 tenant_id is tenant_id to be deleted.
421
422 Return:
423 returns the tenant identifier in UUID format.
424 If the action fails the method raises an exception
425 """
426 vca = self.connect_as_admin()
427 if not vca:
428 raise vimconn.vimconnConnectionException("Failed to connect vCD")
429
430 if tenant_id is not None:
431 if vca._session:
432 #Get OrgVDC
433 url_list = [self.url, '/api/vdc/', tenant_id]
434 orgvdc_herf = ''.join(url_list)
435
436 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
437 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
438 response = self.perform_request(req_type='GET',
439 url=orgvdc_herf,
440 headers=headers)
441
442 if response.status_code != requests.codes.ok:
443 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
444 "Return status code {}".format(orgvdc_herf,
445 response.status_code))
446 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
447
448 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
449 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
450 #For python3
451 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
452 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
453 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
454 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
455
456 response = self.perform_request(req_type='DELETE',
457 url=vdc_remove_href,
458 headers=headers)
459
460 if response.status_code == 202:
461 time.sleep(5)
462 return tenant_id
463 else:
464 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
465 "Return status code {}".format(vdc_remove_href,
466 response.status_code))
467 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
468 else:
469 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
470 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
471
472
473 def get_tenant_list(self, filter_dict={}):
474 """Obtain tenants of VIM
475 filter_dict can contain the following keys:
476 name: filter by tenant name
477 id: filter by tenant uuid/id
478 <other VIM specific>
479 Returns the tenant list of dictionaries:
480 [{'name': '<name>', 'id': '<id>', ...}, ...]
481
482 """
483 org_dict = self.get_org(self.org_uuid)
484 vdcs_dict = org_dict['vdcs']
485
486 vdclist = []
487 try:
488 for k in vdcs_dict:
489 entry = {'name': vdcs_dict[k], 'id': k}
490 # if caller didn't specify dictionary we return all tenants.
491 if filter_dict is not None and filter_dict:
492 filtered_entry = entry.copy()
493 filtered_dict = set(entry.keys()) - set(filter_dict)
494 for unwanted_key in filtered_dict: del entry[unwanted_key]
495 if filter_dict == entry:
496 vdclist.append(filtered_entry)
497 else:
498 vdclist.append(entry)
499 except Exception as e:
500 self.logger.debug("Error in get_tenant_list()")
501 self.logger.debug(traceback.format_exc())
502 raise vimconn.vimconnException("Incorrect state. {}".format(e))
503
504 return vdclist
505
506 def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
507 """Adds a tenant network to VIM
508 Params:
509 'net_name': name of the network
510 'net_type': one of:
511 'bridge': overlay isolated network
512 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
513 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
514 'ip_profile': is a dict containing the IP parameters of the network
515 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
516 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
517 'gateway_address': (Optional) ip_schema, that is X.X.X.X
518 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
519 'dhcp_enabled': True or False
520 'dhcp_start_address': ip_schema, first IP to grant
521 'dhcp_count': number of IPs to grant.
522 'shared': if this network can be seen/used by other tenants/organizations
523 'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
524 Returns a tuple with the network identifier and created_items, or raises an exception on error
525 created_items can be None or a dictionary where this method can include key-values that will be passed to
526 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
527 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
528 as not present.
529 """
530
531 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
532 .format(net_name, net_type, ip_profile, shared))
533
534 created_items = {}
535 isshared = 'false'
536 if shared:
537 isshared = 'true'
538
539 # ############# Stub code for SRIOV #################
540 # if net_type == "data" or net_type == "ptp":
541 # if self.config.get('dv_switch_name') == None:
542 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
543 # network_uuid = self.create_dvPort_group(net_name)
544
545 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
546 ip_profile=ip_profile, isshared=isshared)
547 if network_uuid is not None:
548 return network_uuid, created_items
549 else:
550 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
551
552 def get_vcd_network_list(self):
553 """ Method available organization for a logged in tenant
554
555 Returns:
556 The return vca object that letter can be used to connect to vcloud direct as admin
557 """
558
559 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
560
561 if not self.tenant_name:
562 raise vimconn.vimconnConnectionException("Tenant name is empty.")
563
564 org, vdc = self.get_vdc_details()
565 if vdc is None:
566 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
567
568 vdc_uuid = vdc.get('id').split(":")[3]
569 if self.client._session:
570 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
571 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
572 response = self.perform_request(req_type='GET',
573 url=vdc.get('href'),
574 headers=headers)
575 if response.status_code != 200:
576 self.logger.error("Failed to get vdc content")
577 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
578 else:
579 content = XmlElementTree.fromstring(response.content)
580
581 network_list = []
582 try:
583 for item in content:
584 if item.tag.split('}')[-1] == 'AvailableNetworks':
585 for net in item:
586 response = self.perform_request(req_type='GET',
587 url=net.get('href'),
588 headers=headers)
589
590 if response.status_code != 200:
591 self.logger.error("Failed to get network content")
592 raise vimconn.vimconnNotFoundException("Failed to get network content")
593 else:
594 net_details = XmlElementTree.fromstring(response.content)
595
596 filter_dict = {}
597 net_uuid = net_details.get('id').split(":")
598 if len(net_uuid) != 4:
599 continue
600 else:
601 net_uuid = net_uuid[3]
602 # create dict entry
603 self.logger.debug("get_vcd_network_list(): Adding network {} "
604 "to a list vcd id {} network {}".format(net_uuid,
605 vdc_uuid,
606 net_details.get('name')))
607 filter_dict["name"] = net_details.get('name')
608 filter_dict["id"] = net_uuid
609 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
610 shared = True
611 else:
612 shared = False
613 filter_dict["shared"] = shared
614 filter_dict["tenant_id"] = vdc_uuid
615 if int(net_details.get('status')) == 1:
616 filter_dict["admin_state_up"] = True
617 else:
618 filter_dict["admin_state_up"] = False
619 filter_dict["status"] = "ACTIVE"
620 filter_dict["type"] = "bridge"
621 network_list.append(filter_dict)
622 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
623 except:
624 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
625 pass
626
627 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
628 return network_list
629
630 def get_network_list(self, filter_dict={}):
631 """Obtain tenant networks of VIM
632 Filter_dict can be:
633 name: network name OR/AND
634 id: network uuid OR/AND
635 shared: boolean OR/AND
636 tenant_id: tenant OR/AND
637 admin_state_up: boolean
638 status: 'ACTIVE'
639
640 [{key : value , key : value}]
641
642 Returns the network list of dictionaries:
643 [{<the fields at Filter_dict plus some VIM specific>}, ...]
644 List can be empty
645 """
646
647 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
648
649 if not self.tenant_name:
650 raise vimconn.vimconnConnectionException("Tenant name is empty.")
651
652 org, vdc = self.get_vdc_details()
653 if vdc is None:
654 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
655
656 try:
657 vdcid = vdc.get('id').split(":")[3]
658
659 if self.client._session:
660 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
661 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
662 response = self.perform_request(req_type='GET',
663 url=vdc.get('href'),
664 headers=headers)
665 if response.status_code != 200:
666 self.logger.error("Failed to get vdc content")
667 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
668 else:
669 content = XmlElementTree.fromstring(response.content)
670
671 network_list = []
672 for item in content:
673 if item.tag.split('}')[-1] == 'AvailableNetworks':
674 for net in item:
675 response = self.perform_request(req_type='GET',
676 url=net.get('href'),
677 headers=headers)
678
679 if response.status_code != 200:
680 self.logger.error("Failed to get network content")
681 raise vimconn.vimconnNotFoundException("Failed to get network content")
682 else:
683 net_details = XmlElementTree.fromstring(response.content)
684
685 filter_entry = {}
686 net_uuid = net_details.get('id').split(":")
687 if len(net_uuid) != 4:
688 continue
689 else:
690 net_uuid = net_uuid[3]
691 # create dict entry
692 self.logger.debug("get_network_list(): Adding net {}"
693 " to a list vcd id {} network {}".format(net_uuid,
694 vdcid,
695 net_details.get('name')))
696 filter_entry["name"] = net_details.get('name')
697 filter_entry["id"] = net_uuid
698 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
699 shared = True
700 else:
701 shared = False
702 filter_entry["shared"] = shared
703 filter_entry["tenant_id"] = vdcid
704 if int(net_details.get('status')) == 1:
705 filter_entry["admin_state_up"] = True
706 else:
707 filter_entry["admin_state_up"] = False
708 filter_entry["status"] = "ACTIVE"
709 filter_entry["type"] = "bridge"
710 filtered_entry = filter_entry.copy()
711
712 if filter_dict is not None and filter_dict:
713 # we remove all the key : value we don't care and match only
714 # respected field
715 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
716 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
717 if filter_dict == filter_entry:
718 network_list.append(filtered_entry)
719 else:
720 network_list.append(filtered_entry)
721 except Exception as e:
722 self.logger.debug("Error in get_network_list",exc_info=True)
723 if isinstance(e, vimconn.vimconnException):
724 raise
725 else:
726 raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
727
728 self.logger.debug("Returning {}".format(network_list))
729 return network_list
730
731 def get_network(self, net_id):
732 """Method obtains network details of net_id VIM network
733 Returns a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields"""
734
735 try:
736 org, vdc = self.get_vdc_details()
737 vdc_id = vdc.get('id').split(":")[3]
738 if self.client._session:
739 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
740 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
741 response = self.perform_request(req_type='GET',
742 url=vdc.get('href'),
743 headers=headers)
744 if response.status_code != 200:
745 self.logger.error("Failed to get vdc content")
746 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
747 else:
748 content = XmlElementTree.fromstring(response.content)
749
750 filter_dict = {}
751
752 for item in content:
753 if item.tag.split('}')[-1] == 'AvailableNetworks':
754 for net in item:
755 response = self.perform_request(req_type='GET',
756 url=net.get('href'),
757 headers=headers)
758
759 if response.status_code != 200:
760 self.logger.error("Failed to get network content")
761 raise vimconn.vimconnNotFoundException("Failed to get network content")
762 else:
763 net_details = XmlElementTree.fromstring(response.content)
764
765 vdc_network_id = net_details.get('id').split(":")
766 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
767 filter_dict["name"] = net_details.get('name')
768 filter_dict["id"] = vdc_network_id[3]
769 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
770 shared = True
771 else:
772 shared = False
773 filter_dict["shared"] = shared
774 filter_dict["tenant_id"] = vdc_id
775 if int(net_details.get('status')) == 1:
776 filter_dict["admin_state_up"] = True
777 else:
778 filter_dict["admin_state_up"] = False
779 filter_dict["status"] = "ACTIVE"
780 filter_dict["type"] = "bridge"
781 self.logger.debug("Returning {}".format(filter_dict))
782 return filter_dict
783 else:
784 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
785 except Exception as e:
786 self.logger.debug("Error in get_network")
787 self.logger.debug(traceback.format_exc())
788 if isinstance(e, vimconn.vimconnException):
789 raise
790 else:
791 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
792
793 return filter_dict
794
795 def delete_network(self, net_id, created_items=None):
796 """
797 Removes a tenant network from VIM and its associated elements
798 :param net_id: VIM identifier of the network, provided by method new_network
799 :param created_items: dictionary with extra items to be deleted. provided by method new_network
800 Returns the network identifier or raises an exception upon error or when network is not found
801 """
802
803 # ############# Stub code for SRIOV #################
804 # dvport_group = self.get_dvport_group(net_id)
805 # if dvport_group:
806 # #delete portgroup
807 # status = self.destroy_dvport_group(net_id)
808 # if status:
809 # # Remove vlanID from persistent info
810 # if net_id in self.persistent_info["used_vlanIDs"]:
811 # del self.persistent_info["used_vlanIDs"][net_id]
812 #
813 # return net_id
814
815 vcd_network = self.get_vcd_network(network_uuid=net_id)
816 if vcd_network is not None and vcd_network:
817 if self.delete_network_action(network_uuid=net_id):
818 return net_id
819 else:
820 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
821
822 def refresh_nets_status(self, net_list):
823 """Get the status of the networks
824 Params: the list of network identifiers
825 Returns a dictionary with:
826 net_id: #VIM id of this network
827 status: #Mandatory. Text with one of:
828 # DELETED (not found at vim)
829 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
830 # OTHER (Vim reported other status not understood)
831 # ERROR (VIM indicates an ERROR status)
832 # ACTIVE, INACTIVE, DOWN (admin down),
833 # BUILD (on building process)
834 #
835 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
836 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
837
838 """
839
840 dict_entry = {}
841 try:
842 for net in net_list:
843 errormsg = ''
844 vcd_network = self.get_vcd_network(network_uuid=net)
845 if vcd_network is not None and vcd_network:
846 if vcd_network['status'] == '1':
847 status = 'ACTIVE'
848 else:
849 status = 'DOWN'
850 else:
851 status = 'DELETED'
852 errormsg = 'Network not found.'
853
854 dict_entry[net] = {'status': status, 'error_msg': errormsg,
855 'vim_info': yaml.safe_dump(vcd_network)}
856 except:
857 self.logger.debug("Error in refresh_nets_status")
858 self.logger.debug(traceback.format_exc())
859
860 return dict_entry
861
862 def get_flavor(self, flavor_id):
863 """Obtain flavor details from the VIM
864 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
865 """
866 if flavor_id not in vimconnector.flavorlist:
867 raise vimconn.vimconnNotFoundException("Flavor not found.")
868 return vimconnector.flavorlist[flavor_id]
869
870 def new_flavor(self, flavor_data):
871 """Adds a tenant flavor to VIM
872 flavor_data contains a dictionary with information, keys:
873 name: flavor name
874 ram: memory (cloud type) in MBytes
875 vcpus: cpus (cloud type)
876 extended: EPA parameters
877 - numas: #items requested in same NUMA
878 memory: number of 1G huge pages memory
879 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
880 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
881 - name: interface name
882 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
883 bandwidth: X Gbps; requested guarantee bandwidth
884 vpci: requested virtual PCI address
885 disk: disk size
886 is_public:
887 #TODO to concrete
888 Returns the flavor identifier"""
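# Illustrative sketch only: a flavor_data dict of the shape this method accepts; the values
# are placeholders, and the flavor is stored only in the in-memory vimconnector.flavorlist.
#   flavor_data = {'name': 'small', 'ram': 2048, 'vcpus': 2, 'disk': 10,
#                  'extended': {'numas': [{'memory': 2, 'paired-threads': 1}]}}
#   flavor_id = conn.new_flavor(flavor_data)   # numa 'memory' is in GB and overrides 'ram' (2 GB -> 2048 MB)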
889
890 # generate a new uuid put to internal dict and return it.
891 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
892 new_flavor=flavor_data
893 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
894 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
895 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
896
897 if not isinstance(ram, int):
898 raise vimconn.vimconnException("Non-integer value for ram")
899 elif not isinstance(cpu, int):
900 raise vimconn.vimconnException("Non-integer value for cpu")
901 elif not isinstance(disk, int):
902 raise vimconn.vimconnException("Non-integer value for disk")
903
904 extended_flv = flavor_data.get("extended")
905 if extended_flv:
906 numas=extended_flv.get("numas")
907 if numas:
908 for numa in numas:
909 #overwrite ram and vcpus
910 if 'memory' in numa:
911 ram = numa['memory']*1024
912 if 'paired-threads' in numa:
913 cpu = numa['paired-threads']*2
914 elif 'cores' in numa:
915 cpu = numa['cores']
916 elif 'threads' in numa:
917 cpu = numa['threads']
918
919 new_flavor[FLAVOR_RAM_KEY] = ram
920 new_flavor[FLAVOR_VCPUS_KEY] = cpu
921 new_flavor[FLAVOR_DISK_KEY] = disk
922 # generate a new uuid put to internal dict and return it.
923 flavor_id = uuid.uuid4()
924 vimconnector.flavorlist[str(flavor_id)] = new_flavor
925 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
926
927 return str(flavor_id)
928
929 def delete_flavor(self, flavor_id):
930 """Deletes a tenant flavor from VIM identify by its id
931
932 Returns the used id or raise an exception
933 """
934 if flavor_id not in vimconnector.flavorlist:
935 raise vimconn.vimconnNotFoundException("Flavor not found.")
936
937 vimconnector.flavorlist.pop(flavor_id, None)
938 return flavor_id
939
940 def new_image(self, image_dict):
941 """
942 Adds a tenant image to VIM
943 Returns:
944 the image identifier (catalog UUID) if the image is created
945 otherwise an exception is raised
946 """
947
948 return self.get_image_id_from_path(image_dict['location'])
949
950 def delete_image(self, image_id):
951 """
952 Deletes a tenant image from VIM
953 Args:
954 image_id is ID of Image to be deleted
955 Return:
956 returns the image identifier in UUID format or raises an exception on error
957 """
958 conn = self.connect_as_admin()
959 if not conn:
960 raise vimconn.vimconnConnectionException("Failed to connect vCD")
961 # Get Catalog details
962 url_list = [self.url, '/api/catalog/', image_id]
963 catalog_herf = ''.join(url_list)
964
965 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
966 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
967
968 response = self.perform_request(req_type='GET',
969 url=catalog_herf,
970 headers=headers)
971
972 if response.status_code != requests.codes.ok:
973 self.logger.debug("delete_image():GET REST API call {} failed. "\
974 "Return status code {}".format(catalog_herf,
975 response.status_code))
976 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
977
978 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
979 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
980 #For python3
981 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
982 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
983
984 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
985 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
986 for catalogItem in catalogItems:
987 catalogItem_href = catalogItem.attrib['href']
988
989 response = self.perform_request(req_type='GET',
990 url=catalogItem_href,
991 headers=headers)
992
993 if response.status_code != requests.codes.ok:
994 self.logger.debug("delete_image():GET REST API call {} failed. "\
995 "Return status code {}".format(catalog_herf,
996 response.status_code))
997 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
998 catalogItem,
999 image_id))
1000
1001 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1002 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
1003 #For python3
1004 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
1005 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
1006 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
1007
1008 #Remove catalogItem
1009 response = self.perform_request(req_type='DELETE',
1010 url=catalogitem_remove_href,
1011 headers=headers)
1012 if response.status_code == requests.codes.no_content:
1013 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1014 else:
1015 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
1016
1017 #Remove catalog
1018 url_list = [self.url, '/api/admin/catalog/', image_id]
1019 catalog_remove_herf = ''.join(url_list)
1020 response = self.perform_request(req_type='DELETE',
1021 url=catalog_remove_herf,
1022 headers=headers)
1023
1024 if response.status_code == requests.codes.no_content:
1025 self.logger.debug("Deleted Catalog {}".format(image_id))
1026 return image_id
1027 else:
1028 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
1029
1030
1031 def catalog_exists(self, catalog_name, catalogs):
1032 """
1033
1034 :param catalog_name: name of the catalog to look for
1035 :param catalogs: list of catalog dictionaries, as returned by list_catalogs()
1036 :return: the catalog id if found, otherwise None
1037 """
1038 for catalog in catalogs:
1039 if catalog['name'] == catalog_name:
1040 return catalog['id']
1041
1042 def create_vimcatalog(self, vca=None, catalog_name=None):
1043 """ Create new catalog entry in vCloud director.
1044
1045 Args
1046 vca: vCloud director.
1047 catalog_name: catalog that the client wishes to create. Note: no validation is done on the name.
1048 The client must make sure to provide a valid string representation.
1049
1050 Returns the catalog id if the catalog was created, else None.
1051
1052 """
1053 try:
1054 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1055 if lxml_catalog_element:
1056 id_attr_value = lxml_catalog_element.get('id') # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
1057 return id_attr_value.split(':')[-1]
1058 catalogs = vca.list_catalogs()
1059 except Exception as ex:
1060 self.logger.error(
1061 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
1062 raise
1063 return self.catalog_exists(catalog_name, catalogs)
1064
1065 # noinspection PyIncorrectDocstring
1066 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
1067 description='', progress=False, chunk_bytes=128 * 1024):
1068 """
1069 Uploads an OVF file to a vCloud catalog
1070
1071 :param chunk_bytes:
1072 :param progress:
1073 :param description:
1074 :param image_name:
1075 :param vca:
1076 :param catalog_name: (str): The name of the catalog to upload the media.
1077 :param media_file_name: (str): The name of the local media file to upload.
1078 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1079 """
1080 if not os.path.isfile(media_file_name): raise vimconn.vimconnException("Media file not found: {}".format(media_file_name))
1081 statinfo = os.stat(media_file_name)
1082
1083 # find a catalog entry where we upload OVF.
1084 # create the vApp Template and check the status; if vCD is able to read the OVF it will respond with an appropriate
1085 # status change.
1086 # if vCD can parse the OVF we upload the VMDK file
1087 try:
1088 for catalog in vca.list_catalogs():
1089 if catalog_name != catalog['name']:
1090 continue
1091 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
1092 data = """
1093 <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
1094 """.format(catalog_name, description)
1095
1096 if self.client:
1097 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1098 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1099 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1100
1101 response = self.perform_request(req_type='POST',
1102 url=catalog_href,
1103 headers=headers,
1104 data=data)
1105
1106 if response.status_code == requests.codes.created:
1107 catalogItem = XmlElementTree.fromstring(response.content)
1108 entity = [child for child in catalogItem if
1109 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1110 href = entity.get('href')
1111 template = href
1112
1113 response = self.perform_request(req_type='GET',
1114 url=href,
1115 headers=headers)
1116
1117 if response.status_code == requests.codes.ok:
1118 headers['Content-Type'] = 'text/xml'
1119 result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"',response.content)
1120 if result:
1121 transfer_href = result.group(1)
1122
1123 response = self.perform_request(req_type='PUT',
1124 url=transfer_href,
1125 headers=headers,
1126 data=open(media_file_name, 'rb'))
1127 if response.status_code != requests.codes.ok:
1128 self.logger.debug(
1129 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1130 media_file_name))
1131 return False
1132
1133 # TODO fix this with an async block
1134 time.sleep(5)
1135
1136 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1137
1138 # uploading VMDK file
1139 # check status of OVF upload and upload remaining files.
1140 response = self.perform_request(req_type='GET',
1141 url=template,
1142 headers=headers)
1143
1144 if response.status_code == requests.codes.ok:
1145 result = re.search('rel="upload:default"\s*href="(.*?vmdk)"',response.content)
1146 if result:
1147 link_href = result.group(1)
1148 # we skip the ovf since it is already uploaded.
1149 if 'ovf' in link_href:
1150 continue
1151 # The OVF file and the VMDK must be in the same directory
1152 head, tail = os.path.split(media_file_name)
1153 file_vmdk = head + '/' + link_href.split("/")[-1]
1154 if not os.path.isfile(file_vmdk):
1155 return False
1156 statinfo = os.stat(file_vmdk)
1157 if statinfo.st_size == 0:
1158 return False
1159 hrefvmdk = link_href
1160
1161 if progress:
1162 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1163 FileTransferSpeed()]
1164 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1165
1166 bytes_transferred = 0
1167 f = open(file_vmdk, 'rb')
1168 while bytes_transferred < statinfo.st_size:
1169 my_bytes = f.read(chunk_bytes)
1170 if len(my_bytes) <= chunk_bytes:
1171 headers['Content-Range'] = 'bytes %s-%s/%s' % (
1172 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1173 headers['Content-Length'] = str(len(my_bytes))
1174 response = requests.put(url=hrefvmdk,
1175 headers=headers,
1176 data=my_bytes,
1177 verify=False)
1178 if response.status_code == requests.codes.ok:
1179 bytes_transferred += len(my_bytes)
1180 if progress:
1181 progress_bar.update(bytes_transferred)
1182 else:
1183 self.logger.debug(
1184 'file upload failed with error: [%s] %s' % (response.status_code,
1185 response.content))
1186
1187 f.close()
1188 return False
1189 f.close()
1190 if progress:
1191 progress_bar.finish()
1192 time.sleep(10)
1193 return True
1194 else:
1195 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1196 format(catalog_name, media_file_name))
1197 return False
1198 except Exception as exp:
1199 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1200 .format(catalog_name,media_file_name, exp))
1201 raise vimconn.vimconnException(
1202 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1203 .format(catalog_name,media_file_name, exp))
1204
1205 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1206 return False
1207
1208 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1209 """Upload media file"""
1210 # TODO add named parameters for readability
1211
1212 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1213 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1214
1215 def validate_uuid4(self, uuid_string=None):
1216 """ Method validate correct format of UUID.
1217
1218 Return: true if string represent valid uuid
1219 """
1220 try:
1221 val = uuid.UUID(uuid_string, version=4)
1222 except ValueError:
1223 return False
1224 return True
1225
1226 def get_catalogid(self, catalog_name=None, catalogs=None):
1227 """ Method check catalog and return catalog ID in UUID format.
1228
1229 Args
1230 catalog_name: catalog name as string
1231 catalogs: list of catalogs.
1232
1233 Return: catalogs uuid
1234 """
1235
1236 for catalog in catalogs:
1237 if catalog['name'] == catalog_name:
1238 catalog_id = catalog['id']
1239 return catalog_id
1240 return None
1241
1242 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1243 """ Method check catalog and return catalog name lookup done by catalog UUID.
1244
1245 Args
1246 catalog_name: catalog name as string
1247 catalogs: list of catalogs.
1248
1249 Return: catalogs name or None
1250 """
1251
1252 if not self.validate_uuid4(uuid_string=catalog_uuid):
1253 return None
1254
1255 for catalog in catalogs:
1256 catalog_id = catalog.get('id')
1257 if catalog_id == catalog_uuid:
1258 return catalog.get('name')
1259 return None
1260
1261 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1262 """ Method check catalog and return catalog name lookup done by catalog UUID.
1263
1264 Args
1265 catalog_name: catalog name as string
1266 catalogs: list of catalogs.
1267
1268 Return: catalogs name or None
1269 """
1270
1271 if not self.validate_uuid4(uuid_string=catalog_uuid):
1272 return None
1273
1274 for catalog in catalogs:
1275 catalog_id = catalog.get('id')
1276 if catalog_id == catalog_uuid:
1277 return catalog
1278 return None
1279
1280 def get_image_id_from_path(self, path=None, progress=False):
1281 """ Method upload OVF image to vCloud director.
1282
1283 Each OVF image represented as single catalog entry in vcloud director.
1284 The method check for existing catalog entry. The check done by file name without file extension.
1285
1286 if given catalog name already present method will respond with existing catalog uuid otherwise
1287 it will create new catalog entry and upload OVF file to newly created catalog.
1288
1289 If method can't create catalog entry or upload a file it will throw exception.
1290
1291 Method accept boolean flag progress that will output progress bar. It useful method
1292 for standalone upload use case. In case to test large file upload.
1293
1294 Args
1295 path: - valid path to OVF file.
1296 progress - boolean progress bar show progress bar.
1297
1298 Return: if image uploaded correct method will provide image catalog UUID.
1299 """
1300
1301 if not path:
1302 raise vimconn.vimconnException("Image path can't be None.")
1303
1304 if not os.path.isfile(path):
1305 raise vimconn.vimconnException("Can't read file. File not found.")
1306
1307 if not os.access(path, os.R_OK):
1308 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1309
1310 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1311
1312 dirpath, filename = os.path.split(path)
1313 flname, file_extension = os.path.splitext(path)
1314 if file_extension != '.ovf':
1315 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1316 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1317
1318 catalog_name = os.path.splitext(filename)[0]
1319 catalog_md5_name = hashlib.md5(path).hexdigest()
1320 self.logger.debug("File name {} Catalog Name {} file path {} "
1321 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1322
1323 try:
1324 org,vdc = self.get_vdc_details()
1325 catalogs = org.list_catalogs()
1326 except Exception as exp:
1327 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1328 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1329
1330 if len(catalogs) == 0:
1331 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1332 if self.create_vimcatalog(org, catalog_md5_name) is None:
1333 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1334
1335 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1336 media_name=filename, medial_file_name=path, progress=progress)
1337 if not result:
1338 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1339 return self.get_catalogid(catalog_name, catalogs)
1340 else:
1341 for catalog in catalogs:
1342 # search for existing catalog if we find same name we return ID
1343 # TODO optimize this
1344 if catalog['name'] == catalog_md5_name:
1345 self.logger.debug("Found existing catalog entry for {} "
1346 "catalog id {}".format(catalog_name,
1347 self.get_catalogid(catalog_md5_name, catalogs)))
1348 return self.get_catalogid(catalog_md5_name, catalogs)
1349
1350 # if we didn't find existing catalog we create a new one and upload image.
1351 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1352 if self.create_vimcatalog(org, catalog_md5_name) is None:
1353 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1354
1355 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1356 media_name=filename, medial_file_name=path, progress=progress)
1357 if not result:
1358 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1359
1360 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1361
1362 def get_image_list(self, filter_dict={}):
1363 '''Obtain tenant images from VIM
1364 Filter_dict can be:
1365 name: image name
1366 id: image uuid
1367 checksum: image checksum
1368 location: image path
1369 Returns the image list of dictionaries:
1370 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1371 List can be empty
1372 '''
1373
1374 try:
1375 org, vdc = self.get_vdc_details()
1376 image_list = []
1377 catalogs = org.list_catalogs()
1378 if len(catalogs) == 0:
1379 return image_list
1380 else:
1381 for catalog in catalogs:
1382 catalog_uuid = catalog.get('id')
1383 name = catalog.get('name')
1384 filtered_dict = {}
1385 if filter_dict.get("name") and filter_dict["name"] != name:
1386 continue
1387 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1388 continue
1389 filtered_dict ["name"] = name
1390 filtered_dict ["id"] = catalog_uuid
1391 image_list.append(filtered_dict)
1392
1393 self.logger.debug("List of already created catalog items: {}".format(image_list))
1394 return image_list
1395 except Exception as exp:
1396 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1397
1398 def get_vappid(self, vdc=None, vapp_name=None):
1399 """ Method takes vdc object and vApp name and returns vapp uuid or None
1400
1401 Args:
1402 vdc: The VDC object.
1403 vapp_name: is application vappp name identifier
1404
1405 Returns:
1406 The return vApp name otherwise None
1407 """
1408 if vdc is None or vapp_name is None:
1409 return None
1410 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1411 try:
1412 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1413 vdc.ResourceEntities.ResourceEntity)
1414 #For python3
1415 #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1416 # if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1417 if len(refs) == 1:
1418 return refs[0].href.split("vapp")[1][1:]
1419 except Exception as e:
1420 self.logger.exception(e)
1421 return False
1422 return None
1423
1424 def check_vapp(self, vdc=None, vapp_uuid=None):
1425 """ Method Method returns True or False if vapp deployed in vCloud director
1426
1427 Args:
1428 vca: Connector to VCA
1429 vdc: The VDC object.
1430 vappid: vappid is application identifier
1431
1432 Returns:
1433 The return True if vApp deployed
1434 :param vdc:
1435 :param vapp_uuid:
1436 """
1437 try:
1438 refs = filter(lambda ref:
1439 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1440 vdc.ResourceEntities.ResourceEntity)
1441 #For python3
1442 #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1443 # if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1444 for ref in refs:
1445 vappid = ref.href.split("vapp")[1][1:]
1446 # find vapp with respected vapp uuid
1447 if vappid == vapp_uuid:
1448 return True
1449 except Exception as e:
1450 self.logger.exception(e)
1451 return False
1452 return False
1453
1454 def get_namebyvappid(self, vapp_uuid=None):
1455 """Method returns vApp name from vCD and lookup done by vapp_id.
1456
1457 Args:
1458 vapp_uuid: vappid is application identifier
1459
1460 Returns:
1461 The return vApp name otherwise None
1462 """
1463 try:
1464 if self.client and vapp_uuid:
1465 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1466 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1467 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1468
1469 response = self.perform_request(req_type='GET',
1470 url=vapp_call,
1471 headers=headers)
1472 #Retry login if session expired & retry sending request
1473 if response.status_code == 403:
1474 response = self.retry_rest('GET', vapp_call)
1475
1476 tree = XmlElementTree.fromstring(response.content)
1477 return tree.attrib['name']
1478 except Exception as e:
1479 self.logger.exception(e)
1480 return None
1481 return None
1482
1483 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1484 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1485 """Adds a VM instance to VIM
1486 Params:
1487 'start': (boolean) indicates if the VM must start or be created in pause mode.
1488 'image_id','flavor_id': image and flavor VIM id to use for the VM
1489 'net_list': list of interfaces, each one is a dictionary with:
1490 'name': (optional) name for the interface.
1491 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1492 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1493 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1494 'mac_address': (optional) mac address to assign to this interface
1495 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1496 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1497 'type': (mandatory) can be one of:
1498 'virtual', in this case always connected to a network of type 'net_type=bridge'
1499 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
1500 can be created unconnected
1501 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1502 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1503 are allocated on the same physical NIC
1504 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1505 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1506 or True, it must apply the default VIM behaviour
1507 After execution the method will add the key:
1508 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1509 interface. 'net_list' is modified
1510 'cloud_config': (optional) dictionary with:
1511 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1512 'users': (optional) list of users to be inserted, each item is a dict with:
1513 'name': (mandatory) user name,
1514 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1515 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1516 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1517 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1518 'dest': (mandatory) string with the destination absolute path
1519 'encoding': (optional, by default text). Can be one of:
1520 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1521 'content' (mandatory): string with the content of the file
1522 'permissions': (optional) string with file permissions, typically octal notation '0644'
1523 'owner': (optional) file owner, string with the format 'owner:group'
1524 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1525 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1526 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1527 'size': (mandatory) string with the size of the disk in GB
1528 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AV is required
1529 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignored if
1530 availability_zone_index is None
1531 Returns a tuple with the instance identifier and created_items or raises an exception on error
1532 created_items can be None or a dictionary where this method can include key-values that will be passed to
1533 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1534 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1535 as not present.
1536 """
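# Illustrative shapes only (hedged sketch, not used by the code below): these mirror the
# docstring above; all values are hypothetical examples.
# net_list = [{'name': 'eth0', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f',
#              'type': 'virtual', 'use': 'mgmt', 'model': 'virtio'}]
# cloud_config = {'key-pairs': ['ssh-rsa AAAA... user@host'],
#                 'users': [{'name': 'osm', 'key-pairs': ['ssh-rsa AAAA... user@host']}]}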
1537 self.logger.info("Creating new instance for entry {}".format(name))
1538 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
1539 "availability_zone_index {} availability_zone_list {}"\
1540 .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
1541 availability_zone_index, availability_zone_list))
1542
1543 #new vm name = vmname + '-' + uuid
1544 new_vm_name = [name, '-', str(uuid.uuid4())]
1545 vmname_andid = ''.join(new_vm_name)
1546
1547 for net in net_list:
1548 if net['type'] == "PCI-PASSTHROUGH":
1549 raise vimconn.vimconnNotSupportedException(
1550 "Current vCD version does not support type : {}".format(net['type']))
1551
1552 if len(net_list) > 10:
1553 raise vimconn.vimconnNotSupportedException(
1554 "The VM hardware versions 7 and above support up to 10 NICs only")
1555
1556 # if vm already deployed we return existing uuid
1557 # we check for presence of VDC, Catalog entry and Flavor.
1558 org, vdc = self.get_vdc_details()
1559 if vdc is None:
1560 raise vimconn.vimconnNotFoundException(
1561 "new_vminstance(): Failed to create vApp {}: (Failed to retrieve VDC information)".format(name))
1562 catalogs = org.list_catalogs()
1563 if catalogs is None:
1564 #Retry once, if failed by refreshing token
1565 self.get_token()
1566 org = Org(self.client, resource=self.client.get_org())
1567 catalogs = org.list_catalogs()
1568 if catalogs is None:
1569 raise vimconn.vimconnNotFoundException(
1570 "new_vminstance(): Failed to create vApp {}: (Failed to retrieve catalogs list)".format(name))
1571
1572 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1573 if catalog_hash_name:
1574 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1575 else:
1576 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to create vApp {}: "
1577 "(Failed to retrieve catalog information {})".format(name, image_id))
1578
1579 # Set vCPU and Memory based on flavor.
1580 vm_cpus = None
1581 vm_memory = None
1582 vm_disk = None
1583 numas = None
1584
1585 if flavor_id is not None:
1586 if flavor_id not in vimconnector.flavorlist:
1587 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to create vApp {}: "
1588 "Failed to retrieve flavor information "
1589 "for flavor id {}".format(name, flavor_id))
1590 else:
1591 try:
1592 flavor = vimconnector.flavorlist[flavor_id]
1593 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1594 vm_memory = flavor[FLAVOR_RAM_KEY]
1595 vm_disk = flavor[FLAVOR_DISK_KEY]
1596 extended = flavor.get("extended", None)
1597 if extended:
1598 numas=extended.get("numas", None)
1599
1600 except Exception as exp:
1601 raise vimconn.vimconnException("Corrupted flavor {}. Exception: {}".format(flavor_id, exp))
1602
1603 # image upload creates the template name as '<catalog name> Template'.
1604 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1605 power_on = 'false'
1606 if start:
1607 power_on = 'true'
1608
1609 # client must provide at least one entry in net_list; if not, we report an error
1610 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1611 #If there is no mgmt net, then the first net in net_list is considered the primary net.
1612 primary_net = None
1613 primary_netname = None
1614 primary_net_href = None
1615 network_mode = 'bridged'
1616 if net_list is not None and len(net_list) > 0:
1617 for net in net_list:
1618 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1619 primary_net = net
1620 if primary_net is None:
1621 primary_net = net_list[0]
1622
1623 try:
1624 primary_net_id = primary_net['net_id']
1625 url_list = [self.url, '/api/network/', primary_net_id]
1626 primary_net_href = ''.join(url_list)
1627 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1628 if 'name' in network_dict:
1629 primary_netname = network_dict['name']
1630
1631 except KeyError:
1632 raise vimconn.vimconnException("Corrupted primary network {}: missing 'net_id'".format(primary_net))
1633 else:
1634 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create vApp {}: network list is empty.".format(name))
1635
1636 # use: 'data', 'bridge', 'mgmt'
1637 # create vApp. Set vcpu and ram based on flavor id.
1638 try:
1639 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1640 if not vdc_obj:
1641 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
1642
1643 for retry in (1,2):
1644 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1645 catalog_items = [items.attrib]
1646
1647 if len(catalog_items) == 1:
1648 if self.client:
1649 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1650 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1651
1652 response = self.perform_request(req_type='GET',
1653 url=catalog_items[0].get('href'),
1654 headers=headers)
1655 catalogItem = XmlElementTree.fromstring(response.content)
1656 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1657 vapp_template_href = entity.get("href")
1658
1659 response = self.perform_request(req_type='GET',
1660 url=vapp_template_href,
1661 headers=headers)
1662 if response.status_code != requests.codes.ok:
1663 self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_template_href,
1664 response.status_code))
1665 else:
1666 result = (response.content).replace("\n"," ")
1667
1668 vapp_template_tree = XmlElementTree.fromstring(response.content)
1669 children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
1670 vm_element = [child for child in children_element if 'Vm' in child.tag][0]
1671 vm_name = vm_element.get('name')
1672 vm_id = vm_element.get('id')
1673 vm_href = vm_element.get('href')
1674
1675 cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1676 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1677 cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
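# For reference, the regexes above expect RASD fragments of roughly this shape inside the
# vApp template response (sketch, abbreviated; the response was flattened to one line by the
# replace() call above):
# <rasd:Description>Number of Virtual CPUs</rasd:Description> ... <rasd:VirtualQuantity>2</rasd:VirtualQuantity>
# <rasd:Description>Memory Size</rasd:Description> ... <rasd:VirtualQuantity>4096</rasd:VirtualQuantity>
# <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>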
1678
1679 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
1680 vdc_id = vdc.get('id').split(':')[-1]
1681 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
1682 vdc_id)
1683 data = """<?xml version="1.0" encoding="UTF-8"?>
1684 <InstantiateVAppTemplateParams
1685 xmlns="http://www.vmware.com/vcloud/v1.5"
1686 name="{}"
1687 deploy="false"
1688 powerOn="false"
1689 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
1690 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1691 <Description>Vapp instantiation</Description>
1692 <InstantiationParams>
1693 <NetworkConfigSection>
1694 <ovf:Info>Configuration parameters for logical networks</ovf:Info>
1695 <NetworkConfig networkName="{}">
1696 <Configuration>
1697 <ParentNetwork href="{}" />
1698 <FenceMode>bridged</FenceMode>
1699 </Configuration>
1700 </NetworkConfig>
1701 </NetworkConfigSection>
1702 <LeaseSettingsSection
1703 type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
1704 <ovf:Info>Lease Settings</ovf:Info>
1705 <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
1706 <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
1707 </LeaseSettingsSection>
1708 </InstantiationParams>
1709 <Source href="{}"/>
1710 <SourcedItem>
1711 <Source href="{}" id="{}" name="{}"
1712 type="application/vnd.vmware.vcloud.vm+xml"/>
1713 <VmGeneralParams>
1714 <NeedsCustomization>false</NeedsCustomization>
1715 </VmGeneralParams>
1716 <InstantiationParams>
1717 <NetworkConnectionSection>
1718 <ovf:Info>Specifies the available VM network connections</ovf:Info>
1719 <NetworkConnection network="{}">
1720 <NetworkConnectionIndex>0</NetworkConnectionIndex>
1721 <IsConnected>true</IsConnected>
1722 <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
1723 </NetworkConnection>
1724 </NetworkConnectionSection><ovf:VirtualHardwareSection>
1725 <ovf:Info>Virtual hardware requirements</ovf:Info>
1726 <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
1727 xmlns:vmw="http://www.vmware.com/schema/ovf">
1728 <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
1729 <rasd:Description>Number of Virtual CPUs</rasd:Description>
1730 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
1731 <rasd:InstanceID>4</rasd:InstanceID>
1732 <rasd:Reservation>0</rasd:Reservation>
1733 <rasd:ResourceType>3</rasd:ResourceType>
1734 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
1735 <rasd:Weight>0</rasd:Weight>
1736 <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
1737 </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
1738 <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
1739 <rasd:Description>Memory Size</rasd:Description>
1740 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
1741 <rasd:InstanceID>5</rasd:InstanceID>
1742 <rasd:Reservation>0</rasd:Reservation>
1743 <rasd:ResourceType>4</rasd:ResourceType>
1744 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
1745 <rasd:Weight>0</rasd:Weight>
1746 </ovf:Item>
1747 </ovf:VirtualHardwareSection>
1748 </InstantiationParams>
1749 </SourcedItem>
1750 <AllEULAsAccepted>false</AllEULAsAccepted>
1751 </InstantiateVAppTemplateParams>""".format(vmname_andid,
1752 primary_netname,
1753 primary_net_href,
1754 vapp_template_href,
1755 vm_href,
1756 vm_id,
1757 vm_name,
1758 primary_netname,
1759 cpu=cpus,
1760 core=cores,
1761 memory=memory_mb)
1762
1763 response = self.perform_request(req_type='POST',
1764 url=instantiate_vapp_href,
1765 headers=headers,
1766 data=data)
1767
1768 if response.status_code != 201:
1769 self.logger.error("REST call {} failed reason : {} "\
1770 "status code : {}".format(instantiate_vapp_href,
1771 response.content,
1772 response.status_code))
1773 raise vimconn.vimconnException("new_vminstance(): Failed to create "\
1774 "vApp {}".format(vmname_andid))
1775 else:
1776 vapptask = self.get_task_from_response(response.content)
1777
1778 if vapptask is None and retry==1:
1779 self.get_token() # Retry getting token
1780 continue
1781 else:
1782 break
1783
1784 if vapptask is None or vapptask is False:
1785 raise vimconn.vimconnUnexpectedResponse(
1786 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1787
1788 # wait for task to complete
1789 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
1790
1791 if result.get('status') == 'success':
1792 self.logger.debug("new_vminstance(): Successfully created vApp {}".format(vmname_andid))
1793 else:
1794 raise vimconn.vimconnUnexpectedResponse(
1795 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1796
1797 except Exception as exp:
1798 raise vimconn.vimconnUnexpectedResponse(
1799 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1800
1801 # we should have now vapp in undeployed state.
1802 try:
1803 vdc_obj = VDC(self.client, href=vdc.get('href'))
1804 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1805 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1806 vapp = VApp(self.client, resource=vapp_resource)
1807
1808 except Exception as exp:
1809 raise vimconn.vimconnUnexpectedResponse(
1810 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1811 .format(vmname_andid, exp))
1812
1813 if vapp_uuid is None:
1814 raise vimconn.vimconnUnexpectedResponse(
1815 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1816 vmname_andid))
1817
1818 #Add PCI passthrough/SRIOV configurations
1819 vm_obj = None
1820 pci_devices_info = []
1821 reserve_memory = False
1822
1823 for net in net_list:
1824 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1825 pci_devices_info.append(net)
1826 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id' in net:
1827 reserve_memory = True
1828
1829 #Add PCI
1830 if len(pci_devices_info) > 0:
1831 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1832 vmname_andid ))
1833 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1834 pci_devices_info,
1835 vmname_andid)
1836 if PCI_devices_status:
1837 self.logger.info("Added PCI devices {} to VM {}".format(
1838 pci_devices_info,
1839 vmname_andid)
1840 )
1841 reserve_memory = True
1842 else:
1843 self.logger.info("Failed to add PCI devices {} to VM {}".format(
1844 pci_devices_info,
1845 vmname_andid)
1846 )
1847
1848 # Modify vm disk
1849 if vm_disk:
1850 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1851 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1852 if result :
1853 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1854
1855 #Add new or existing disks to vApp
1856 if disk_list:
1857 added_existing_disk = False
1858 for disk in disk_list:
1859 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1860 image_id = disk['image_id']
1861 # Adding CD-ROM to VM
1862 # will revisit code once specification ready to support this feature
1863 self.insert_media_to_vm(vapp, image_id)
1864 elif "image_id" in disk and disk["image_id"] is not None:
1865 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1866 disk["image_id"] , vapp_uuid))
1867 self.add_existing_disk(catalogs=catalogs,
1868 image_id=disk["image_id"],
1869 size = disk["size"],
1870 template_name=templateName,
1871 vapp_uuid=vapp_uuid
1872 )
1873 added_existing_disk = True
1874 else:
1875 #Wait till added existing disk gets reflected into vCD database/API
1876 if added_existing_disk:
1877 time.sleep(5)
1878 added_existing_disk = False
1879 self.add_new_disk(vapp_uuid, disk['size'])
1880
1881 if numas:
1882 # Assigning numa affinity setting
1883 for numa in numas:
1884 if 'paired-threads-id' in numa:
1885 paired_threads_id = numa['paired-threads-id']
1886 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1887
1888 # add NICs & connect to networks in netlist
1889 try:
1890 vdc_obj = VDC(self.client, href=vdc.get('href'))
1891 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1892 vapp = VApp(self.client, resource=vapp_resource)
1893 vapp_id = vapp_resource.get('id').split(':')[-1]
1894
1895 self.logger.info("Removing primary NIC: ")
1896 # First remove all NICs so that NIC properties can be adjusted as needed
1897 self.remove_primary_network_adapter_from_all_vms(vapp)
1898
1899 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1900 primary_nic_index = 0
1901 nicIndex = 0
1902 for net in net_list:
1903 # openmano uses network id in UUID format.
1904 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
1905 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1906 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1907
1908 if 'net_id' not in net:
1909 continue
1910
1911 #Using net_id as a vim_id i.e. vim interface id, as we do not have a separate vim interface id
1912 #Same will be returned in refresh_vms_status() as vim_interface_id
1913 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1914
1915 interface_net_id = net['net_id']
1916 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1917 interface_network_mode = net['use']
1918
1919 if interface_network_mode == 'mgmt':
1920 primary_nic_index = nicIndex
1921
1922 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1923 - DHCP (The IP address is obtained from a DHCP service.)
1924 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1925 - NONE (No IP addressing mode specified.)"""
1926
1927 if primary_netname is not None:
1928 self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
1929 nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
1930 #For python3
1931 #nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
1932 if len(nets) == 1:
1933 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
1934
1935 if interface_net_name != primary_netname:
1936 # connect network to VM - with all DHCP by default
1937 self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
1938 self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
1939
1940 type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
1941 nic_type = 'VMXNET3'
1942 if 'type' in net and net['type'] not in type_list:
1943 # fetching nic type from vnf
1944 if 'model' in net:
1945 if net['model'] is not None:
1946 if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
1947 nic_type = 'VMXNET3'
1948 else:
1949 nic_type = net['model']
1950
1951 self.logger.info("new_vminstance(): adding network adapter "\
1952 "to a network {}".format(nets[0].get('name')))
1953 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1954 primary_nic_index,
1955 nicIndex,
1956 net,
1957 nic_type=nic_type)
1958 else:
1959 self.logger.info("new_vminstance(): adding network adapter "\
1960 "to a network {}".format(nets[0].get('name')))
1961 if net['type'] in ['SR-IOV', 'VF']:
1962 nic_type = net['type']
1963 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1964 primary_nic_index,
1965 nicIndex,
1966 net,
1967 nic_type=nic_type)
1968 nicIndex += 1
1969
1970 # cloud-init for ssh-key injection
1971 if cloud_config:
1972 # Create a catalog which will be carrying the config drive ISO
1973 # This catalog is deleted during vApp deletion. The catalog name carries
1974 # vApp UUID and that's how it gets identified during its deletion.
1975 config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
1976 self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
1977 config_drive_catalog_name))
1978 config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
1979 if config_drive_catalog_id is None:
1980 error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
1981 "ISO".format(config_drive_catalog_name)
1982 raise Exception(error_msg)
1983
1984 # Create config-drive ISO
1985 _, userdata = self._create_user_data(cloud_config)
1986 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
1987 iso_path = self.create_config_drive_iso(userdata)
1988 self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
1989
1990 self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
1991 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
1992 # Attach the config-drive ISO to the VM
1993 self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
1994 # The ISO remains in INVALID_STATE right after the PUT request (it's a blocking call though)
1995 time.sleep(5)
1996 self.insert_media_to_vm(vapp, config_drive_catalog_id)
1997 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
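# Note: the 'cfg_drv-<vapp_uuid>' catalog created above is looked up again by name and
# removed in delete_vminstance(), so the config-drive ISO does not outlive the VM.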
1998
1999 # If VM has PCI devices or SRIOV reserve memory for VM
2000 if reserve_memory:
2001 self.reserve_memory_for_all_vms(vapp, memory_mb)
2002
2003 self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
2004
2005 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2006 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2007 if result.get('status') == 'success':
2008 self.logger.info("new_vminstance(): Successfully powered on "\
2009 "vApp {}".format(vmname_andid))
2010 else:
2011 self.logger.error("new_vminstance(): failed to power on vApp "\
2012 "{}".format(vmname_andid))
2013
2014 except Exception as exp:
2015 try:
2016 self.delete_vminstance(vapp_uuid)
2017 except Exception as exp2:
2018 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2019 # it might be the case that a specific mandatory entry in the dict is empty, or some other pyVcloud exception occurred
2020 self.logger.error("new_vminstance(): Failed to create new vm instance {} with exception {}"
2021 .format(name, exp))
2022 raise vimconn.vimconnException("new_vminstance(): Failed to create new vm instance {} with exception {}"
2023 .format(name, exp))
2024
2025 # check if vApp is deployed and, if that is the case, return the vApp UUID; otherwise raise an exception
2026 wait_time = 0
2027 vapp_uuid = None
2028 while wait_time <= MAX_WAIT_TIME:
2029 try:
2030 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2031 vapp = VApp(self.client, resource=vapp_resource)
2032 except Exception as exp:
2033 raise vimconn.vimconnUnexpectedResponse(
2034 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
2035 .format(vmname_andid, exp))
2036
2037 #if vapp and vapp.me.deployed:
2038 if vapp and vapp_resource.get('deployed') == 'true':
2039 vapp_uuid = vapp_resource.get('id').split(':')[-1]
2040 break
2041 else:
2042 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
2043 time.sleep(INTERVAL_TIME)
2044
2045 wait_time +=INTERVAL_TIME
2046
2047 #SET Affinity Rule for VM
2048 #Pre-requisites: User has created Host Groups in vCenter with respective Hosts to be used
2049 #While creating VIM account user has to pass the Host Group names in availability_zone list
2050 #"availability_zone" is a part of VIM "config" parameters
2051 #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2052 #Host groups are referred as availability zones
2053 #With following procedure, deployed VM will be added into a VM group.
2054 #Then a VM-to-Host affinity rule will be created using the VM group & Host group.
2055 if availability_zone_list:
2056 self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
2057 #Admin access required for creating Affinity rules
2058 client = self.connect_as_admin()
2059 if not client:
2060 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
2061 else:
2062 self.client = client
2063 if self.client:
2064 headers = {'Accept':'application/*+xml;version=27.0',
2065 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2066 #Step1: Get provider vdc details from organization
2067 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2068 if pvdc_href is not None:
2069 #Step2: Found required pvdc, now get resource pool information
2070 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2071 if respool_href is None:
2072 #Raise error if respool_href not found
2073 msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
2074 .format(pvdc_href)
2075 self.log_message(msg)
2076
2077 #Step3: Verify requested availability zone(hostGroup) is present in vCD
2078 # get availability Zone
2079 vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
2080 # check if provided av zone(hostGroup) is present in vCD VIM
2081 status = self.check_availibility_zone(vm_az, respool_href, headers)
2082 if status is False:
2083 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
2084 "resource pool {} status: {}".format(vm_az,respool_href,status)
2085 self.log_message(msg)
2086 else:
2087 self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
2088
2089 #Step4: Find VM group references to create vm group
2090 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2091 if vmgrp_href is None:
2092 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2093 self.log_message(msg)
2094
2095 #Step5: Create a VmGroup with name az_VmGroup
2096 vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
2097 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2098 if status is not True:
2099 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
2100 self.log_message(msg)
2101
2102 #VM Group url to add vms to vm group
2103 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
2104
2105 #Step6: Add VM to VM Group
2106 #Find VM uuid from vapp_uuid
2107 vm_details = self.get_vapp_details_rest(vapp_uuid)
2108 vm_uuid = vm_details['vmuuid']
2109
2110 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2111 if status is not True:
2112 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
2113 self.log_message(msg)
2114
2115 #Step7: Create VM to Host affinity rule
2116 addrule_href = self.get_add_rule_reference (respool_href, headers)
2117 if addrule_href is None:
2118 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
2119 .format(respool_href)
2120 self.log_message(msg)
2121
2122 status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
2123 if status is False:
2124 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
2125 .format(name, vm_az)
2126 self.log_message(msg)
2127 else:
2128 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
2129 .format(name, vm_az))
2130 #Reset token to a normal user to perform other operations
2131 self.get_token()
2132
2133 if vapp_uuid is not None:
2134 return vapp_uuid, None
2135 else:
2136 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create new vm instance {}".format(name))
2137
2138 def create_config_drive_iso(self, user_data):
2139 tmpdir = tempfile.mkdtemp()
2140 iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
2141 latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
2142 os.makedirs(latest_dir)
2143 with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
2144 open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
2145 userdata_file_obj.write(user_data)
2146 meta_file_obj.write(json.dumps({"availability_zone": "nova",
2147 "launch_index": 0,
2148 "name": "ConfigDrive",
2149 "uuid": str(uuid.uuid4())}
2150 )
2151 )
2152 genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
2153 iso_path=iso_path, source_dir_path=tmpdir)
2154 self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
2155 try:
2156 FNULL = open(os.devnull, 'w')
2157 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2158 except subprocess.CalledProcessError as e:
2159 shutil.rmtree(tmpdir, ignore_errors=True)
2160 error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
2161 self.logger.error(error_msg)
2162 raise Exception(error_msg)
2163 return iso_path
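# Sketch of the layout packed into the ISO above (the tmpdir path is random):
#   <tmpdir>/openstack/latest/meta_data.json  -> {"availability_zone": "nova", "launch_index": 0, ...}
#   <tmpdir>/openstack/latest/user_data       -> raw cloud-init user-data
# The equivalent manual command would be roughly:
#   genisoimage -J -r -V config-2 -o ConfigDrive.iso <tmpdir>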
2164
2165 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
2166 if not os.path.isfile(iso_file_path):
2167 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
2168 self.logger.error(error_msg)
2169 raise Exception(error_msg)
2170 iso_file_stat = os.stat(iso_file_path)
2171 xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
2172 <Media
2173 xmlns="http://www.vmware.com/vcloud/v1.5"
2174 name="{iso_name}"
2175 size="{iso_size}"
2176 imageType="iso">
2177 <Description>ISO image for config-drive</Description>
2178 </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
2179 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2180 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2181 headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
2182 catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
2183 response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
2184
2185 if response.status_code != 201:
2186 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
2187 self.logger.error(error_msg)
2188 raise Exception(error_msg)
2189
2190 catalogItem = XmlElementTree.fromstring(response.content)
2191 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
2192 entity_href = entity.get('href')
2193
2194 response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
2195 if response.status_code != 200:
2196 raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
2197
2198 match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
2199 if match:
2200 media_upload_href = match.group(1)
2201 else:
2202 raise Exception('Could not parse the upload URL for the media file from the last response')
2203 upload_iso_task = self.get_task_from_response(response.content)
2204 headers['Content-Type'] = 'application/octet-stream'
2205 response = self.perform_request(req_type='PUT',
2206 url=media_upload_href,
2207 headers=headers,
2208 data=open(iso_file_path, 'rb'))
2209
2210 if response.status_code != 200:
2211 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2212 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2213 if result.get('status') != 'success':
2214 raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
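# Summary of the upload sequence implemented above (all hrefs are taken from vCD responses):
#   1. POST <catalog>/action/upload with a Media descriptor  -> 201 + catalog item
#   2. GET the media entity href                             -> response contains <Files><File href="..."/> and the upload task
#   3. PUT the raw ISO bytes to the File upload href
#   4. wait for the upload task from step 2 to reach 'success'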
2215
2216 def get_vcd_availibility_zones(self,respool_href, headers):
2217 """ Method to find presence of an availability zone in the VIM resource pool
2218
2219 Args:
2220 respool_href - resource pool href
2221 headers - header information
2222
2223 Returns:
2224 vcd_az - list of availability zones present in vCD
2225 """
2226 vcd_az = []
2227 url=respool_href
2228 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2229
2230 if resp.status_code != requests.codes.ok:
2231 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2232 else:
2233 #Get the href to hostGroups and find provided hostGroup is present in it
2234 resp_xml = XmlElementTree.fromstring(resp.content)
2235 for child in resp_xml:
2236 if 'VMWProviderVdcResourcePool' in child.tag:
2237 for schild in child:
2238 if 'Link' in schild.tag:
2239 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2240 hostGroup = schild.attrib.get('href')
2241 hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
2242 if hg_resp.status_code != requests.codes.ok:
2243 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
2244 else:
2245 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2246 for hostGroup in hg_resp_xml:
2247 if 'HostGroup' in hostGroup.tag:
2248 #append host group name to the list
2249 vcd_az.append(hostGroup.attrib.get("name"))
2250 return vcd_az
2251
2252
2253 def set_availability_zones(self):
2254 """
2255 Set vim availability zone
2256 """
2257
2258 vim_availability_zones = None
2259 availability_zone = None
2260 if 'availability_zone' in self.config:
2261 vim_availability_zones = self.config.get('availability_zone')
2262 if isinstance(vim_availability_zones, str):
2263 availability_zone = [vim_availability_zones]
2264 elif isinstance(vim_availability_zones, list):
2265 availability_zone = vim_availability_zones
2266 else:
2267 return availability_zone
2268
2269 return availability_zone
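# Accepted VIM "config" forms (host group names are hypothetical examples):
#   "availability_zone": "HG_170"                       -> normalised to ["HG_170"]
#   "availability_zone": ["HG_170", "HG_174", "HG_175"] -> used as-is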
2270
2271
2272 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2273 """
2274 Return the availability zone to be used by the created VM.
2275 returns: The VIM availability zone to be used or None
2276 """
2277 if availability_zone_index is None:
2278 if not self.config.get('availability_zone'):
2279 return None
2280 elif isinstance(self.config.get('availability_zone'), str):
2281 return self.config['availability_zone']
2282 else:
2283 return self.config['availability_zone'][0]
2284
2285 vim_availability_zones = self.availability_zone
2286
2287 # check if VIM offers enough availability zones as described in the VNFD
2288 if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
2289 # check if all the names of NFV AV match VIM AV names
2290 match_by_index = False
2291 for av in availability_zone_list:
2292 if av not in vim_availability_zones:
2293 match_by_index = True
2294 break
2295 if match_by_index:
2296 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
2297 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
2298 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
2299 self.logger.debug("VIM Availability zones will be used by index")
2300 return vim_availability_zones[availability_zone_index]
2301 else:
2302 return availability_zone_list[availability_zone_index]
2303 else:
2304 raise vimconn.vimconnConflictException("Not enough availability zones at VIM for this deployment")
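# Illustrative selection (hypothetical values): with VIM config availability_zone = ["HG_170", "HG_174"]
# and VNFD zones ["zone-a", "zone-b"], no VNFD name matches a VIM host group, so the zone is picked
# by index from the VIM list (availability_zone_index = 1 -> "HG_174"). If every VNFD name matched
# a VIM name, availability_zone_list[availability_zone_index] itself would be returned.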
2305
2306
2307 def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
2308 """ Method to create VM to Host Affinity rule in vCD
2309
2310 Args:
2311 addrule_href - href to make a POST request
2312 vmgrpname - name of the VM group created
2313 hostgrpname - name of the host group created earlier
2314 polarity - Affinity or Anti-affinity (default: Affinity)
2315 headers - headers to make REST call
2316
2317 Returns:
2318 True- if rule is created
2319 False- Failed to create rule due to some error
2320
2321 """
2322 task_status = False
2323 rule_name = polarity + "_" + vmgrpname
2324 payload = """<?xml version="1.0" encoding="UTF-8"?>
2325 <vmext:VMWVmHostAffinityRule
2326 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2327 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2328 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2329 <vcloud:Name>{}</vcloud:Name>
2330 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2331 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2332 <vcloud:Polarity>{}</vcloud:Polarity>
2333 <vmext:HostGroupName>{}</vmext:HostGroupName>
2334 <vmext:VmGroupName>{}</vmext:VmGroupName>
2335 </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
2336
2337 resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
2338
2339 if resp.status_code != requests.codes.accepted:
2340 self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
2341 task_status = False
2342 return task_status
2343 else:
2344 affinity_task = self.get_task_from_response(resp.content)
2345 self.logger.debug ("affinity_task: {}".format(affinity_task))
2346 if affinity_task is None or affinity_task is False:
2347 raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
2348 # wait for task to complete
2349 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2350 if result.get('status') == 'success':
2351 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
2352 return True
2353 else:
2354 raise vimconn.vimconnUnexpectedResponse(
2355 "failed to create affinity rule {}".format(rule_name))
2356
2357
2358 def get_add_rule_reference (self, respool_href, headers):
2359 """ This method finds href to add vm to host affinity rule to vCD
2360
2361 Args:
2362 respool_href- href to resource pool
2363 headers- header information to make REST call
2364
2365 Returns:
2366 None - if no valid href to add rule found or
2367 addrule_href - href to add vm to host affinity rule of resource pool
2368 """
2369 addrule_href = None
2370 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2371
2372 if resp.status_code != requests.codes.ok:
2373 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2374 else:
2375
2376 resp_xml = XmlElementTree.fromstring(resp.content)
2377 for child in resp_xml:
2378 if 'VMWProviderVdcResourcePool' in child.tag:
2379 for schild in child:
2380 if 'Link' in schild.tag:
2381 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
2382 schild.attrib.get('rel') == "add":
2383 addrule_href = schild.attrib.get('href')
2384 break
2385
2386 return addrule_href
2387
2388
2389 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2390 """ Method to add deployed VM to newly created VM Group.
2391 This is required to create VM to Host affinity in vCD
2392
2393 Args:
2394 vm_uuid- newly created vm uuid
2395 vmGroupNameURL- URL to VM Group name
2396 vmGroup_name- Name of VM group created
2397 headers- Headers for REST request
2398
2399 Returns:
2400 True- if VM added to VM group successfully
2401 False- if any error is encountered
2402 """
2403
2404 addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
2405
2406 if addvm_resp.status_code != requests.codes.ok:
2407 self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
2408 .format(vmGroupNameURL, addvm_resp.status_code))
2409 return False
2410 else:
2411 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2412 for child in resp_xml:
2413 if child.tag.split('}')[1] == 'Link':
2414 if child.attrib.get("rel") == "addVms":
2415 addvmtogrpURL = child.attrib.get("href")
2416
2417 #Get vm details
2418 url_list = [self.url, '/api/vApp/vm-',vm_uuid]
2419 vmdetailsURL = ''.join(url_list)
2420
2421 resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
2422
2423 if resp.status_code != requests.codes.ok:
2424 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
2425 return False
2426
2427 #Parse VM details
2428 resp_xml = XmlElementTree.fromstring(resp.content)
2429 if resp_xml.tag.split('}')[1] == "Vm":
2430 vm_id = resp_xml.attrib.get("id")
2431 vm_name = resp_xml.attrib.get("name")
2432 vm_href = resp_xml.attrib.get("href")
2433 #print vm_id, vm_name, vm_href
2434 #Add VM into VMgroup
2435 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2436 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2437 xmlns="http://www.vmware.com/vcloud/versions" \
2438 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2439 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2440 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2441 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2442 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2443 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2444 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2445 <ns2:VmReference href="{}" id="{}" name="{}" \
2446 type="application/vnd.vmware.vcloud.vm+xml" />\
2447 </ns2:Vms>""".format(vm_href, vm_id, vm_name)
2448
2449 addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
2450
2451 if addvmtogrp_resp.status_code != requests.codes.accepted:
2452 self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
2453 return False
2454 else:
2455 self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
2456 return True
2457
2458
2459 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2460 """Method to create a VM group in vCD
2461
2462 Args:
2463 vmgroup_name : Name of VM group to be created
2464 vmgroup_href : href for vmgroup
2465 headers- Headers for REST request
2466 """
2467 #POST to add URL with required data
2468 vmgroup_status = False
2469 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2470 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2471 <vmCount>1</vmCount>\
2472 </VMWVmGroup>""".format(vmgroup_name)
2473 resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
2474
2475 if resp.status_code != requests.codes.accepted:
2476 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
2477 return vmgroup_status
2478 else:
2479 vmgroup_task = self.get_task_from_response(resp.content)
2480 if vmgroup_task is None or vmgroup_task is False:
2481 raise vimconn.vimconnUnexpectedResponse(
2482 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2483
2484 # wait for task to complete
2485 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2486
2487 if result.get('status') == 'success':
2488 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
2489 #time.sleep(10)
2490 vmgroup_status = True
2491 return vmgroup_status
2492 else:
2493 raise vimconn.vimconnUnexpectedResponse(\
2494 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2495
2496
2497 def find_vmgroup_reference(self, url, headers):
2498 """ Method to find the href needed to create a new VMGroup, to which the created VM is added
2499 Args:
2500 url- resource pool href
2501 headers- header information
2502
2503 Returns:
2504 vmgrp_href - href used to create the VM group, or None if not found
2505 """
2506 #Perform GET on resource pool to find 'add' link to create VMGroup
2507 #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2508 vmgrp_href = None
2509 resp = self.perform_request(req_type='GET',url=url, headers=headers)
2510
2511 if resp.status_code != requests.codes.ok:
2512 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2513 else:
2514 #Get the href to add vmGroup to vCD
2515 resp_xml = XmlElementTree.fromstring(resp.content)
2516 for child in resp_xml:
2517 if 'VMWProviderVdcResourcePool' in child.tag:
2518 for schild in child:
2519 if 'Link' in schild.tag:
2520 #Find href with type VMGroup and rel with add
2521 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
2522 and schild.attrib.get('rel') == "add":
2523 vmgrp_href = schild.attrib.get('href')
2524 return vmgrp_href
2525
2526
2527 def check_availibility_zone(self, az, respool_href, headers):
2528 """ Method to verify whether the requested availability zone is present in the provided
2529 resource pool
2530
2531 Args:
2532 az - name of hostgroup (availibility_zone)
2533 respool_href - Resource Pool href
2534 headers - Headers to make REST call
2535 Returns:
2536 az_found - True if availibility_zone is found else False
2537 """
2538 az_found = False
2539 headers['Accept']='application/*+xml;version=27.0'
2540 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2541
2542 if resp.status_code != requests.codes.ok:
2543 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2544 else:
2545 #Get the href to hostGroups and find provided hostGroup is present in it
2546 resp_xml = XmlElementTree.fromstring(resp.content)
2547
2548 for child in resp_xml:
2549 if 'VMWProviderVdcResourcePool' in child.tag:
2550 for schild in child:
2551 if 'Link' in schild.tag:
2552 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2553 hostGroup_href = schild.attrib.get('href')
2554 hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
2555 if hg_resp.status_code != requests.codes.ok:
2556 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
2557 else:
2558 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2559 for hostGroup in hg_resp_xml:
2560 if 'HostGroup' in hostGroup.tag:
2561 if hostGroup.attrib.get("name") == az:
2562 az_found = True
2563 break
2564 return az_found
2565
2566
2567 def get_pvdc_for_org(self, org_vdc, headers):
2568 """ This method gets provider vdc references from organisation
2569
2570 Args:
2571 org_vdc - name of the organisation VDC to find pvdc
2572 headers - headers to make REST call
2573
2574 Returns:
2575 None - if no pvdc href found else
2576 pvdc_href - href to pvdc
2577 """
2578
2579 #Get provider VDC references from vCD
2580 pvdc_href = None
2581 #url = '<vcd url>/api/admin/extension/providerVdcReferences'
2582 url_list = [self.url, '/api/admin/extension/providerVdcReferences']
2583 url = ''.join(url_list)
2584
2585 response = self.perform_request(req_type='GET',url=url, headers=headers)
2586 if response.status_code != requests.codes.ok:
2587 self.logger.debug ("REST API call {} failed. Return status code {}"\
2588 .format(url, response.status_code))
2589 else:
2590 xmlroot_response = XmlElementTree.fromstring(response.content)
2591 for child in xmlroot_response:
2592 if 'ProviderVdcReference' in child.tag:
2593 pvdc_href = child.attrib.get('href')
2594 #Get vdcReferences to find org
2595 pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2596 if pvdc_resp.status_code != requests.codes.ok:
2597 raise vimconn.vimconnException("REST API call {} failed. "\
2598 "Return status code {}"\
2599 .format(url, pvdc_resp.status_code))
2600
2601 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
2602 for child in pvdc_resp_xml:
2603 if 'Link' in child.tag:
2604 if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
2605 vdc_href = child.attrib.get('href')
2606
2607 #Check if provided org is present in vdc
2608 vdc_resp = self.perform_request(req_type='GET',
2609 url=vdc_href,
2610 headers=headers)
2611 if vdc_resp.status_code != requests.codes.ok:
2612 raise vimconn.vimconnException("REST API call {} failed. "\
2613 "Return status code {}"\
2614 .format(url, vdc_resp.status_code))
2615 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
2616 for child in vdc_resp_xml:
2617 if 'VdcReference' in child.tag:
2618 if child.attrib.get('name') == org_vdc:
2619 return pvdc_href
2620
2621
2622 def get_resource_pool_details(self, pvdc_href, headers):
2623 """ Method to get resource pool information.
2624 Host groups are a property of the resource pool.
2625 To get host groups, we need to GET details of resource pool.
2626
2627 Args:
2628 pvdc_href: href to pvdc details
2629 headers: headers
2630
2631 Returns:
2632 respool_href - Returns href link reference to resource pool
2633 """
2634 respool_href = None
2635 resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2636
2637 if resp.status_code != requests.codes.ok:
2638 self.logger.debug ("REST API call {} failed. Return status code {}"\
2639 .format(pvdc_href, resp.status_code))
2640 else:
2641 respool_resp_xml = XmlElementTree.fromstring(resp.content)
2642 for child in respool_resp_xml:
2643 if 'Link' in child.tag:
2644 if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
2645 respool_href = child.attrib.get("href")
2646 break
2647 return respool_href
2648
2649
2650 def log_message(self, msg):
2651 """
2652 Method to log error messages related to Affinity rule creation
2653 in new_vminstance & raise Exception
2654 Args :
2655 msg - Error message to be logged
2656
2657 """
2658 #get token to connect vCD as a normal user
2659 self.get_token()
2660 self.logger.debug(msg)
2661 raise vimconn.vimconnException(msg)
2662
2663
2664 ##
2665 ##
2666 ## based on current discussion
2667 ##
2668 ##
2669 ## server:
2670 # created: '2016-09-08T11:51:58'
2671 # description: simple-instance.linux1.1
2672 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
2673 # hostId: e836c036-74e7-11e6-b249-0800273e724c
2674 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
2675 # status: ACTIVE
2676 # error_msg:
2677 # interfaces: …
2678 #
2679 def get_vminstance(self, vim_vm_uuid=None):
2680 """Returns the VM instance information from VIM"""
2681
2682 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
2683
2684 org, vdc = self.get_vdc_details()
2685 if vdc is None:
2686 raise vimconn.vimconnConnectionException(
2687 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2688
2689 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
2690 if not vm_info_dict:
2691 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2692 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2693
2694 status_key = vm_info_dict['status']
2695 error = ''
2696 try:
2697 vm_dict = {'created': vm_info_dict['created'],
2698 'description': vm_info_dict['name'],
2699 'status': vcdStatusCode2manoFormat[int(status_key)],
2700 'hostId': vm_info_dict['vmuuid'],
2701 'error_msg': error,
2702 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2703
2704 if 'interfaces' in vm_info_dict:
2705 vm_dict['interfaces'] = vm_info_dict['interfaces']
2706 else:
2707 vm_dict['interfaces'] = []
2708 except KeyError:
2709 vm_dict = {'created': '',
2710 'description': '',
2711 'status': vcdStatusCode2manoFormat[int(-1)],
2712 'hostId': vm_info_dict['vmuuid'],
2713 'error_msg': "Inconsistency state",
2714 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2715
2716 return vm_dict
2717
2718 def delete_vminstance(self, vm__vim_uuid, created_items=None):
2719 """Method to power off and remove a VM instance from the vCloud Director network.
2720
2721 Args:
2722 vm__vim_uuid: VM UUID
2723
2724 Returns:
2725 Returns the instance identifier
2726 """
2727
2728 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
2729
2730 org, vdc = self.get_vdc_details()
2731 vdc_obj = VDC(self.client, href=vdc.get('href'))
2732 if vdc_obj is None:
2733 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
2734 self.tenant_name))
2735 raise vimconn.vimconnException(
2736 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2737
2738 try:
2739 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2740 if vapp_name is None:
2741 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2742 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2743 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2744 vapp_resource = vdc_obj.get_vapp(vapp_name)
2745 vapp = VApp(self.client, resource=vapp_resource)
2746
2747 # Power off, undeploy and delete the vApp, waiting for each task to complete.
2748
2749 if vapp:
2750 if vapp_resource.get('deployed') == 'true':
2751 self.logger.info("Powering off vApp {}".format(vapp_name))
2752 #Power off vApp
2753 powered_off = False
2754 wait_time = 0
2755 while wait_time <= MAX_WAIT_TIME:
2756 power_off_task = vapp.power_off()
2757 result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
2758
2759 if result.get('status') == 'success':
2760 powered_off = True
2761 break
2762 else:
2763 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
2764 time.sleep(INTERVAL_TIME)
2765
2766 wait_time +=INTERVAL_TIME
2767 if not powered_off:
2768 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
2769 else:
2770 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
2771
2772 #Undeploy vApp
2773 self.logger.info("Undeploy vApp {}".format(vapp_name))
2774 wait_time = 0
2775 undeployed = False
2776 while wait_time <= MAX_WAIT_TIME:
2777 vapp = VApp(self.client, resource=vapp_resource)
2778 if not vapp:
2779 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2780 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2781 undeploy_task = vapp.undeploy()
2782
2783 result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
2784 if result.get('status') == 'success':
2785 undeployed = True
2786 break
2787 else:
2788 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
2789 time.sleep(INTERVAL_TIME)
2790
2791 wait_time +=INTERVAL_TIME
2792
2793 if not undeployed:
2794 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
2795
2796 # delete vapp
2797 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
2798
2799 if vapp is not None:
2800 wait_time = 0
2801 result = False
2802
2803 while wait_time <= MAX_WAIT_TIME:
2804 vapp = VApp(self.client, resource=vapp_resource)
2805 if not vapp:
2806 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2807 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2808
2809 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
2810
2811 result = self.client.get_task_monitor().wait_for_success(task=delete_task)
2812 if result.get('status') == 'success':
2813 break
2814 else:
2815 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
2816 time.sleep(INTERVAL_TIME)
2817
2818 wait_time +=INTERVAL_TIME
2819
2820 if result is None:
2821 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
2822 else:
2823 self.logger.info("Deleted vm instance {} successfully".format(vm__vim_uuid))
2824 config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
2825 catalog_list = self.get_image_list()
2826 try:
2827 config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
2828 if catalog_['name'] == config_drive_catalog_name][0]
2829 except IndexError:
2830 pass
2831 if config_drive_catalog_id:
2832 self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
2833 'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
2834 self.delete_image(config_drive_catalog_id)
2835 return vm__vim_uuid
2836 except:
2837 self.logger.debug(traceback.format_exc())
2838 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
2839
2840
2841 def refresh_vms_status(self, vm_list):
2842 """Get the status of the virtual machines and their interfaces/ports
2843 Params: the list of VM identifiers
2844 Returns a dictionary with:
2845 vm_id: #VIM id of this Virtual Machine
2846 status: #Mandatory. Text with one of:
2847 # DELETED (not found at vim)
2848 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2849 # OTHER (Vim reported other status not understood)
2850 # ERROR (VIM indicates an ERROR status)
2851 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2852 # CREATING (on building process), ERROR
2853 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
2854 #
2855 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2856 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2857 interfaces:
2858 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2859 mac_address: #Text format XX:XX:XX:XX:XX:XX
2860 vim_net_id: #network id where this interface is connected
2861 vim_interface_id: #interface/port VIM id
2862 ip_address: #null, or text with IPv4, IPv6 address
2863 """
2864
2865 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
2866
2867 org,vdc = self.get_vdc_details()
2868 if vdc is None:
2869 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2870
2871 vms_dict = {}
2872 nsx_edge_list = []
2873 for vmuuid in vm_list:
2874 vapp_name = self.get_namebyvappid(vmuuid)
2875 if vapp_name is not None:
2876
2877 try:
2878 vm_pci_details = self.get_vm_pci_details(vmuuid)
2879 vdc_obj = VDC(self.client, href=vdc.get('href'))
2880 vapp_resource = vdc_obj.get_vapp(vapp_name)
2881 the_vapp = VApp(self.client, resource=vapp_resource)
2882
2883 vm_details = {}
2884 for vm in the_vapp.get_all_vms():
2885 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2886 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2887 response = self.perform_request(req_type='GET',
2888 url=vm.get('href'),
2889 headers=headers)
2890
2891 if response.status_code != 200:
2892 self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
2893 "status code : {}".format(vm.get('href'),
2894 response.content,
2895 response.status_code))
2896 raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
2897 "VM details")
2898 xmlroot = XmlElementTree.fromstring(response.content)
2899
2900
2901 result = response.content.replace("\n"," ")
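                        # Scrape disk capacity (MB), vCPU count and memory size (MB) from the
                        # flattened VM XML with the regular expressions below.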
2902 hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
2903 if hdd_match:
2904 hdd_mb = hdd_match.group(1)
2905 vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
2906 cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
2907 if cpus_match:
2908 cpus = cpus_match.group(1)
2909 vm_details['cpus'] = int(cpus) if cpus else None
2910 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
2911 vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
2912 vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
2913 vm_details['id'] = xmlroot.get('id')
2914 vm_details['name'] = xmlroot.get('name')
2915 vm_info = [vm_details]
2916 if vm_pci_details:
2917 vm_info[0].update(vm_pci_details)
2918
2919 vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2920 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2921 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2922
2923 # get networks
2924 vm_ip = None
2925 vm_mac = None
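                        # Each <NetworkConnection> element describes one vNIC: extract its MAC and IP.
                        # When vCD does not report an IP, it is resolved below from the NSX edge
                        # DHCP leases using the MAC address.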
2926 networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
2927 for network in networks:
2928 mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
2929 vm_mac = mac_s.group(1) if mac_s else None
2930 ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
2931 vm_ip = ip_s.group(1) if ip_s else None
2932
2933 if vm_ip is None:
2934 if not nsx_edge_list:
2935 nsx_edge_list = self.get_edge_details()
2936 if nsx_edge_list is None:
2937 raise vimconn.vimconnException("refresh_vms_status:"\
2938 "Failed to get edge details from NSX Manager")
2939 if vm_mac is not None:
2940 vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
2941
2942 net_s = re.search('network="(.*?)"',network)
2943 network_name = net_s.group(1) if net_s else None
2944
2945 vm_net_id = self.get_network_id_by_name(network_name)
2946 interface = {"mac_address": vm_mac,
2947 "vim_net_id": vm_net_id,
2948 "vim_interface_id": vm_net_id,
2949 "ip_address": vm_ip}
2950
2951 vm_dict["interfaces"].append(interface)
2952
2953 # add a vm to vm dict
2954 vms_dict.setdefault(vmuuid, vm_dict)
2955 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
2956 except Exception as exp:
2957 self.logger.debug("Error in response {}".format(exp))
2958 self.logger.debug(traceback.format_exc())
2959
2960 return vms_dict
2961
2962
2963 def get_edge_details(self):
2964 """Get the NSX edge list from NSX Manager
2965 Returns list of NSX edges
2966 """
2967 edge_list = []
2968 rheaders = {'Content-Type': 'application/xml'}
2969 nsx_api_url = '/api/4.0/edges'
2970
2971 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2972
2973 try:
2974 resp = requests.get(self.nsx_manager + nsx_api_url,
2975 auth = (self.nsx_user, self.nsx_password),
2976 verify = False, headers = rheaders)
2977 if resp.status_code == requests.codes.ok:
2978 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2979 for edge_pages in paged_Edge_List:
2980 if edge_pages.tag == 'edgePage':
2981 for edge_summary in edge_pages:
2982 if edge_summary.tag == 'pagingInfo':
2983 for element in edge_summary:
2984 if element.tag == 'totalCount' and element.text == '0':
2985 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2986 .format(self.nsx_manager))
2987
2988 if edge_summary.tag == 'edgeSummary':
2989 for element in edge_summary:
2990 if element.tag == 'id':
2991 edge_list.append(element.text)
2992 else:
2993 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2994 .format(self.nsx_manager))
2995
2996 if not edge_list:
2997 raise vimconn.vimconnException("get_edge_details: "\
2998 "No NSX edge details found: {}"
2999 .format(self.nsx_manager))
3000 else:
3001 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
3002 return edge_list
3003 else:
3004 self.logger.debug("get_edge_details: "
3005 "Failed to get NSX edge details from NSX Manager: {}"
3006 .format(resp.content))
3007 return None
3008
3009 except Exception as exp:
3010 self.logger.debug("get_edge_details: "\
3011 "Failed to get NSX edge details from NSX Manager: {}"
3012 .format(exp))
3013 raise vimconn.vimconnException("get_edge_details: "\
3014 "Failed to get NSX edge details from NSX Manager: {}"
3015 .format(exp))
3016
3017
3018 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3019 """Get IP address details from NSX edges, using the MAC address
3020 PARAMS: nsx_edges : List of NSX edges
3021 mac_address : Find IP address corresponding to this MAC address
3022                  Returns: IP address corresponding to the provided MAC address
3023 """
3024
3025 ip_addr = None
3026 rheaders = {'Content-Type': 'application/xml'}
3027
3028 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3029
3030 try:
3031 for edge in nsx_edges:
3032 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
3033
3034 resp = requests.get(self.nsx_manager + nsx_api_url,
3035 auth = (self.nsx_user, self.nsx_password),
3036 verify = False, headers = rheaders)
3037
3038 if resp.status_code == requests.codes.ok:
3039 dhcp_leases = XmlElementTree.fromstring(resp.text)
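                    # Walk the DHCP lease info returned by this edge; every leaseInfo entry carries
                    # a macAddress/ipAddress pair, so return the IP whose MAC matches the request.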
3040 for child in dhcp_leases:
3041 if child.tag == 'dhcpLeaseInfo':
3042 dhcpLeaseInfo = child
3043 for leaseInfo in dhcpLeaseInfo:
3044 for elem in leaseInfo:
3045 if (elem.tag)=='macAddress':
3046 edge_mac_addr = elem.text
3047 if (elem.tag)=='ipAddress':
3048 ip_addr = elem.text
3049 if edge_mac_addr is not None:
3050 if edge_mac_addr == mac_address:
3051 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
3052 .format(ip_addr, mac_address,edge))
3053 return ip_addr
3054 else:
3055 self.logger.debug("get_ipaddr_from_NSXedge: "\
3056 "Error occurred while getting DHCP lease info from NSX Manager: {}"
3057 .format(resp.content))
3058
3059 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
3060 return None
3061
3062 except XmlElementTree.ParseError as Err:
3063 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
3064
3065
3066 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
3067         """Send an action over a VM instance to the VIM
3068 Returns the vm_id if the action was successfully sent to the VIM"""
3069
3070 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
3071 if vm__vim_uuid is None or action_dict is None:
3072 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
3073
3074 org, vdc = self.get_vdc_details()
3075 if vdc is None:
3076 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
3077
3078 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3079 if vapp_name is None:
3080 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3081 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3082 else:
3083 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
3084
3085 try:
3086 vdc_obj = VDC(self.client, href=vdc.get('href'))
3087 vapp_resource = vdc_obj.get_vapp(vapp_name)
3088 vapp = VApp(self.client, resource=vapp_resource)
3089 if "start" in action_dict:
3090 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
3091 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3092 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3093 self.instance_actions_result("start", result, vapp_name)
3094 elif "rebuild" in action_dict:
3095 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
3096 rebuild_task = vapp.deploy(power_on=True)
3097 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
3098 self.instance_actions_result("rebuild", result, vapp_name)
3099 elif "pause" in action_dict:
3100 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
3101 pause_task = vapp.undeploy(action='suspend')
3102 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
3103 self.instance_actions_result("pause", result, vapp_name)
3104 elif "resume" in action_dict:
3105 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
3106 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3107 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3108 self.instance_actions_result("resume", result, vapp_name)
3109 elif "shutoff" in action_dict or "shutdown" in action_dict:
3110 action_name , value = action_dict.items()[0]
3111 #For python3
3112 #action_name , value = list(action_dict.items())[0]
3113 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
3114 shutdown_task = vapp.shutdown()
3115 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
3116 if action_name == "shutdown":
3117 self.instance_actions_result("shutdown", result, vapp_name)
3118 else:
3119 self.instance_actions_result("shutoff", result, vapp_name)
3120 elif "forceOff" in action_dict:
3121 result = vapp.undeploy(action='powerOff')
3122 self.instance_actions_result("forceOff", result, vapp_name)
3123 elif "reboot" in action_dict:
3124 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
3125 reboot_task = vapp.reboot()
3126 self.client.get_task_monitor().wait_for_success(task=reboot_task)
3127 else:
3128 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
3129 return vm__vim_uuid
3130 except Exception as exp :
3131 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
3132 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
3133
3134 def instance_actions_result(self, action, result, vapp_name):
3135 if result.get('status') == 'success':
3136             self.logger.info("action_vminstance: Successfully {} the vApp: {}".format(action, vapp_name))
3137 else:
3138 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
3139
3140 def get_vminstance_console(self, vm_id, console_type="novnc"):
3141 """
3142 Get a console for the virtual machine
3143 Params:
3144 vm_id: uuid of the VM
3145 console_type, can be:
3146 "novnc" (by default), "xvpvnc" for VNC types,
3147 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3148 Returns dict with the console parameters:
3149 protocol: ssh, ftp, http, https, ...
3150 server: usually ip address
3151 port: the http, ssh, ... port
3152 suffix: extra text, e.g. the http path and query string
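            Example of a returned dict for console_type "novnc" (illustrative values only):
                {'protocol': 'https',
                 'server': '10.10.10.10',
                 'port': '443',
                 'suffix': 'vm-47?ticket=abcd1234'}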
3153 """
3154 console_dict = {}
3155
3156 if console_type==None or console_type=='novnc':
3157
3158 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
3159
3160 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3161 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3162 response = self.perform_request(req_type='POST',
3163 url=url_rest_call,
3164 headers=headers)
3165
3166 if response.status_code == 403:
3167 response = self.retry_rest('GET', url_rest_call)
3168
3169 if response.status_code != 200:
3170 self.logger.error("REST call {} failed reason : {}"\
3171 "status code : {}".format(url_rest_call,
3172 response.content,
3173 response.status_code))
3174 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3175 "VM Mks ticket details")
3176 s = re.search("<Host>(.*?)</Host>",response.content)
3177 console_dict['server'] = s.group(1) if s else None
3178 s1 = re.search("<Port>(\d+)</Port>",response.content)
3179 console_dict['port'] = s1.group(1) if s1 else None
3180
3181
3182 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
3183
3184 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3185 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3186 response = self.perform_request(req_type='POST',
3187 url=url_rest_call,
3188 headers=headers)
3189
3190 if response.status_code == 403:
3191 response = self.retry_rest('GET', url_rest_call)
3192
3193 if response.status_code != 200:
3194 self.logger.error("REST call {} failed reason : {}"\
3195 "status code : {}".format(url_rest_call,
3196 response.content,
3197 response.status_code))
3198 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3199 "VM console details")
3200 s = re.search(">.*?/(vm-\d+.*)</",response.content)
3201 console_dict['suffix'] = s.group(1) if s else None
3202 console_dict['protocol'] = "https"
3203
3204 return console_dict
3205
3206 # NOT USED METHODS in current version
3207
3208 def host_vim2gui(self, host, server_dict):
3209 """Transform host dictionary from VIM format to GUI format,
3210 and append to the server_dict
3211 """
3212 raise vimconn.vimconnNotImplemented("Should have implemented this")
3213
3214 def get_hosts_info(self):
3215 """Get the information of deployed hosts
3216 Returns the hosts content"""
3217 raise vimconn.vimconnNotImplemented("Should have implemented this")
3218
3219 def get_hosts(self, vim_tenant):
3220 """Get the hosts and deployed instances
3221 Returns the hosts content"""
3222 raise vimconn.vimconnNotImplemented("Should have implemented this")
3223
3224 def get_processor_rankings(self):
3225 """Get the processor rankings in the VIM database"""
3226 raise vimconn.vimconnNotImplemented("Should have implemented this")
3227
3228 def new_host(self, host_data):
3229 """Adds a new host to VIM"""
3230 '''Returns status code of the VIM response'''
3231 raise vimconn.vimconnNotImplemented("Should have implemented this")
3232
3233 def new_external_port(self, port_data):
3234 """Adds a external port to VIM"""
3235 '''Returns the port identifier'''
3236 raise vimconn.vimconnNotImplemented("Should have implemented this")
3237
3238 def new_external_network(self, net_name, net_type):
3239 """Adds a external network to VIM (shared)"""
3240 '''Returns the network identifier'''
3241 raise vimconn.vimconnNotImplemented("Should have implemented this")
3242
3243 def connect_port_network(self, port_id, network_id, admin=False):
3244 """Connects a external port to a network"""
3245 '''Returns status code of the VIM response'''
3246 raise vimconn.vimconnNotImplemented("Should have implemented this")
3247
3248 def new_vminstancefromJSON(self, vm_data):
3249 """Adds a VM instance to VIM"""
3250 '''Returns the instance identifier'''
3251 raise vimconn.vimconnNotImplemented("Should have implemented this")
3252
3253 def get_network_name_by_id(self, network_uuid=None):
3254         """Method gets vCloud Director network name based on supplied uuid.
3255
3256 Args:
3257 network_uuid: network_id
3258
3259 Returns:
3260             The network name, or None if not found.
3261 """
3262
3263 if not network_uuid:
3264 return None
3265
3266 try:
3267 org_dict = self.get_org(self.org_uuid)
3268 if 'networks' in org_dict:
3269 org_network_dict = org_dict['networks']
3270 for net_uuid in org_network_dict:
3271 if net_uuid == network_uuid:
3272 return org_network_dict[net_uuid]
3273 except:
3274 self.logger.debug("Exception in get_network_name_by_id")
3275 self.logger.debug(traceback.format_exc())
3276
3277 return None
3278
3279 def get_network_id_by_name(self, network_name=None):
3280         """Method gets vCloud Director network uuid based on supplied name.
3281
3282 Args:
3283 network_name: network_name
3284 Returns:
3285             The network uuid (network_id), or None if not found.
3287 """
3288
3289 if not network_name:
3290 self.logger.debug("get_network_id_by_name() : Network name is empty")
3291 return None
3292
3293 try:
3294 org_dict = self.get_org(self.org_uuid)
3295 if org_dict and 'networks' in org_dict:
3296 org_network_dict = org_dict['networks']
3297 for net_uuid,net_name in org_network_dict.iteritems():
3298 #For python3
3299 #for net_uuid,net_name in org_network_dict.items():
3300 if net_name == network_name:
3301 return net_uuid
3302
3303 except KeyError as exp:
3304 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
3305
3306 return None
3307
3308 def list_org_action(self):
3309 """
3310         Method leverages vCloud Director to query the organizations available to the current user
3311
3312 Args:
3313 vca - is active VCA connection.
3314 vdc_name - is a vdc name that will be used to query vms action
3315
3316 Returns:
3317             The XML response content, or None
3318 """
3319 url_list = [self.url, '/api/org']
3320 vm_list_rest_call = ''.join(url_list)
3321
3322 if self.client._session:
3323 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3324 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3325
3326 response = self.perform_request(req_type='GET',
3327 url=vm_list_rest_call,
3328 headers=headers)
3329
3330 if response.status_code == 403:
3331 response = self.retry_rest('GET', vm_list_rest_call)
3332
3333 if response.status_code == requests.codes.ok:
3334 return response.content
3335
3336 return None
3337
3338 def get_org_action(self, org_uuid=None):
3339 """
3340         Method leverages vCloud Director to retrieve the objects available for an organization.
3341
3342 Args:
3343 org_uuid - vCD organization uuid
3344 self.client - is active connection.
3345
3346 Returns:
3347             The XML response content, or None
3348 """
3349
3350 if org_uuid is None:
3351 return None
3352
3353 url_list = [self.url, '/api/org/', org_uuid]
3354 vm_list_rest_call = ''.join(url_list)
3355
3356 if self.client._session:
3357 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3358 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3359
3360 #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
3361 response = self.perform_request(req_type='GET',
3362 url=vm_list_rest_call,
3363 headers=headers)
3364 if response.status_code == 403:
3365 response = self.retry_rest('GET', vm_list_rest_call)
3366
3367 if response.status_code == requests.codes.ok:
3368 return response.content
3369 return None
3370
3371 def get_org(self, org_uuid=None):
3372 """
3373         Method retrieves the content (vdcs, networks, catalogs) of an organization in vCloud Director
3374
3375 Args:
3376 org_uuid - is a organization uuid.
3377
3378 Returns:
3379             The return dictionary with the following keys
3380                 "networks" - network list under the org
3381                 "catalogs" - catalog list under the org
3382                 "vdcs" - vdc list under the org
3383 """
3384
3385 org_dict = {}
3386
3387 if org_uuid is None:
3388 return org_dict
3389
3390 content = self.get_org_action(org_uuid=org_uuid)
3391 try:
3392 vdc_list = {}
3393 network_list = {}
3394 catalog_list = {}
3395 vm_list_xmlroot = XmlElementTree.fromstring(content)
3396 for child in vm_list_xmlroot:
3397 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
3398 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3399 org_dict['vdcs'] = vdc_list
3400 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
3401 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3402 org_dict['networks'] = network_list
3403 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
3404 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3405 org_dict['catalogs'] = catalog_list
3406 except:
3407 pass
3408
3409 return org_dict
3410
3411 def get_org_list(self):
3412 """
3413         Method retrieves the organizations available in vCloud Director
3414
3415 Args:
3416 vca - is active VCA connection.
3417
3418 Returns:
3419             The return dictionary, keyed by organization UUID
3420 """
3421
3422 org_dict = {}
3423
3424 content = self.list_org_action()
3425 try:
3426 vm_list_xmlroot = XmlElementTree.fromstring(content)
3427 for vm_xml in vm_list_xmlroot:
3428 if vm_xml.tag.split("}")[1] == 'Org':
3429 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
3430 org_dict[org_uuid[0]] = vm_xml.attrib['name']
3431 except:
3432 pass
3433
3434 return org_dict
3435
3436 def vms_view_action(self, vdc_name=None):
3437 """ Method leverages vCloud director vms query call
3438
3439 Args:
3440 vca - is active VCA connection.
3441 vdc_name - is a vdc name that will be used to query vms action
3442
3443 Returns:
3444             The XML response content, or None
3445 """
3446 vca = self.connect()
3447 if vdc_name is None:
3448 return None
3449
3450 url_list = [vca.host, '/api/vms/query']
3451 vm_list_rest_call = ''.join(url_list)
3452
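        # i.e. proceed only when both vcloud_session and its organization are set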
3453 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
3454 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
3455 vca.vcloud_session.organization.Link)
3456 #For python3
3457 #refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and\
3458 # ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
3459 if len(refs) == 1:
3460 response = Http.get(url=vm_list_rest_call,
3461 headers=vca.vcloud_session.get_vcloud_headers(),
3462 verify=vca.verify,
3463 logger=vca.logger)
3464 if response.status_code == requests.codes.ok:
3465 return response.content
3466
3467 return None
3468
3469 def get_vapp_list(self, vdc_name=None):
3470 """
3471         Method retrieves the vApp list deployed in vCloud Director and returns a dictionary
3472         containing all vApps deployed for the queried VDC.
3473 The key for a dictionary is vApp UUID
3474
3475
3476 Args:
3477 vca - is active VCA connection.
3478 vdc_name - is a vdc name that will be used to query vms action
3479
3480 Returns:
3481             The return dictionary, keyed by vApp UUID
3482 """
3483
3484 vapp_dict = {}
3485 if vdc_name is None:
3486 return vapp_dict
3487
3488 content = self.vms_view_action(vdc_name=vdc_name)
3489 try:
3490 vm_list_xmlroot = XmlElementTree.fromstring(content)
3491 for vm_xml in vm_list_xmlroot:
3492 if vm_xml.tag.split("}")[1] == 'VMRecord':
3493 if vm_xml.attrib['isVAppTemplate'] == 'true':
3494 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
3495 if 'vappTemplate-' in rawuuid[0]:
3496                             # container in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5; we remove
3497                             # the 'vappTemplate-' prefix and use the raw UUID as key
3498 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
3499 except:
3500 pass
3501
3502 return vapp_dict
3503
3504 def get_vm_list(self, vdc_name=None):
3505 """
3506         Method retrieves the list of VMs deployed in vCloud Director. It returns a dictionary
3507         containing all VMs deployed for the queried VDC.
3508         The key of the dictionary is the VM UUID
3509
3510
3511 Args:
3512 vca - is active VCA connection.
3513 vdc_name - is a vdc name that will be used to query vms action
3514
3515 Returns:
3516             The return dictionary, keyed by VM UUID
3517 """
3518 vm_dict = {}
3519
3520 if vdc_name is None:
3521 return vm_dict
3522
3523 content = self.vms_view_action(vdc_name=vdc_name)
3524 try:
3525 vm_list_xmlroot = XmlElementTree.fromstring(content)
3526 for vm_xml in vm_list_xmlroot:
3527 if vm_xml.tag.split("}")[1] == 'VMRecord':
3528 if vm_xml.attrib['isVAppTemplate'] == 'false':
3529 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3530 if 'vm-' in rawuuid[0]:
3531                             # vm href in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5; we remove
3532                             # the 'vm-' prefix and use the raw UUID as key
3533 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3534 except:
3535 pass
3536
3537 return vm_dict
3538
3539 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
3540 """
3541         Method retrieves a VM deployed in vCloud Director. It returns the VM attributes as a dictionary
3542         for the queried VDC.
3543         The key of the dictionary is the VM UUID
3544
3545
3546 Args:
3547 vca - is active VCA connection.
3548 vdc_name - is a vdc name that will be used to query vms action
3549
3550 Returns:
3551             The return dictionary, keyed by VM UUID
3552 """
3553 vm_dict = {}
3554 vca = self.connect()
3555 if not vca:
3556 raise vimconn.vimconnConnectionException("self.connect() is failed")
3557
3558 if vdc_name is None:
3559 return vm_dict
3560
3561 content = self.vms_view_action(vdc_name=vdc_name)
3562 try:
3563 vm_list_xmlroot = XmlElementTree.fromstring(content)
3564 for vm_xml in vm_list_xmlroot:
3565 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
3566 # lookup done by UUID
3567 if isuuid:
3568 if vapp_name in vm_xml.attrib['container']:
3569 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3570 if 'vm-' in rawuuid[0]:
3571 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3572 break
3573 # lookup done by Name
3574 else:
3575 if vapp_name in vm_xml.attrib['name']:
3576 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3577 if 'vm-' in rawuuid[0]:
3578 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3579 break
3580 except:
3581 pass
3582
3583 return vm_dict
3584
3585 def get_network_action(self, network_uuid=None):
3586 """
3587         Method leverages vCloud Director to query a network based on the network uuid
3588
3589 Args:
3590 vca - is active VCA connection.
3591 network_uuid - is a network uuid
3592
3593 Returns:
3594             The XML response content, or None
3595 """
3596
3597 if network_uuid is None:
3598 return None
3599
3600 url_list = [self.url, '/api/network/', network_uuid]
3601 vm_list_rest_call = ''.join(url_list)
3602
3603 if self.client._session:
3604 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3605 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3606
3607 response = self.perform_request(req_type='GET',
3608 url=vm_list_rest_call,
3609 headers=headers)
3610 #Retry login if session expired & retry sending request
3611 if response.status_code == 403:
3612 response = self.retry_rest('GET', vm_list_rest_call)
3613
3614 if response.status_code == requests.codes.ok:
3615 return response.content
3616
3617 return None
3618
3619 def get_vcd_network(self, network_uuid=None):
3620 """
3621 Method retrieves available network from vCloud Director
3622
3623 Args:
3624 network_uuid - is VCD network UUID
3625
3626 Each element serialized as key : value pair
3627
3628         Following keys are available for access: network_configuration['Gateway']
3629 <Configuration>
3630 <IpScopes>
3631 <IpScope>
3632 <IsInherited>true</IsInherited>
3633 <Gateway>172.16.252.100</Gateway>
3634 <Netmask>255.255.255.0</Netmask>
3635 <Dns1>172.16.254.201</Dns1>
3636 <Dns2>172.16.254.202</Dns2>
3637 <DnsSuffix>vmwarelab.edu</DnsSuffix>
3638 <IsEnabled>true</IsEnabled>
3639 <IpRanges>
3640 <IpRange>
3641 <StartAddress>172.16.252.1</StartAddress>
3642 <EndAddress>172.16.252.99</EndAddress>
3643 </IpRange>
3644 </IpRanges>
3645 </IpScope>
3646 </IpScopes>
3647 <FenceMode>bridged</FenceMode>
3648
3649 Returns:
3650             The return dictionary with the network configuration
3651 """
3652
3653 network_configuration = {}
3654 if network_uuid is None:
3655 return network_uuid
3656
3657 try:
3658 content = self.get_network_action(network_uuid=network_uuid)
3659 vm_list_xmlroot = XmlElementTree.fromstring(content)
3660
3661 network_configuration['status'] = vm_list_xmlroot.get("status")
3662 network_configuration['name'] = vm_list_xmlroot.get("name")
3663 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
3664
3665 for child in vm_list_xmlroot:
3666 if child.tag.split("}")[1] == 'IsShared':
3667 network_configuration['isShared'] = child.text.strip()
3668 if child.tag.split("}")[1] == 'Configuration':
3669 for configuration in child.iter():
3670 tagKey = configuration.tag.split("}")[1].strip()
3671 if tagKey != "":
3672 network_configuration[tagKey] = configuration.text.strip()
3673 return network_configuration
3674 except Exception as exp :
3675 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
3676 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
3677
3678 return network_configuration
3679
3680 def delete_network_action(self, network_uuid=None):
3681 """
3682         Method deletes the given network from vCloud Director
3683
3684 Args:
3685             network_uuid - is the network uuid that the client wishes to delete
3686
3687 Returns:
3688             True if the delete request was accepted (HTTP 202), otherwise False
3689 """
3690 client = self.connect_as_admin()
3691 if not client:
3692 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
3693 if network_uuid is None:
3694 return False
3695
3696 url_list = [self.url, '/api/admin/network/', network_uuid]
3697 vm_list_rest_call = ''.join(url_list)
3698
3699 if client._session:
3700 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3701 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
3702 response = self.perform_request(req_type='DELETE',
3703 url=vm_list_rest_call,
3704 headers=headers)
3705 if response.status_code == 202:
3706 return True
3707
3708 return False
3709
3710 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3711 ip_profile=None, isshared='true'):
3712 """
3713         Method creates a network in vCloud Director
3714
3715 Args:
3716 network_name - is network name to be created.
3717 net_type - can be 'bridge','data','ptp','mgmt'.
3718 ip_profile is a dict containing the IP parameters of the network
3719 isshared - is a boolean
3720             parent_network_uuid - is the parent provider vdc network that will be used for mapping.
3721                                   It is an optional attribute; by default, if no parent network is indicated, the first available one will be used.
3722
3723 Returns:
3724             The network uuid of the created network, or None
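
            Example (illustrative) ip_profile; keys that are missing are filled in from
            DEFAULT_IP_PROFILE or derived from 'subnet_address':
                {'subnet_address': '192.168.10.0/24',
                 'gateway_address': '192.168.10.1',
                 'dns_address': '192.168.10.2',
                 'dhcp_enabled': True,
                 'dhcp_start_address': '192.168.10.3',
                 'dhcp_count': 50,
                 'ip_version': 'IPv4'}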
3725 """
3726
3727 new_network_name = [network_name, '-', str(uuid.uuid4())]
3728 content = self.create_network_rest(network_name=''.join(new_network_name),
3729 ip_profile=ip_profile,
3730 net_type=net_type,
3731 parent_network_uuid=parent_network_uuid,
3732 isshared=isshared)
3733 if content is None:
3734             self.logger.debug("Failed to create network {}.".format(network_name))
3735 return None
3736
3737 try:
3738 vm_list_xmlroot = XmlElementTree.fromstring(content)
3739 vcd_uuid = vm_list_xmlroot.get('id').split(":")
3740 if len(vcd_uuid) == 4:
3741 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
3742 return vcd_uuid[3]
3743 except:
3744             self.logger.debug("Failed to create network {}".format(network_name))
3745 return None
3746
3747 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3748 ip_profile=None, isshared='true'):
3749 """
3750         Method creates a network in vCloud Director (REST implementation)
3751
3752 Args:
3753 network_name - is network name to be created.
3754 net_type - can be 'bridge','data','ptp','mgmt'.
3755 ip_profile is a dict containing the IP parameters of the network
3756 isshared - is a boolean
3757             parent_network_uuid - is the parent provider vdc network that will be used for mapping.
3758                                   It is an optional attribute; by default, if no parent network is indicated, the first available one will be used.
3759
3760 Returns:
3761             The XML content of the creation response, or None
3762 """
3763 client_as_admin = self.connect_as_admin()
3764 if not client_as_admin:
3765 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3766 if network_name is None:
3767 return None
3768
3769 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3770 vm_list_rest_call = ''.join(url_list)
3771
3772 if client_as_admin._session:
3773 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3774 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3775
3776 response = self.perform_request(req_type='GET',
3777 url=vm_list_rest_call,
3778 headers=headers)
3779
3780 provider_network = None
3781 available_networks = None
3782 add_vdc_rest_url = None
3783
3784 if response.status_code != requests.codes.ok:
3785 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3786 response.status_code))
3787 return None
3788 else:
3789 try:
3790 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3791 for child in vm_list_xmlroot:
3792 if child.tag.split("}")[1] == 'ProviderVdcReference':
3793 provider_network = child.attrib.get('href')
3794 # application/vnd.vmware.admin.providervdc+xml
3795 if child.tag.split("}")[1] == 'Link':
3796 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3797 and child.attrib.get('rel') == 'add':
3798 add_vdc_rest_url = child.attrib.get('href')
3799 except:
3800                 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3801                 self.logger.debug("Response body {}".format(response.content))
3802 return None
3803
3804 # find pvdc provided available network
3805 response = self.perform_request(req_type='GET',
3806 url=provider_network,
3807 headers=headers)
3808 if response.status_code != requests.codes.ok:
3809 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3810 response.status_code))
3811 return None
3812
3813 if parent_network_uuid is None:
3814 try:
3815 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3816 for child in vm_list_xmlroot.iter():
3817 if child.tag.split("}")[1] == 'AvailableNetworks':
3818 for networks in child.iter():
3819 # application/vnd.vmware.admin.network+xml
3820 if networks.attrib.get('href') is not None:
3821 available_networks = networks.attrib.get('href')
3822 break
3823 except:
3824 return None
3825
3826 try:
3827 #Configure IP profile of the network
3828 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
3829
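                # When no subnet is requested, pick a random 192.168.<n>.0/24 and derive the
                # gateway, DNS and DHCP addresses from that base below.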
3830 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
3831 subnet_rand = random.randint(0, 255)
3832 ip_base = "192.168.{}.".format(subnet_rand)
3833 ip_profile['subnet_address'] = ip_base + "0/24"
3834 else:
3835 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
3836
3837 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
3838 ip_profile['gateway_address']=ip_base + "1"
3839 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
3840 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
3841 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
3842 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
3843 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
3844 ip_profile['dhcp_start_address']=ip_base + "3"
3845 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
3846 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
3847 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
3848 ip_profile['dns_address']=ip_base + "2"
3849
3850 gateway_address=ip_profile['gateway_address']
3851 dhcp_count=int(ip_profile['dhcp_count'])
3852 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
3853
3854 if ip_profile['dhcp_enabled']==True:
3855 dhcp_enabled='true'
3856 else:
3857 dhcp_enabled='false'
3858 dhcp_start_address=ip_profile['dhcp_start_address']
3859
3860 #derive dhcp_end_address from dhcp_start_address & dhcp_count
3861 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
3862 end_ip_int += dhcp_count - 1
3863 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
3864
3865 ip_version=ip_profile['ip_version']
3866 dns_address=ip_profile['dns_address']
3867 except KeyError as exp:
3868 self.logger.debug("Create Network REST: Key error {}".format(exp))
3869             raise vimconn.vimconnException("Create Network REST: Key error {}".format(exp))
3870
3871 # either use client provided UUID or search for a first available
3872 # if both are not defined we return none
3873 if parent_network_uuid is not None:
3874 provider_network = None
3875 available_networks = None
3876 add_vdc_rest_url = None
3877
3878 url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
3879 add_vdc_rest_url = ''.join(url_list)
3880
3881 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
3882 available_networks = ''.join(url_list)
3883
3884 #Creating all networks as Direct Org VDC type networks.
3885 #Unused in case of Underlay (data/ptp) network interface.
3886 fence_mode="isolated"
3887 is_inherited='false'
3888 dns_list = dns_address.split(";")
3889 dns1 = dns_list[0]
3890 dns2_text = ""
3891 if len(dns_list) >= 2:
3892 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
3893 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3894 <Description>Openmano created</Description>
3895 <Configuration>
3896 <IpScopes>
3897 <IpScope>
3898 <IsInherited>{1:s}</IsInherited>
3899 <Gateway>{2:s}</Gateway>
3900 <Netmask>{3:s}</Netmask>
3901 <Dns1>{4:s}</Dns1>{5:s}
3902 <IsEnabled>{6:s}</IsEnabled>
3903 <IpRanges>
3904 <IpRange>
3905 <StartAddress>{7:s}</StartAddress>
3906 <EndAddress>{8:s}</EndAddress>
3907 </IpRange>
3908 </IpRanges>
3909 </IpScope>
3910 </IpScopes>
3911 <FenceMode>{9:s}</FenceMode>
3912 </Configuration>
3913 <IsShared>{10:s}</IsShared>
3914 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3915 subnet_address, dns1, dns2_text, dhcp_enabled,
3916 dhcp_start_address, dhcp_end_address,
3917 fence_mode, isshared)
3918
3919 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
3920 try:
3921 response = self.perform_request(req_type='POST',
3922 url=add_vdc_rest_url,
3923 headers=headers,
3924 data=data)
3925
3926 if response.status_code != 201:
3927 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
3928 .format(response.status_code,response.content))
3929 else:
3930 network_task = self.get_task_from_response(response.content)
3931                 self.logger.debug("Create Network REST: Waiting for network creation to complete")
3932 time.sleep(5)
3933 result = self.client.get_task_monitor().wait_for_success(task=network_task)
3934 if result.get('status') == 'success':
3935 return response.content
3936 else:
3937 self.logger.debug("create_network_rest task failed. Network Create response : {}"
3938 .format(response.content))
3939 except Exception as exp:
3940 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
3941
3942 return None
3943
3944 def convert_cidr_to_netmask(self, cidr_ip=None):
3945 """
3946         Method converts a CIDR-notation address into a dotted-decimal netmask
3947 Args:
3948 cidr_ip : CIDR IP address
3949 Returns:
3950 netmask : Converted netmask
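        Example (illustrative):
            convert_cidr_to_netmask('192.168.1.0/24') returns '255.255.255.0'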
3951 """
3952 if cidr_ip is not None:
3953 if '/' in cidr_ip:
3954 network, net_bits = cidr_ip.split('/')
3955 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
3956 else:
3957 netmask = cidr_ip
3958 return netmask
3959 return None
3960
3961 def get_provider_rest(self, vca=None):
3962 """
3963         Method gets the provider vdc view from vCloud Director
3964
3965 Args:
3966             vca - active vCD client connection; when supplied, the admin
3967                   API view ('/api/admin') is queried.
3969
3970 Returns:
3971             The XML content of the response, or None
3972 """
3973
3974 url_list = [self.url, '/api/admin']
3975 if vca:
3976 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3977 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3978 response = self.perform_request(req_type='GET',
3979 url=''.join(url_list),
3980 headers=headers)
3981
3982 if response.status_code == requests.codes.ok:
3983 return response.content
3984 return None
3985
3986 def create_vdc(self, vdc_name=None):
3987
3988 vdc_dict = {}
3989
3990 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
3991 if xml_content is not None:
3992 try:
3993 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
3994 for child in task_resp_xmlroot:
3995 if child.tag.split("}")[1] == 'Owner':
3996 vdc_id = child.attrib.get('href').split("/")[-1]
3997 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
3998 return vdc_dict
3999 except:
4000                 self.logger.debug("Response body {}".format(xml_content))
4001
4002 return None
4003
4004 def create_vdc_from_tmpl_rest(self, vdc_name=None):
4005 """
4006         Method creates a vdc in vCloud Director based on a VDC template.
4007         It uses a pre-defined template.
4008
4009 Args:
4010 vdc_name - name of a new vdc.
4011
4012 Returns:
4013             The XML content of the response, or None
4014 """
4015         # prerequisite: at least one vdc template should be available in vCD
4016 self.logger.info("Creating new vdc {}".format(vdc_name))
4017 vca = self.connect_as_admin()
4018 if not vca:
4019 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4020 if vdc_name is None:
4021 return None
4022
4023 url_list = [self.url, '/api/vdcTemplates']
4024 vm_list_rest_call = ''.join(url_list)
4025
4026 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4027 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4028 response = self.perform_request(req_type='GET',
4029 url=vm_list_rest_call,
4030 headers=headers)
4031
4032 # container url to a template
4033 vdc_template_ref = None
4034 try:
4035 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
4036 for child in vm_list_xmlroot:
4037 # application/vnd.vmware.admin.providervdc+xml
4038                 # we need to find a template from which we instantiate the VDC
4039 if child.tag.split("}")[1] == 'VdcTemplate':
4040 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
4041 vdc_template_ref = child.attrib.get('href')
4042 except:
4043             self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
4044             self.logger.debug("Response body {}".format(response.content))
4045 return None
4046
4047         # if we didn't find the required pre-defined template we return None
4048 if vdc_template_ref is None:
4049 return None
4050
4051 try:
4052 # instantiate vdc
4053 url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
4054 vm_list_rest_call = ''.join(url_list)
4055 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4056 <Source href="{1:s}"></Source>
4057                        <Description>openmano</Description>
4058 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
4059
4060 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
4061
4062 response = self.perform_request(req_type='POST',
4063 url=vm_list_rest_call,
4064 headers=headers,
4065 data=data)
4066
4067 vdc_task = self.get_task_from_response(response.content)
4068 self.client.get_task_monitor().wait_for_success(task=vdc_task)
4069
4070             # if all is ok we respond with content, otherwise by default None
4071 if response.status_code >= 200 and response.status_code < 300:
4072 return response.content
4073 return None
4074 except:
4075             self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
4076             self.logger.debug("Response body {}".format(response.content))
4077
4078 return None
4079
4080 def create_vdc_rest(self, vdc_name=None):
4081 """
4082         Method creates a vdc in vCloud Director
4083
4084 Args:
4085 vdc_name - vdc name to be created
4086 Returns:
4087 The return response
4088 """
4089
4090 self.logger.info("Creating new vdc {}".format(vdc_name))
4091
4092 vca = self.connect_as_admin()
4093 if not vca:
4094 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4095 if vdc_name is None:
4096 return None
4097
4098 url_list = [self.url, '/api/admin/org/', self.org_uuid]
4099 vm_list_rest_call = ''.join(url_list)
4100
4101 if vca._session:
4102 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4103 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4104 response = self.perform_request(req_type='GET',
4105 url=vm_list_rest_call,
4106 headers=headers)
4107
4108 provider_vdc_ref = None
4109 add_vdc_rest_url = None
4110 available_networks = None
4111
4112 if response.status_code != requests.codes.ok:
4113 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
4114 response.status_code))
4115 return None
4116 else:
4117 try:
4118 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
4119 for child in vm_list_xmlroot:
4120 # application/vnd.vmware.admin.providervdc+xml
4121 if child.tag.split("}")[1] == 'Link':
4122 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
4123 and child.attrib.get('rel') == 'add':
4124 add_vdc_rest_url = child.attrib.get('href')
4125 except:
4126                 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
4127                 self.logger.debug("Response body {}".format(response.content))
4128 return None
4129
4130 response = self.get_provider_rest(vca=vca)
4131 try:
4132 vm_list_xmlroot = XmlElementTree.fromstring(response)
4133 for child in vm_list_xmlroot:
4134 if child.tag.split("}")[1] == 'ProviderVdcReferences':
4135 for sub_child in child:
4136 provider_vdc_ref = sub_child.attrib.get('href')
4137 except:
4138             self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
4139             self.logger.debug("Response body {}".format(response))
4140 return None
4141
4142 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
4143 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
4144 <AllocationModel>ReservationPool</AllocationModel>
4145 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
4146 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
4147 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
4148 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
4149 <ProviderVdcReference
4150 name="Main Provider"
4151 href="{2:s}" />
4152 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
4153 escape(vdc_name),
4154 provider_vdc_ref)
4155
4156 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
4157
4158 response = self.perform_request(req_type='POST',
4159 url=add_vdc_rest_url,
4160 headers=headers,
4161 data=data)
4162
4163         # if all is ok we respond with content, otherwise by default None
4164 if response.status_code == 201:
4165 return response.content
4166 return None
4167
4168 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
4169 """
4170         Method retrieves vApp details from vCloud Director
4171
4172 Args:
4173 vapp_uuid - is vapp identifier.
4174
4175 Returns:
4176             A dictionary with the parsed vApp details (empty on failure)
4177 """
4178
4179 parsed_respond = {}
4180 vca = None
4181
4182 if need_admin_access:
4183 vca = self.connect_as_admin()
4184 else:
4185 vca = self.client
4186
4187 if not vca:
4188 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4189 if vapp_uuid is None:
4190 return None
4191
4192 url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
4193 get_vapp_restcall = ''.join(url_list)
4194
4195 if vca._session:
4196 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4197 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4198 response = self.perform_request(req_type='GET',
4199 url=get_vapp_restcall,
4200 headers=headers)
4201
4202 if response.status_code == 403:
4203 if need_admin_access == False:
4204 response = self.retry_rest('GET', get_vapp_restcall)
4205
4206 if response.status_code != requests.codes.ok:
4207 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
4208 response.status_code))
4209 return parsed_respond
4210
4211 try:
4212 xmlroot_respond = XmlElementTree.fromstring(response.content)
4213 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
4214
4215 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4216 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4217 'vmw': 'http://www.vmware.com/schema/ovf',
4218 'vm': 'http://www.vmware.com/vcloud/v1.5',
4219 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4220 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
4221 "xmlns":"http://www.vmware.com/vcloud/v1.5"
4222 }
4223
4224 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
4225 if created_section is not None:
4226 parsed_respond['created'] = created_section.text
4227
4228 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
4229 if network_section is not None and 'networkName' in network_section.attrib:
4230 parsed_respond['networkname'] = network_section.attrib['networkName']
4231
4232 ipscopes_section = \
4233 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
4234 namespaces)
4235 if ipscopes_section is not None:
4236 for ipscope in ipscopes_section:
4237 for scope in ipscope:
4238 tag_key = scope.tag.split("}")[1]
4239 if tag_key == 'IpRanges':
4240 ip_ranges = scope.getchildren()
4241 for ipblock in ip_ranges:
4242 for block in ipblock:
4243 parsed_respond[block.tag.split("}")[1]] = block.text
4244 else:
4245 parsed_respond[tag_key] = scope.text
4246
4247 # parse children section for other attrib
4248 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4249 if children_section is not None:
4250 parsed_respond['name'] = children_section.attrib['name']
4251 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
4252 if "nestedHypervisorEnabled" in children_section.attrib else None
4253 parsed_respond['deployed'] = children_section.attrib['deployed']
4254 parsed_respond['status'] = children_section.attrib['status']
4255 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
4256 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
4257 nic_list = []
4258 for adapters in network_adapter:
4259 adapter_key = adapters.tag.split("}")[1]
4260 if adapter_key == 'PrimaryNetworkConnectionIndex':
4261 parsed_respond['primarynetwork'] = adapters.text
4262 if adapter_key == 'NetworkConnection':
4263 vnic = {}
4264 if 'network' in adapters.attrib:
4265 vnic['network'] = adapters.attrib['network']
4266 for adapter in adapters:
4267 setting_key = adapter.tag.split("}")[1]
4268 vnic[setting_key] = adapter.text
4269 nic_list.append(vnic)
4270
4271 for link in children_section:
4272 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4273 if link.attrib['rel'] == 'screen:acquireTicket':
4274 parsed_respond['acquireTicket'] = link.attrib
4275 if link.attrib['rel'] == 'screen:acquireMksTicket':
4276 parsed_respond['acquireMksTicket'] = link.attrib
4277
4278 parsed_respond['interfaces'] = nic_list
4279 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4280 if vCloud_extension_section is not None:
4281 vm_vcenter_info = {}
4282 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4283 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4284 if vmext is not None:
4285 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4286 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
4287
4288 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
4289 vm_virtual_hardware_info = {}
4290 if virtual_hardware_section is not None:
4291 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
4292 if item.find("rasd:Description",namespaces).text == "Hard disk":
4293 disk_size = item.find("rasd:HostResource" ,namespaces
4294 ).attrib["{"+namespaces['vm']+"}capacity"]
4295
4296 vm_virtual_hardware_info["disk_size"]= disk_size
4297 break
4298
4299 for link in virtual_hardware_section:
4300 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4301 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
4302 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
4303 break
4304
4305 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
4306 except Exception as exp :
4307 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4308 return parsed_respond
4309
4310 def acquire_console(self, vm_uuid=None):
4311
4312 if vm_uuid is None:
4313 return None
4314 if self.client._session:
4315 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4316 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4317 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
4318 console_dict = vm_dict['acquireTicket']
4319 console_rest_call = console_dict['href']
4320
4321 response = self.perform_request(req_type='POST',
4322 url=console_rest_call,
4323 headers=headers)
4324
4325 if response.status_code == 403:
4326 response = self.retry_rest('POST', console_rest_call)
4327
4328 if response.status_code == requests.codes.ok:
4329 return response.content
4330
4331 return None
4332
4333 def modify_vm_disk(self, vapp_uuid, flavor_disk):
4334 """
4335         Method modifies the VM disk size so that it matches the flavor
4336
4337 Args:
4338 vapp_uuid - is vapp identifier.
4339 flavor_disk - disk size as specified in VNFD (flavor)
4340
4341 Returns:
4342             The status of the resize operation (True/False), or None on error
4343 """
4344 status = None
4345 try:
4346 #Flavor disk is in GB convert it into MB
4347 flavor_disk = int(flavor_disk) * 1024
4348 vm_details = self.get_vapp_details_rest(vapp_uuid)
4349 if vm_details:
4350 vm_name = vm_details["name"]
4351 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
4352
4353 if vm_details and "vm_virtual_hardware" in vm_details:
4354 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
4355 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4356
4357 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
4358
4359 if flavor_disk > vm_disk:
4360 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
4361 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
4362 vm_disk, flavor_disk ))
4363 else:
4364 status = True
4365 self.logger.info("No need to modify disk of VM {}".format(vm_name))
4366
4367 return status
4368 except Exception as exp:
4369             self.logger.info("Error occurred while modifying disk size {}".format(exp))
4370
4371
4372 def modify_vm_disk_rest(self, disk_href , disk_size):
4373 """
4374         Method modifies the VM disk size through a REST call
4375
4376 Args:
4377 disk_href - vCD API URL to GET and PUT disk data
4378 disk_size - disk size as specified in VNFD (flavor)
4379
4380 Returns:
4381             True if the resize task succeeded, False if it failed, or None on error
4382 """
4383 if disk_href is None or disk_size is None:
4384 return None
4385
4386 if self.client._session:
4387 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4388 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4389 response = self.perform_request(req_type='GET',
4390 url=disk_href,
4391 headers=headers)
4392
4393 if response.status_code == 403:
4394 response = self.retry_rest('GET', disk_href)
4395
4396 if response.status_code != requests.codes.ok:
4397 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
4398 response.status_code))
4399 return None
4400 try:
4401 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4402 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4403 #For python3
4404 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
4405 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4406
4407 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4408 if item.find("rasd:Description",namespaces).text == "Hard disk":
4409 disk_item = item.find("rasd:HostResource" ,namespaces )
4410 if disk_item is not None:
4411 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
4412 break
4413
4414 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
4415 xml_declaration=True)
4416
4417 #Send PUT request to modify disk size
4418 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4419
4420 response = self.perform_request(req_type='PUT',
4421 url=disk_href,
4422 headers=headers,
4423 data=data)
4424 if response.status_code == 403:
4425 add_headers = {'Content-Type': headers['Content-Type']}
4426 response = self.retry_rest('PUT', disk_href, add_headers, data)
4427
4428 if response.status_code != 202:
4429 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
4430 response.status_code))
4431 else:
4432 modify_disk_task = self.get_task_from_response(response.content)
4433 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
4434 if result.get('status') == 'success':
4435 return True
4436 else:
4437 return False
4438 return None
4439
4440 except Exception as exp :
4441                self.logger.info("Error occurred calling rest api for modifying disk size {}".format(exp))
4442 return None
4443
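# For reference (assumed shape, trimmed): the RasdItemsList returned by the GET in
# modify_vm_disk_rest() holds one Item per disk; only the vcloud capacity attribute
# of the "Hard disk" item is rewritten before the PUT, roughly:
#
#   <Item>
#     <rasd:Description>Hard disk</rasd:Description>
#     <rasd:HostResource vcloud:capacity="40960" vcloud:busSubType="lsilogic"
#                        vcloud:busType="6"/>
#   </Item>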
4444 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
4445 """
4446 Method to attach pci devices to VM
4447
4448 Args:
4449 vapp_uuid - uuid of vApp/VM
4450            pci_devices - PCI devices information as specified in VNFD (flavor)
4451
4452        Returns:
4453            The status of the add PCI device task, the vm object and the
4454            vcenter_conect object
4455 """
4456 vm_obj = None
4457 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
4458 vcenter_conect, content = self.get_vcenter_content()
4459 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4460
4461 if vm_moref_id:
4462 try:
4463 no_of_pci_devices = len(pci_devices)
4464 if no_of_pci_devices > 0:
4465 #Get VM and its host
4466 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4467 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4468 if host_obj and vm_obj:
4469                        #get PCI devices from the host on which the vApp is currently installed
4470 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
4471
4472 if avilable_pci_devices is None:
4473 #find other hosts with active pci devices
4474 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
4475 content,
4476 no_of_pci_devices
4477 )
4478
4479 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4480                                #Migrate vm to the host where PCI devices are available
4481 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
4482 task = self.relocate_vm(new_host_obj, vm_obj)
4483 if task is not None:
4484 result = self.wait_for_vcenter_task(task, vcenter_conect)
4485 self.logger.info("Migrate VM status: {}".format(result))
4486 host_obj = new_host_obj
4487 else:
4488 self.logger.info("Fail to migrate VM : {}".format(result))
4489 raise vimconn.vimconnNotFoundException(
4490 "Fail to migrate VM : {} to host {}".format(
4491 vmname_andid,
4492 new_host_obj)
4493 )
4494
4495 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4496 #Add PCI devices one by one
4497 for pci_device in avilable_pci_devices:
4498 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
4499 if task:
4500 status= self.wait_for_vcenter_task(task, vcenter_conect)
4501 if status:
4502 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4503 else:
4504 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4505 return True, vm_obj, vcenter_conect
4506 else:
4507 self.logger.error("Currently there is no host with"\
4508 " {} number of avaialble PCI devices required for VM {}".format(
4509 no_of_pci_devices,
4510 vmname_andid)
4511 )
4512 raise vimconn.vimconnNotFoundException(
4513 "Currently there is no host with {} "\
4514 "number of avaialble PCI devices required for VM {}".format(
4515 no_of_pci_devices,
4516 vmname_andid))
4517 else:
4518 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
4519
4520 except vmodl.MethodFault as error:
4521                self.logger.error("Error occurred while adding PCI devices {}".format(error))
4522 return None, vm_obj, vcenter_conect
4523
4524 def get_vm_obj(self, content, mob_id):
4525 """
4526        Method to get the vSphere VM object associated with a given moref ID
4527 Args:
4528 vapp_uuid - uuid of vApp/VM
4529 content - vCenter content object
4530 mob_id - mob_id of VM
4531
4532 Returns:
4533 VM and host object
4534 """
4535 vm_obj = None
4536 host_obj = None
4537 try :
4538 container = content.viewManager.CreateContainerView(content.rootFolder,
4539 [vim.VirtualMachine], True
4540 )
4541 for vm in container.view:
4542 mobID = vm._GetMoId()
4543 if mobID == mob_id:
4544 vm_obj = vm
4545 host_obj = vm_obj.runtime.host
4546 break
4547 except Exception as exp:
4548 self.logger.error("Error occurred while finding VM object : {}".format(exp))
4549 return host_obj, vm_obj
4550
4551 def get_pci_devices(self, host, need_devices):
4552 """
4553 Method to get the details of pci devices on given host
4554 Args:
4555 host - vSphere host object
4556 need_devices - number of pci devices needed on host
4557
4558 Returns:
4559 array of pci devices
4560 """
4561 all_devices = []
4562 all_device_ids = []
4563 used_devices_ids = []
4564
4565 try:
4566 if host:
4567 pciPassthruInfo = host.config.pciPassthruInfo
4568                pciDevices = host.hardware.pciDevice
4569
4570 for pci_status in pciPassthruInfo:
4571 if pci_status.passthruActive:
4572                        for device in pciDevices:
4573 if device.id == pci_status.id:
4574 all_device_ids.append(device.id)
4575 all_devices.append(device)
4576
4577 #check if devices are in use
4578                avalible_devices = list(all_devices)  # work on a copy so all_devices stays intact
4579 for vm in host.vm:
4580 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
4581 vm_devices = vm.config.hardware.device
4582 for device in vm_devices:
4583 if type(device) is vim.vm.device.VirtualPCIPassthrough:
4584 if device.backing.id in all_device_ids:
4585 for use_device in avalible_devices:
4586 if use_device.id == device.backing.id:
4587 avalible_devices.remove(use_device)
4588 used_devices_ids.append(device.backing.id)
4589 self.logger.debug("Device {} from devices {}"\
4590 "is in use".format(device.backing.id,
4591 device)
4592 )
4593 if len(avalible_devices) < need_devices:
4594 self.logger.debug("Host {} don't have {} number of active devices".format(host,
4595 need_devices))
4596 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
4597 avalible_devices))
4598 return None
4599 else:
4600 required_devices = avalible_devices[:need_devices]
4601 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
4602 len(avalible_devices),
4603 host,
4604 need_devices))
4605 self.logger.info("Retruning {} devices as {}".format(need_devices,
4606 required_devices ))
4607 return required_devices
4608
4609 except Exception as exp:
4610 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
4611
4612 return None
4613
4614 def get_host_and_PCIdevices(self, content, need_devices):
4615 """
4616        Method to get the details of PCI devices on all hosts
4617
4618 Args:
4619            content - vCenter content object
4620 need_devices - number of pci devices needed on host
4621
4622 Returns:
4623 array of pci devices and host object
4624 """
4625 host_obj = None
4626 pci_device_objs = None
4627 try:
4628 if content:
4629 container = content.viewManager.CreateContainerView(content.rootFolder,
4630 [vim.HostSystem], True)
4631 for host in container.view:
4632 devices = self.get_pci_devices(host, need_devices)
4633 if devices:
4634 host_obj = host
4635 pci_device_objs = devices
4636 break
4637 except Exception as exp:
4638 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
4639
4640 return host_obj,pci_device_objs
4641
4642 def relocate_vm(self, dest_host, vm) :
4643 """
4644        Method to relocate a VM to a new host
4645
4646 Args:
4647 dest_host - vSphere host object
4648 vm - vSphere VM object
4649
4650 Returns:
4651 task object
4652 """
4653 task = None
4654 try:
4655 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
4656 task = vm.Relocate(relocate_spec)
4657 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
4658 except Exception as exp:
4659 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
4660 dest_host, vm, exp))
4661 return task
4662
4663 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
4664 """
4665 Waits and provides updates on a vSphere task
4666 """
4667 while task.info.state == vim.TaskInfo.State.running:
4668 time.sleep(2)
4669
4670 if task.info.state == vim.TaskInfo.State.success:
4671 if task.info.result is not None and not hideResult:
4672 self.logger.info('{} completed successfully, result: {}'.format(
4673 actionName,
4674 task.info.result))
4675 else:
4676 self.logger.info('Task {} completed successfully.'.format(actionName))
4677 else:
4678 self.logger.error('{} did not complete successfully: {} '.format(
4679 actionName,
4680 task.info.error)
4681 )
4682
4683 return task.info.result
4684
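# Typical usage inside this connector (sketch): start a pyVmomi task, e.g. a
# relocate or reconfigure, and block on it with the helper above. The actionName
# value is just a label used in the log messages.
#
#   task = self.relocate_vm(new_host_obj, vm_obj)
#   if task:
#       result = self.wait_for_vcenter_task(task, actionName='relocate')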
4685 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
4686 """
4687 Method to add pci device in given VM
4688
4689 Args:
4690 host_object - vSphere host object
4691 vm_object - vSphere VM object
4692 host_pci_dev - host_pci_dev must be one of the devices from the
4693 host_object.hardware.pciDevice list
4694 which is configured as a PCI passthrough device
4695
4696 Returns:
4697 task object
4698 """
4699 task = None
4700 if vm_object and host_object and host_pci_dev:
4701 try :
4702 #Add PCI device to VM
4703 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
4704 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
4705
4706 if host_pci_dev.id not in systemid_by_pciid:
4707 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
4708 return None
4709
4710                deviceId = '%x' % (host_pci_dev.deviceId % 2**16)  # hex string without the '0x' prefix
4711 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
4712 id=host_pci_dev.id,
4713 systemId=systemid_by_pciid[host_pci_dev.id],
4714 vendorId=host_pci_dev.vendorId,
4715 deviceName=host_pci_dev.deviceName)
4716
4717 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
4718
4719 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
4720 new_device_config.operation = "add"
4721 vmConfigSpec = vim.vm.ConfigSpec()
4722 vmConfigSpec.deviceChange = [new_device_config]
4723
4724 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
4725 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
4726 host_pci_dev, vm_object, host_object)
4727 )
4728 except Exception as exp:
4729                self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
4730 host_pci_dev,
4731 vm_object,
4732 exp))
4733 return task
4734
4735 def get_vm_vcenter_info(self):
4736 """
4737        Method to get vCenter connection details
4738
4739        Args:
4740            None - vCenter details are read from the VIM config
4741
4742        Returns:
4743            dict with vCenter IP, port, user and password
4744 """
4745 vm_vcenter_info = {}
4746
4747 if self.vcenter_ip is not None:
4748 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
4749 else:
4750 raise vimconn.vimconnException(message="vCenter IP is not provided."\
4751 " Please provide vCenter IP while attaching datacenter to tenant in --config")
4752 if self.vcenter_port is not None:
4753 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
4754 else:
4755 raise vimconn.vimconnException(message="vCenter port is not provided."\
4756 " Please provide vCenter port while attaching datacenter to tenant in --config")
4757 if self.vcenter_user is not None:
4758 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
4759 else:
4760 raise vimconn.vimconnException(message="vCenter user is not provided."\
4761 " Please provide vCenter user while attaching datacenter to tenant in --config")
4762
4763 if self.vcenter_password is not None:
4764 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
4765 else:
4766 raise vimconn.vimconnException(message="vCenter user password is not provided."\
4767 " Please provide vCenter user password while attaching datacenter to tenant in --config")
4768
4769 return vm_vcenter_info
4770
4771
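# Illustrative --config fragment (an assumption: the keys mirror the connector
# attributes checked in get_vm_vcenter_info(); adjust to the actual
# datacenter-attach options). Values are placeholders:
#
#   config_example = {
#       'vcenter_ip': '10.0.0.10',
#       'vcenter_port': 443,
#       'vcenter_user': 'administrator@vsphere.local',
#       'vcenter_password': '******',
#   }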
4772 def get_vm_pci_details(self, vmuuid):
4773 """
4774 Method to get VM PCI device details from vCenter
4775
4776 Args:
4777            vmuuid - vCD UUID of the VM
4778
4779 Returns:
4780            dict of PCI devices attached to VM
4781
4782 """
4783 vm_pci_devices_info = {}
4784 try:
4785 vcenter_conect, content = self.get_vcenter_content()
4786 vm_moref_id = self.get_vm_moref_id(vmuuid)
4787 if vm_moref_id:
4788 #Get VM and its host
4789 if content:
4790 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4791 if host_obj and vm_obj:
4792 vm_pci_devices_info["host_name"]= host_obj.name
4793 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
4794 for device in vm_obj.config.hardware.device:
4795 if type(device) == vim.vm.device.VirtualPCIPassthrough:
4796 device_details={'devide_id':device.backing.id,
4797 'pciSlotNumber':device.slotInfo.pciSlotNumber,
4798 }
4799 vm_pci_devices_info[device.deviceInfo.label] = device_details
4800 else:
4801 self.logger.error("Can not connect to vCenter while getting "\
4802 "PCI devices infromationn")
4803 return vm_pci_devices_info
4804 except Exception as exp:
4805            self.logger.error("Error occurred while getting VM information"\
4806 " for VM : {}".format(exp))
4807 raise vimconn.vimconnException(message=exp)
4808
4809
4810 def reserve_memory_for_all_vms(self, vapp, memory_mb):
4811 """
4812 Method to reserve memory for all VMs
4813 Args :
4814 vapp - VApp
4815 memory_mb - Memory in MB
4816 Returns:
4817 None
4818 """
4819
4820 self.logger.info("Reserve memory for all VMs")
4821 for vms in vapp.get_all_vms():
4822 vm_id = vms.get('id').split(':')[-1]
4823
4824 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
4825
4826 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4827 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4828 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
4829 response = self.perform_request(req_type='GET',
4830 url=url_rest_call,
4831 headers=headers)
4832
4833 if response.status_code == 403:
4834 response = self.retry_rest('GET', url_rest_call)
4835
4836 if response.status_code != 200:
4837 self.logger.error("REST call {} failed reason : {}"\
4838 "status code : {}".format(url_rest_call,
4839 response.content,
4840 response.status_code))
4841 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
4842 "memory")
4843
4844 bytexml = bytes(bytearray(response.content, encoding='utf-8'))
4845 contentelem = lxmlElementTree.XML(bytexml)
4846 namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.iteritems() if prefix}
4847 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4848
4849 # Find the reservation element in the response
4850 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
4851 for memelem in memelem_list:
4852 memelem.text = str(memory_mb)
4853
4854 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
4855
4856 response = self.perform_request(req_type='PUT',
4857 url=url_rest_call,
4858 headers=headers,
4859 data=newdata)
4860
4861 if response.status_code == 403:
4862 add_headers = {'Content-Type': headers['Content-Type']}
4863 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4864
4865 if response.status_code != 202:
4866 self.logger.error("REST call {} failed reason : {}"\
4867 "status code : {} ".format(url_rest_call,
4868 response.content,
4869 response.status_code))
4870 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
4871 "virtual hardware memory section")
4872 else:
4873 mem_task = self.get_task_from_response(response.content)
4874 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
4875 if result.get('status') == 'success':
4876 self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
4877 .format(vm_id))
4878 else:
4879 self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
4880 .format(vm_id))
4881
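# For reference (assumed shape): the GET above returns the VM memory RASD item and
# only the rasd:Reservation value is rewritten before the PUT, e.g. reserving the
# full 4096 MB of a 4 GB VM:
#
#   <rasd:Reservation>4096</rasd:Reservation>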
4882 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
4883 """
4884 Configure VApp network config with org vdc network
4885 Args :
4886            vapp_id - vApp identifier; net_name - name of the org VDC network
4887 Returns:
4888 None
4889 """
4890
4891 self.logger.info("Connecting vapp {} to org vdc network {}".
4892 format(vapp_id, net_name))
4893
4894 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
4895
4896 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4897 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4898 response = self.perform_request(req_type='GET',
4899 url=url_rest_call,
4900 headers=headers)
4901
4902 if response.status_code == 403:
4903 response = self.retry_rest('GET', url_rest_call)
4904
4905 if response.status_code != 200:
4906 self.logger.error("REST call {} failed reason : {}"\
4907 "status code : {}".format(url_rest_call,
4908 response.content,
4909 response.status_code))
4910 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
4911 "network config section")
4912
4913 data = response.content
4914 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
4915 net_id = self.get_network_id_by_name(net_name)
4916 if not net_id:
4917 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
4918 "existing network")
4919
4920 bytexml = bytes(bytearray(data, encoding='utf-8'))
4921 newelem = lxmlElementTree.XML(bytexml)
4922 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.iteritems() if prefix}
4923 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
4924 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
4925
4926 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
4927 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
4928 if parentnetworklist:
4929 for pn in parentnetworklist:
4930 if "href" not in pn.keys():
4931 id_val = pn.get("id")
4932 href_val = "{}/api/network/{}".format(self.url, id_val)
4933 pn.set("href", href_val)
4934
4935 newstr = """<NetworkConfig networkName="{}">
4936 <Configuration>
4937 <ParentNetwork href="{}/api/network/{}"/>
4938 <FenceMode>bridged</FenceMode>
4939 </Configuration>
4940 </NetworkConfig>
4941 """.format(net_name, self.url, net_id)
4942 newcfgelem = lxmlElementTree.fromstring(newstr)
4943 if nwcfglist:
4944 nwcfglist[0].addnext(newcfgelem)
4945
4946 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
4947
4948 response = self.perform_request(req_type='PUT',
4949 url=url_rest_call,
4950 headers=headers,
4951 data=newdata)
4952
4953 if response.status_code == 403:
4954 add_headers = {'Content-Type': headers['Content-Type']}
4955 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4956
4957 if response.status_code != 202:
4958 self.logger.error("REST call {} failed reason : {}"\
4959 "status code : {} ".format(url_rest_call,
4960 response.content,
4961 response.status_code))
4962 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
4963 "network config section")
4964 else:
4965 vapp_task = self.get_task_from_response(response.content)
4966 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
4967 if result.get('status') == 'success':
4968 self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
4969 "network {}".format(vapp_id, net_name))
4970 else:
4971 self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
4972 "connect to network {}".format(vapp_id, net_name))
4973
4974 def remove_primary_network_adapter_from_all_vms(self, vapp):
4975 """
4976        Method to remove the primary network adapter from all VMs of the vApp
4977 Args :
4978 vapp - VApp
4979 Returns:
4980 None
4981 """
4982
4983 self.logger.info("Removing network adapter from all VMs")
4984 for vms in vapp.get_all_vms():
4985 vm_id = vms.get('id').split(':')[-1]
4986
4987 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
4988
4989 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4990 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4991 response = self.perform_request(req_type='GET',
4992 url=url_rest_call,
4993 headers=headers)
4994
4995 if response.status_code == 403:
4996 response = self.retry_rest('GET', url_rest_call)
4997
4998 if response.status_code != 200:
4999 self.logger.error("REST call {} failed reason : {}"\
5000 "status code : {}".format(url_rest_call,
5001 response.content,
5002 response.status_code))
5003 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
5004 "network connection section")
5005
5006 data = response.content
5007 data = data.split('<Link rel="edit"')[0]
5008
5009 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5010
5011 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5012 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
5013 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5014 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
5015 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
5016 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
5017 xmlns:vmw="http://www.vmware.com/schema/ovf"
5018 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
5019 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
5020 xmlns:ns9="http://www.vmware.com/vcloud/versions"
5021 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
5022 <ovf:Info>Specifies the available VM network connections</ovf:Info>
5023 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
5024 <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
5025 </NetworkConnectionSection>""".format(url=url_rest_call)
5026 response = self.perform_request(req_type='PUT',
5027 url=url_rest_call,
5028 headers=headers,
5029 data=newdata)
5030
5031 if response.status_code == 403:
5032 add_headers = {'Content-Type': headers['Content-Type']}
5033 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5034
5035 if response.status_code != 202:
5036 self.logger.error("REST call {} failed reason : {}"\
5037 "status code : {} ".format(url_rest_call,
5038 response.content,
5039 response.status_code))
5040 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
5041 "network connection section")
5042 else:
5043 nic_task = self.get_task_from_response(response.content)
5044 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5045 if result.get('status') == 'success':
5046                self.logger.info("remove_primary_network_adapter(): removed primary "\
5047                                 "network adapter from VM {}".format(vm_id))
5048 else:
5049                self.logger.error("remove_primary_network_adapter(): failed to remove primary "\
5050                                  "network adapter from VM {}".format(vm_id))
5051
5052 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
5053 """
5054        Method to add a network adapter to all VMs of the vApp
5055 Args :
5056 network_name - name of network
5057 primary_nic_index - int value for primary nic index
5058 nicIndex - int value for nic index
5059            nic_type - network adapter model to attach to the VM
5060 Returns:
5061 None
5062 """
5063
5064 self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
5065 format(network_name, nicIndex, nic_type))
5066 try:
5067 ip_address = None
5068 floating_ip = False
5069 mac_address = None
5070 if 'floating_ip' in net: floating_ip = net['floating_ip']
5071
5072 # Stub for ip_address feature
5073 if 'ip_address' in net: ip_address = net['ip_address']
5074
5075 if 'mac_address' in net: mac_address = net['mac_address']
5076
5077 if floating_ip:
5078 allocation_mode = "POOL"
5079 elif ip_address:
5080 allocation_mode = "MANUAL"
5081 else:
5082 allocation_mode = "DHCP"
5083
5084 if not nic_type:
5085 for vms in vapp.get_all_vms():
5086 vm_id = vms.get('id').split(':')[-1]
5087
5088 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5089
5090 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5091 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5092 response = self.perform_request(req_type='GET',
5093 url=url_rest_call,
5094 headers=headers)
5095
5096 if response.status_code == 403:
5097 response = self.retry_rest('GET', url_rest_call)
5098
5099 if response.status_code != 200:
5100 self.logger.error("REST call {} failed reason : {}"\
5101 "status code : {}".format(url_rest_call,
5102 response.content,
5103 response.status_code))
5104 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5105 "network connection section")
5106
5107 data = response.content
5108 data = data.split('<Link rel="edit"')[0]
5109 if '<PrimaryNetworkConnectionIndex>' not in data:
5110 self.logger.debug("add_network_adapter PrimaryNIC not in data")
5111 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5112 <NetworkConnection network="{}">
5113 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5114 <IsConnected>true</IsConnected>
5115 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5116 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5117 allocation_mode)
5118 # Stub for ip_address feature
5119 if ip_address:
5120 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5121 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5122
5123 if mac_address:
5124 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5125 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5126
5127 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5128 else:
5129 self.logger.debug("add_network_adapter PrimaryNIC in data")
5130 new_item = """<NetworkConnection network="{}">
5131 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5132 <IsConnected>true</IsConnected>
5133 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5134 </NetworkConnection>""".format(network_name, nicIndex,
5135 allocation_mode)
5136 # Stub for ip_address feature
5137 if ip_address:
5138 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5139 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5140
5141 if mac_address:
5142 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5143 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5144
5145 data = data + new_item + '</NetworkConnectionSection>'
5146
5147 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5148
5149 response = self.perform_request(req_type='PUT',
5150 url=url_rest_call,
5151 headers=headers,
5152 data=data)
5153
5154 if response.status_code == 403:
5155 add_headers = {'Content-Type': headers['Content-Type']}
5156 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5157
5158 if response.status_code != 202:
5159 self.logger.error("REST call {} failed reason : {}"\
5160 "status code : {} ".format(url_rest_call,
5161 response.content,
5162 response.status_code))
5163 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5164 "network connection section")
5165 else:
5166 nic_task = self.get_task_from_response(response.content)
5167 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5168 if result.get('status') == 'success':
5169                        self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
5170 "default NIC type".format(vm_id))
5171 else:
5172 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
5173 "connect NIC type".format(vm_id))
5174 else:
5175 for vms in vapp.get_all_vms():
5176 vm_id = vms.get('id').split(':')[-1]
5177
5178 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5179
5180 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5181 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5182 response = self.perform_request(req_type='GET',
5183 url=url_rest_call,
5184 headers=headers)
5185
5186 if response.status_code == 403:
5187 response = self.retry_rest('GET', url_rest_call)
5188
5189 if response.status_code != 200:
5190 self.logger.error("REST call {} failed reason : {}"\
5191 "status code : {}".format(url_rest_call,
5192 response.content,
5193 response.status_code))
5194 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5195 "network connection section")
5196 data = response.content
5197 data = data.split('<Link rel="edit"')[0]
5198 vcd_netadapter_type = nic_type
5199 if nic_type in ['SR-IOV', 'VF']:
5200 vcd_netadapter_type = "SRIOVETHERNETCARD"
5201
5202 if '<PrimaryNetworkConnectionIndex>' not in data:
5203 self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
5204 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5205 <NetworkConnection network="{}">
5206 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5207 <IsConnected>true</IsConnected>
5208 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5209 <NetworkAdapterType>{}</NetworkAdapterType>
5210 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5211 allocation_mode, vcd_netadapter_type)
5212 # Stub for ip_address feature
5213 if ip_address:
5214 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5215 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5216
5217 if mac_address:
5218 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5219 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5220
5221 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5222 else:
5223 self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
5224 new_item = """<NetworkConnection network="{}">
5225 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5226 <IsConnected>true</IsConnected>
5227 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5228 <NetworkAdapterType>{}</NetworkAdapterType>
5229 </NetworkConnection>""".format(network_name, nicIndex,
5230 allocation_mode, vcd_netadapter_type)
5231 # Stub for ip_address feature
5232 if ip_address:
5233 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5234 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5235
5236 if mac_address:
5237 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5238 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5239
5240 data = data + new_item + '</NetworkConnectionSection>'
5241
5242 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5243
5244 response = self.perform_request(req_type='PUT',
5245 url=url_rest_call,
5246 headers=headers,
5247 data=data)
5248
5249 if response.status_code == 403:
5250 add_headers = {'Content-Type': headers['Content-Type']}
5251 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5252
5253 if response.status_code != 202:
5254 self.logger.error("REST call {} failed reason : {}"\
5255 "status code : {}".format(url_rest_call,
5256 response.content,
5257 response.status_code))
5258 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5259 "network connection section")
5260 else:
5261 nic_task = self.get_task_from_response(response.content)
5262 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5263 if result.get('status') == 'success':
5264 self.logger.info("add_network_adapter_to_vms(): VM {} "\
5265 "conneced to NIC type {}".format(vm_id, nic_type))
5266 else:
5267 self.logger.error("add_network_adapter_to_vms(): VM {} "\
5268 "failed to connect NIC type {}".format(vm_id, nic_type))
5269 except Exception as exp:
5270 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
5271 "while adding Network adapter")
5272 raise vimconn.vimconnException(message=exp)
5273
5274
5275 def set_numa_affinity(self, vmuuid, paired_threads_id):
5276 """
5277        Method to assign NUMA affinity in VM configuration parameters
5278 Args :
5279 vmuuid - vm uuid
5280 paired_threads_id - one or more virtual processor
5281 numbers
5282 Returns:
5283            None on success; raises vimconnException on failure
5284 """
5285 try:
5286 vcenter_conect, content = self.get_vcenter_content()
5287 vm_moref_id = self.get_vm_moref_id(vmuuid)
5288
5289 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
5290 if vm_obj:
5291 config_spec = vim.vm.ConfigSpec()
5292 config_spec.extraConfig = []
5293 opt = vim.option.OptionValue()
5294 opt.key = 'numa.nodeAffinity'
5295 opt.value = str(paired_threads_id)
5296 config_spec.extraConfig.append(opt)
5297 task = vm_obj.ReconfigVM_Task(config_spec)
5298 if task:
5299 result = self.wait_for_vcenter_task(task, vcenter_conect)
5300 extra_config = vm_obj.config.extraConfig
5301 flag = False
5302 for opts in extra_config:
5303 if 'numa.nodeAffinity' in opts.key:
5304 flag = True
5305                        self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
5306 "value {} for vm {}".format(opt.value, vm_obj))
5307 if flag:
5308 return
5309 else:
5310 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
5311 except Exception as exp:
5312 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
5313 "for VM {} : {}".format(vm_obj, vm_moref_id))
5314 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
5315 "affinity".format(exp))
5316
5317
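# Worked example (sketch): numa.nodeAffinity takes a comma separated list of NUMA
# node numbers, so pinning a VM to nodes 0 and 1 sets the extraConfig option value
# to the string "0,1". Hypothetical call, not invoked elsewhere in this module:
#
#   vim_connector.set_numa_affinity(vmuuid="<vm-uuid>", paired_threads_id="0,1")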
5318 def cloud_init(self, vapp, cloud_config):
5319 """
5320 Method to inject ssh-key
5321 vapp - vapp object
5322 cloud_config a dictionary with:
5323 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
5324 'users': (optional) list of users to be inserted, each item is a dict with:
5325 'name': (mandatory) user name,
5326 'key-pairs': (optional) list of strings with the public key to be inserted to the user
5327 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
5328 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
5329 'config-files': (optional). List of files to be transferred. Each item is a dict with:
5330 'dest': (mandatory) string with the destination absolute path
5331 'encoding': (optional, by default text). Can be one of:
5332 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
5333 'content' (mandatory): string with the content of the file
5334 'permissions': (optional) string with file permissions, typically octal notation '0644'
5335 'owner': (optional) file owner, string with the format 'owner:group'
5336            'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
5337 """
5338 try:
5339 if not isinstance(cloud_config, dict):
5340 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
5341 else:
5342 key_pairs = []
5343 userdata = []
5344 if "key-pairs" in cloud_config:
5345 key_pairs = cloud_config["key-pairs"]
5346
5347 if "users" in cloud_config:
5348 userdata = cloud_config["users"]
5349
5350 self.logger.debug("cloud_init : Guest os customization started..")
5351 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
5352 customize_script = customize_script.replace("&","&amp;")
5353 self.guest_customization(vapp, customize_script)
5354
5355 except Exception as exp:
5356 self.logger.error("cloud_init : exception occurred while injecting "\
5357 "ssh-key")
5358 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
5359 "ssh-key".format(exp))
5360
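# Minimal cloud_config example accepted by cloud_init() (a sketch following the
# docstring above; key material and user names are placeholders):
#
#   cloud_config_example = {
#       'key-pairs': ['ssh-rsa AAAA... operator@osm'],
#       'users': [{'name': 'ubuntu', 'key-pairs': ['ssh-rsa AAAA... ubuntu@osm']}],
#   }
#   self.cloud_init(vapp, cloud_config_example)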
5361 def format_script(self, key_pairs=[], users_list=[]):
5362 bash_script = """#!/bin/sh
5363 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5364 if [ "$1" = "precustomization" ];then
5365 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5366 """
5367
5368 keys = "\n".join(key_pairs)
5369 if keys:
5370 keys_data = """
5371 if [ ! -d /root/.ssh ];then
5372 mkdir /root/.ssh
5373 chown root:root /root/.ssh
5374 chmod 700 /root/.ssh
5375 touch /root/.ssh/authorized_keys
5376 chown root:root /root/.ssh/authorized_keys
5377 chmod 600 /root/.ssh/authorized_keys
5378 # make centos with selinux happy
5379 which restorecon && restorecon -Rv /root/.ssh
5380 else
5381 touch /root/.ssh/authorized_keys
5382 chown root:root /root/.ssh/authorized_keys
5383 chmod 600 /root/.ssh/authorized_keys
5384 fi
5385 echo '{key}' >> /root/.ssh/authorized_keys
5386 """.format(key=keys)
5387
5388 bash_script+= keys_data
5389
5390 for user in users_list:
5391 if 'name' in user: user_name = user['name']
5392 if 'key-pairs' in user:
5393 user_keys = "\n".join(user['key-pairs'])
5394 else:
5395 user_keys = None
5396
5397 add_user_name = """
5398 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
5399 """.format(user_name=user_name)
5400
5401 bash_script+= add_user_name
5402
5403 if user_keys:
5404 user_keys_data = """
5405 mkdir /home/{user_name}/.ssh
5406 chown {user_name}:{user_name} /home/{user_name}/.ssh
5407 chmod 700 /home/{user_name}/.ssh
5408 touch /home/{user_name}/.ssh/authorized_keys
5409 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
5410 chmod 600 /home/{user_name}/.ssh/authorized_keys
5411 # make centos with selinux happy
5412 which restorecon && restorecon -Rv /home/{user_name}/.ssh
5413 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
5414 """.format(user_name=user_name,user_key=user_keys)
5415
5416 bash_script+= user_keys_data
5417
5418 return bash_script+"\n\tfi"
5419
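# Usage sketch (mirrors what cloud_init() does above): build the precustomization
# script from the key pairs, escape ampersands for the XML payload, and install it
# as the vCD guest customization script.
#
#   script = self.format_script(key_pairs=['ssh-rsa AAAA... operator@osm'])
#   self.guest_customization(vapp, script.replace("&", "&amp;"))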
5420 def guest_customization(self, vapp, customize_script):
5421 """
5422 Method to customize guest os
5423 vapp - Vapp object
5424 customize_script - Customize script to be run at first boot of VM.
5425 """
5426 for vm in vapp.get_all_vms():
5427 vm_id = vm.get('id').split(':')[-1]
5428 vm_name = vm.get('name')
5429 vm_name = vm_name.replace('_','-')
5430
5431 vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
5432 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5433 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5434
5435 headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
5436
5437 data = """<GuestCustomizationSection
5438 xmlns="http://www.vmware.com/vcloud/v1.5"
5439 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5440 ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
5441 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
5442 <Enabled>true</Enabled>
5443 <ChangeSid>false</ChangeSid>
5444 <VirtualMachineId>{}</VirtualMachineId>
5445 <JoinDomainEnabled>false</JoinDomainEnabled>
5446 <UseOrgSettings>false</UseOrgSettings>
5447 <AdminPasswordEnabled>false</AdminPasswordEnabled>
5448 <AdminPasswordAuto>true</AdminPasswordAuto>
5449 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
5450 <AdminAutoLogonCount>0</AdminAutoLogonCount>
5451 <ResetPasswordRequired>false</ResetPasswordRequired>
5452 <CustomizationScript>{}</CustomizationScript>
5453 <ComputerName>{}</ComputerName>
5454 <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
5455 </GuestCustomizationSection>
5456 """.format(vm_customization_url,
5457 vm_id,
5458 customize_script,
5459 vm_name,
5460 vm_customization_url)
5461
5462 response = self.perform_request(req_type='PUT',
5463 url=vm_customization_url,
5464 headers=headers,
5465 data=data)
5466 if response.status_code == 202:
5467 guest_task = self.get_task_from_response(response.content)
5468 self.client.get_task_monitor().wait_for_success(task=guest_task)
5469 self.logger.info("guest_customization : customized guest os task "\
5470 "completed for VM {}".format(vm_name))
5471 else:
5472                self.logger.error("guest_customization : task for customized guest os "\
5473                                  "failed for VM {}".format(vm_name))
5474                raise vimconn.vimconnException("guest_customization : failed to perform "\
5475                                               "guest os customization on VM {}".format(vm_name))
5476
5477 def add_new_disk(self, vapp_uuid, disk_size):
5478 """
5479 Method to create an empty vm disk
5480
5481 Args:
5482 vapp_uuid - is vapp identifier.
5483 disk_size - size of disk to be created in GB
5484
5485 Returns:
5486 None
5487 """
5488 status = False
5489 vm_details = None
5490 try:
5491 #Disk size in GB, convert it into MB
5492 if disk_size is not None:
5493 disk_size_mb = int(disk_size) * 1024
5494 vm_details = self.get_vapp_details_rest(vapp_uuid)
5495
5496 if vm_details and "vm_virtual_hardware" in vm_details:
5497 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5498 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5499 status = self.add_new_disk_rest(disk_href, disk_size_mb)
5500
5501 except Exception as exp:
5502 msg = "Error occurred while creating new disk {}.".format(exp)
5503 self.rollback_newvm(vapp_uuid, msg)
5504
5505 if status:
5506 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5507 else:
5508 #If failed to add disk, delete VM
5509 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
5510 self.rollback_newvm(vapp_uuid, msg)
5511
5512
5513 def add_new_disk_rest(self, disk_href, disk_size_mb):
5514 """
5515        Retrieves the vApp disk section & adds a new empty disk
5516
5517 Args:
5518            disk_href: Disk section href to add disk
5519 disk_size_mb: Disk size in MB
5520
5521 Returns: Status of add new disk task
5522 """
5523 status = False
5524 if self.client._session:
5525 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5526 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5527 response = self.perform_request(req_type='GET',
5528 url=disk_href,
5529 headers=headers)
5530
5531 if response.status_code == 403:
5532 response = self.retry_rest('GET', disk_href)
5533
5534 if response.status_code != requests.codes.ok:
5535 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
5536 .format(disk_href, response.status_code))
5537 return status
5538 try:
5539                #Find bus type & max of instance IDs assigned to disks
5540 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5541 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
5542 #For python3
5543 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
5544 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
5545 instance_id = 0
5546 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
5547 if item.find("rasd:Description",namespaces).text == "Hard disk":
5548 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
5549 if inst_id > instance_id:
5550 instance_id = inst_id
5551 disk_item = item.find("rasd:HostResource" ,namespaces)
5552 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
5553 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
5554
5555 instance_id = instance_id + 1
5556 new_item = """<Item>
5557 <rasd:Description>Hard disk</rasd:Description>
5558 <rasd:ElementName>New disk</rasd:ElementName>
5559 <rasd:HostResource
5560 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
5561 vcloud:capacity="{}"
5562 vcloud:busSubType="{}"
5563 vcloud:busType="{}"></rasd:HostResource>
5564 <rasd:InstanceID>{}</rasd:InstanceID>
5565 <rasd:ResourceType>17</rasd:ResourceType>
5566 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
5567
5568 new_data = response.content
5569 #Add new item at the bottom
5570 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
5571
5572 # Send PUT request to modify virtual hardware section with new disk
5573 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
5574
5575 response = self.perform_request(req_type='PUT',
5576 url=disk_href,
5577 data=new_data,
5578 headers=headers)
5579
5580 if response.status_code == 403:
5581 add_headers = {'Content-Type': headers['Content-Type']}
5582 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
5583
5584 if response.status_code != 202:
5585 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
5586 .format(disk_href, response.status_code, response.content))
5587 else:
5588 add_disk_task = self.get_task_from_response(response.content)
5589 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
5590 if result.get('status') == 'success':
5591 status = True
5592 else:
5593 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
5594
5595 except Exception as exp:
5596 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
5597
5598 return status
5599
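# Hypothetical usage (sketch): add_new_disk() drives this REST helper, e.g. a 10 GB
# volume from the VNFD becomes a 10240 MB RASD disk item appended to the list
# retrieved above:
#
#   self.add_new_disk(vapp_uuid, disk_size=10)   # internally calls add_new_disk_rest(disk_href, 10240)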
5600
5601 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
5602 """
5603 Method to add existing disk to vm
5604 Args :
5605 catalogs - List of VDC catalogs
5606 image_id - Catalog ID
5607 template_name - Name of template in catalog
5608 vapp_uuid - UUID of vApp
5609 Returns:
5610 None
5611 """
5612 disk_info = None
5613 vcenter_conect, content = self.get_vcenter_content()
5614 #find moref-id of vm in image
5615 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
5616 image_id=image_id,
5617 )
5618
5619 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
5620 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
5621 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
5622 if catalog_vm_moref_id:
5623 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
5624 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
5625 if catalog_vm_obj:
5626 #find existing disk
5627 disk_info = self.find_disk(catalog_vm_obj)
5628 else:
5629 exp_msg = "No VM with image id {} found".format(image_id)
5630 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5631 else:
5632 exp_msg = "No Image found with image ID {} ".format(image_id)
5633 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5634
5635 if disk_info:
5636 self.logger.info("Existing disk_info : {}".format(disk_info))
5637 #get VM
5638 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5639 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
5640 if vm_obj:
5641 status = self.add_disk(vcenter_conect=vcenter_conect,
5642 vm=vm_obj,
5643 disk_info=disk_info,
5644 size=size,
5645 vapp_uuid=vapp_uuid
5646 )
5647 if status:
5648 self.logger.info("Disk from image id {} added to {}".format(image_id,
5649 vm_obj.config.name)
5650 )
5651 else:
5652 msg = "No disk found with image id {} to add in VM {}".format(
5653 image_id,
5654 vm_obj.config.name)
5655 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
5656
5657
5658 def find_disk(self, vm_obj):
5659 """
5660 Method to find details of existing disk in VM
5661 Args :
5662 vm_obj - vCenter object of VM
5663 image_id - Catalog ID
5664 Returns:
5665 disk_info : dict of disk details
5666 """
5667 disk_info = {}
5668 if vm_obj:
5669 try:
5670 devices = vm_obj.config.hardware.device
5671 for device in devices:
5672 if type(device) is vim.vm.device.VirtualDisk:
5673 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
5674 disk_info["full_path"] = device.backing.fileName
5675 disk_info["datastore"] = device.backing.datastore
5676 disk_info["capacityKB"] = device.capacityInKB
5677 break
5678 except Exception as exp:
5679 self.logger.error("find_disk() : exception occurred while "\
5680 "getting existing disk details :{}".format(exp))
5681 return disk_info
5682
5683
5684 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
5685 """
5686 Method to add existing disk in VM
5687 Args :
5688 vcenter_conect - vCenter content object
5689 vm - vCenter vm object
5690 disk_info : dict of disk details
5691 Returns:
5692 status : status of add disk task
5693 """
5694 datastore = disk_info["datastore"] if "datastore" in disk_info else None
5695 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
5696 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
5697 if size is not None:
5698 #Convert size from GB to KB
5699 sizeKB = int(size) * 1024 * 1024
5700            #compare size of existing disk and user given size. Assign whichever is greater
5701 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
5702 sizeKB, capacityKB))
5703 if sizeKB > capacityKB:
5704 capacityKB = sizeKB
5705
5706 if datastore and fullpath and capacityKB:
5707 try:
5708 spec = vim.vm.ConfigSpec()
5709 # get all disks on a VM, set unit_number to the next available
5710 unit_number = 0
5711 for dev in vm.config.hardware.device:
5712 if hasattr(dev.backing, 'fileName'):
5713 unit_number = int(dev.unitNumber) + 1
5714 # unit_number 7 reserved for scsi controller
5715 if unit_number == 7:
5716 unit_number += 1
5717 if isinstance(dev, vim.vm.device.VirtualDisk):
5718 #vim.vm.device.VirtualSCSIController
5719 controller_key = dev.controllerKey
5720
5721 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
5722 unit_number, controller_key))
5723 # add disk here
5724 dev_changes = []
5725 disk_spec = vim.vm.device.VirtualDeviceSpec()
5726 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5727 disk_spec.device = vim.vm.device.VirtualDisk()
5728 disk_spec.device.backing = \
5729 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
5730 disk_spec.device.backing.thinProvisioned = True
5731 disk_spec.device.backing.diskMode = 'persistent'
5732 disk_spec.device.backing.datastore = datastore
5733 disk_spec.device.backing.fileName = fullpath
5734
5735 disk_spec.device.unitNumber = unit_number
5736 disk_spec.device.capacityInKB = capacityKB
5737 disk_spec.device.controllerKey = controller_key
5738 dev_changes.append(disk_spec)
5739 spec.deviceChange = dev_changes
5740 task = vm.ReconfigVM_Task(spec=spec)
5741 status = self.wait_for_vcenter_task(task, vcenter_conect)
5742 return status
5743 except Exception as exp:
5744 exp_msg = "add_disk() : exception {} occurred while adding disk "\
5745 "{} to vm {}".format(exp,
5746 fullpath,
5747 vm.config.name)
5748 self.rollback_newvm(vapp_uuid, exp_msg)
5749 else:
5750 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
5751 self.rollback_newvm(vapp_uuid, msg)
5752
5753
5754 def get_vcenter_content(self):
5755 """
5756        Get the vCenter connection and vSphere service content object
5757 """
5758 try:
5759 vm_vcenter_info = self.get_vm_vcenter_info()
5760 except Exception as exp:
5761            self.logger.error("Error occurred while getting vCenter information"\
5762 " for VM : {}".format(exp))
5763 raise vimconn.vimconnException(message=exp)
5764
5765 context = None
5766 if hasattr(ssl, '_create_unverified_context'):
5767 context = ssl._create_unverified_context()
5768
5769 vcenter_conect = SmartConnect(
5770 host=vm_vcenter_info["vm_vcenter_ip"],
5771 user=vm_vcenter_info["vm_vcenter_user"],
5772 pwd=vm_vcenter_info["vm_vcenter_password"],
5773 port=int(vm_vcenter_info["vm_vcenter_port"]),
5774 sslContext=context
5775 )
5776 atexit.register(Disconnect, vcenter_conect)
5777 content = vcenter_conect.RetrieveContent()
5778 return vcenter_conect, content
5779
5780
5781 def get_vm_moref_id(self, vapp_uuid):
5782 """
5783 Get the moref_id of given VM
5784 """
5785 try:
5786 if vapp_uuid:
5787 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
5788 if vm_details and "vm_vcenter_info" in vm_details:
5789 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
5790 return vm_moref_id
5791
5792 except Exception as exp:
5793 self.logger.error("Error occurred while getting VM moref ID "\
5794 " for VM : {}".format(exp))
5795 return None
5796
5797
5798 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
5799 """
5800 Method to get vApp template details
5801 Args :
5802 catalogs - list of VDC catalogs
5803 image_id - Catalog ID to find
5804 template_name : template name in catalog
5805 Returns:
5806            parsed_response : dict of vApp template details
5807 """
5808 parsed_response = {}
5809
5810 vca = self.connect_as_admin()
5811 if not vca:
5812 raise vimconn.vimconnConnectionException("Failed to connect vCD")
5813
5814 try:
5815 org, vdc = self.get_vdc_details()
5816 catalog = self.get_catalog_obj(image_id, catalogs)
5817 if catalog:
5818 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
5819 catalog_items = [items.attrib]
5820
5821 if len(catalog_items) == 1:
5822 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5823 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5824
5825 response = self.perform_request(req_type='GET',
5826 url=catalog_items[0].get('href'),
5827 headers=headers)
5828 catalogItem = XmlElementTree.fromstring(response.content)
5829 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
5830                vapp_template_href = entity.get("href")
5831 #get vapp details and parse moref id
5832
5833 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
5834 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
5835 'vmw': 'http://www.vmware.com/schema/ovf',
5836 'vm': 'http://www.vmware.com/vcloud/v1.5',
5837 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5838 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
5839 'xmlns':"http://www.vmware.com/vcloud/v1.5"
5840 }
5841
5842 if vca._session:
5843 response = self.perform_request(req_type='GET',
5844                                                    url=vapp_template_href,
5845 headers=headers)
5846
5847 if response.status_code != requests.codes.ok:
5848 self.logger.debug("REST API call {} failed. Return status code {}".format(
5849                                            vapp_template_href, response.status_code))
5850
5851 else:
5852 xmlroot_respond = XmlElementTree.fromstring(response.content)
5853 children_section = xmlroot_respond.find('vm:Children/', namespaces)
5854 if children_section is not None:
5855 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
5856 if vCloud_extension_section is not None:
5857 vm_vcenter_info = {}
5858 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
5859 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
5860 if vmext is not None:
5861 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
5862 parsed_response["vm_vcenter_info"]= vm_vcenter_info
5863
5864 except Exception as exp :
5865 self.logger.info("Error occurred while calling REST API to get vApp details: {}".format(exp))
5866
5867 return parsed_response
5868
5869
5870 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
5871 """
5872 Method to delete vApp
5873 Args :
5874 vapp_uuid - vApp UUID
5875 msg - Error message to be logged
5876 exp_type : Exception type
5877 Returns:
5878 None
5879 """
5880 if vapp_uuid:
5881 status = self.delete_vminstance(vapp_uuid)
5882 else:
5883 msg = "No vApp ID"
5884 self.logger.error(msg)
5885 if exp_type == "Genric":
5886 raise vimconn.vimconnException(msg)
5887 elif exp_type == "NotFound":
5888 raise vimconn.vimconnNotFoundException(message=msg)
5889
5890 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
5891 """
5892 Method to attach SRIOV adapters to VM
5893
5894 Args:
5895 vapp_uuid - uuid of vApp/VM
5896 sriov_nets - SRIOV devices information as specified in VNFD (flavor)
5897 vmname_andid - vmname
5898
5899 Returns:
5900 The status of the add SRIOV adapter task, vm object and
5901 vcenter_conect object
5902 """
5903 vm_obj = None
5904 vcenter_conect, content = self.get_vcenter_content()
5905 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5906
5907 if vm_moref_id:
5908 try:
5909 no_of_sriov_devices = len(sriov_nets)
5910 if no_of_sriov_devices > 0:
5911 #Get VM and its host
5912 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5913 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
5914 if host_obj and vm_obj:
5915 #get SRIOV devices from the host on which the vApp is currently installed
5916 avilable_sriov_devices = self.get_sriov_devices(host_obj,
5917 no_of_sriov_devices,
5918 )
5919
5920 if len(avilable_sriov_devices) == 0:
5921 #find other hosts with active pci devices
5922 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
5923 content,
5924 no_of_sriov_devices,
5925 )
5926
5927 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
5928 #Migrate vm to the host where SRIOV devices are available
5929 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
5930 new_host_obj))
5931 task = self.relocate_vm(new_host_obj, vm_obj)
5932 if task is not None:
5933 result = self.wait_for_vcenter_task(task, vcenter_conect)
5934 self.logger.info("Migrate VM status: {}".format(result))
5935 host_obj = new_host_obj
5936 else:
5937 self.logger.error("Failed to relocate VM {} to host {}".format(vmname_andid, new_host_obj))
5938 raise vimconn.vimconnNotFoundException(
5939 "Failed to migrate VM {} to host {}".format(
5940 vmname_andid,
5941 new_host_obj)
5942 )
5943
5944 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
5945 #Add SRIOV devices one by one
5946 for sriov_net in sriov_nets:
5947 network_name = sriov_net.get('net_id')
5948 dvs_portgr_name = self.create_dvPort_group(network_name)
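# Each SRIOV net gets its own distributed virtual portgroup; for VF/SR-IOV nets a VLAN ID is assigned via configure_vlanID() below.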
5949 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
5950 #add vlan ID, modify portgroup for vlan ID
5951 self.configure_vlanID(content, vcenter_conect, network_name)
5952
5953 task = self.add_sriov_to_vm(content,
5954 vm_obj,
5955 host_obj,
5956 network_name,
5957 avilable_sriov_devices[0]
5958 )
5959 if task:
5960 status= self.wait_for_vcenter_task(task, vcenter_conect)
5961 if status:
5962 self.logger.info("Added SRIOV {} to VM {}".format(
5963 no_of_sriov_devices,
5964 str(vm_obj)))
5965 else:
5966 self.logger.error("Failed to add SRIOV {} to VM {}".format(
5967 no_of_sriov_devices,
5968 str(vm_obj)))
5969 raise vimconn.vimconnUnexpectedResponse(
5970 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
5971 )
5972 return True, vm_obj, vcenter_conect
5973 else:
5974 self.logger.error("Currently there is no host with"\
5975 " {} available SRIOV "\
5976 "VFs required for VM {}".format(
5977 no_of_sriov_devices,
5978 vmname_andid)
5979 )
5980 raise vimconn.vimconnNotFoundException(
5981 "Currently there is no host with {} "\
5982 "available SRIOV devices required for VM {}".format(
5983 no_of_sriov_devices,
5984 vmname_andid))
5985 else:
5986 self.logger.debug("No information about SRIOV devices: {}".format(sriov_nets))
5987
5988 except vmodl.MethodFault as error:
5989 self.logger.error("Error occurred while adding SRIOV: {}".format(error))
5990 return None, vm_obj, vcenter_conect
5991
5992
5993 def get_sriov_devices(self,host, no_of_vfs):
5994 """
5995 Method to get the details of SRIOV devices on given host
5996 Args:
5997 host - vSphere host object
5998 no_of_vfs - number of VFs needed on host
5999
6000 Returns:
6001 array of SRIOV devices
6002 """
6003 sriovInfo=[]
6004 if host:
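# Return the first active SR-IOV capable device on the host that exposes at least no_of_vfs virtual functions.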
6005 for device in host.config.pciPassthruInfo:
6006 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
6007 if device.numVirtualFunction >= no_of_vfs:
6008 sriovInfo.append(device)
6009 break
6010 return sriovInfo
6011
6012
6013 def get_host_and_sriov_devices(self, content, no_of_vfs):
6014 """
6015 Method to get the details of SRIOV devices on all hosts
6016
6017 Args:
6018 content - vCenter content object
6019 no_of_vfs - number of pci VFs needed on host
6020
6021 Returns:
6022 array of SRIOV devices and host object
6023 """
6024 host_obj = None
6025 sriov_device_objs = None
6026 try:
6027 if content:
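# Walk all HostSystem objects known to vCenter and stop at the first host that has enough free SR-IOV VFs.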
6028 container = content.viewManager.CreateContainerView(content.rootFolder,
6029 [vim.HostSystem], True)
6030 for host in container.view:
6031 devices = self.get_sriov_devices(host, no_of_vfs)
6032 if devices:
6033 host_obj = host
6034 sriov_device_objs = devices
6035 break
6036 except Exception as exp:
6037 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
6038
6039 return host_obj,sriov_device_objs
6040
6041
6042 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
6043 """
6044 Method to add SRIOV adapter to vm
6045
6046 Args:
6047 host_obj - vSphere host object
6048 vm_obj - vSphere vm object
6049 content - vCenter content object
6050 network_name - name of distributed virtual portgroup
6051 sriov_device - SRIOV device info
6052
6053 Returns:
6054 task object
6055 """
6056 devices = []
6057 vnic_label = "sriov nic"
6058 try:
6059 dvs_portgr = self.get_dvport_group(network_name)
6060 network_name = dvs_portgr.name
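# Build a VirtualSriovEthernetCard device spec: network backing from the distributed portgroup, SR-IOV backing from the physical function of the selected device.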
6061 nic = vim.vm.device.VirtualDeviceSpec()
6062 # VM device
6063 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
6064 nic.device = vim.vm.device.VirtualSriovEthernetCard()
6065 nic.device.addressType = 'assigned'
6066 #nic.device.key = 13016
6067 nic.device.deviceInfo = vim.Description()
6068 nic.device.deviceInfo.label = vnic_label
6069 nic.device.deviceInfo.summary = network_name
6070 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
6071
6072 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
6073 nic.device.backing.deviceName = network_name
6074 nic.device.backing.useAutoDetect = False
6075 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
6076 nic.device.connectable.startConnected = True
6077 nic.device.connectable.allowGuestControl = True
6078
6079 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
6080 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
6081 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
6082
6083 devices.append(nic)
6084 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
6085 task = vm_obj.ReconfigVM_Task(vmconf)
6086 return task
6087 except Exception as exp:
6088 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
6089 return None
6090
6091
6092 def create_dvPort_group(self, network_name):
6093 """
6094 Method to create distributed virtual portgroup
6095
6096 Args:
6097 network_name - name of network/portgroup
6098
6099 Returns:
6100 portgroup key
6101 """
6102 try:
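# A UUID suffix is appended to the requested name so every portgroup created on the DVS (self.dvs_name) is unique.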
6103 new_network_name = [network_name, '-', str(uuid.uuid4())]
6104 network_name=''.join(new_network_name)
6105 vcenter_conect, content = self.get_vcenter_content()
6106
6107 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
6108 if dv_switch:
6109 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6110 dv_pg_spec.name = network_name
6111
6112 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
6113 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6114 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
6115 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
6116 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
6117 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
6118
6119 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
6120 self.wait_for_vcenter_task(task, vcenter_conect)
6121
6122 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
6123 if dvPort_group:
6124 self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
6125 return dvPort_group.key
6126 else:
6127 self.logger.debug("No distributed virtual switch found with name {}".format(network_name))
6128
6129 except Exception as exp:
6130 self.logger.error("Error occurred while creating distributed virtual port group {}"\
6131 " : {}".format(network_name, exp))
6132 return None
6133
6134 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
6135 """
6136 Method to reconfigure distributed virtual portgroup
6137
6138 Args:
6139 dvPort_group_name - name of distributed virtual portgroup
6140 content - vCenter content object
6141 config_info - distributed virtual portgroup configuration
6142
6143 Returns:
6144 task object
6145 """
6146 try:
6147 dvPort_group = self.get_dvport_group(dvPort_group_name)
6148 if dvPort_group:
6149 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6150 dv_pg_spec.configVersion = dvPort_group.config.configVersion
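# configVersion must match the portgroup's current configuration; vCenter uses it to detect concurrent modifications.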
6151 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6152 if "vlanID" in config_info:
6153 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
6154 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
6155
6156 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
6157 return task
6158 else:
6159 return None
6160 except Exception as exp:
6161 self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
6162 " : {}".format(dvPort_group_name, exp))
6163 return None
6164
6165
6166 def destroy_dvport_group(self , dvPort_group_name):
6167 """
6168 Method to destroy distributed virtual portgroup
6169
6170 Args:
6171 dvPort_group_name - name of distributed virtual portgroup
6172
6173 Returns:
6174 Result of the destroy task if the portgroup was found, else None
6175 """
6176 vcenter_conect, content = self.get_vcenter_content()
6177 try:
6178 status = None
6179 dvPort_group = self.get_dvport_group(dvPort_group_name)
6180 if dvPort_group:
6181 task = dvPort_group.Destroy_Task()
6182 status = self.wait_for_vcenter_task(task, vcenter_conect)
6183 return status
6184 except vmodl.MethodFault as exp:
6185 self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
6186 exp, dvPort_group_name))
6187 return None
6188
6189
6190 def get_dvport_group(self, dvPort_group_name):
6191 """
6192 Method to get distributed virtual portgroup
6193
6194 Args:
6195 dvPort_group_name - name of the distributed virtual portgroup
6196
6197 Returns:
6198 portgroup object
6199 """
6200 vcenter_conect, content = self.get_vcenter_content()
6201 dvPort_group = None
6202 try:
6203 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
6204 for item in container.view:
6205 if item.key == dvPort_group_name:
6206 dvPort_group = item
6207 break
6208 return dvPort_group
6209 except vmodl.MethodFault as exp:
6210 self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
6211 exp, dvPort_group_name))
6212 return None
6213
6214 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
6215 """
6216 Method to get distributed virtual portgroup vlanID
6217
6218 Args:
6219 network_name - name of network/portgroup
6220
6221 Returns:
6222 vlan ID
6223 """
6224 vlanId = None
6225 try:
6226 dvPort_group = self.get_dvport_group(dvPort_group_name)
6227 if dvPort_group:
6228 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
6229 except vmodl.MethodFault as exp:
6230 self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
6231 exp, dvPort_group_name))
6232 return vlanId
6233
6234
6235 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
6236 """
6237 Method to configure vlanID in distributed virtual portgroup
6238
6239 Args:
6240 network_name - name of network/portgroup
6241
6242 Returns:
6243 None
6244 """
6245 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
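# vlanId 0 means the portgroup has no VLAN tag yet, so allocate one from the configured 'vlanID_range' and reconfigure the portgroup.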
6246 if vlanID == 0:
6247 #configure vlanID
6248 vlanID = self.genrate_vlanID(dvPort_group_name)
6249 config = {"vlanID":vlanID}
6250 task = self.reconfig_portgroup(content, dvPort_group_name,
6251 config_info=config)
6252 if task:
6253 status= self.wait_for_vcenter_task(task, vcenter_conect)
6254 if status:
6255 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
6256 dvPort_group_name,vlanID))
6257 else:
6258 self.logger.error("Failed to reconfigure portgroup {} for vlanID {}".format(
6259 dvPort_group_name, vlanID))
6260
6261
6262 def genrate_vlanID(self, network_name):
6263 """
6264 Method to get unused vlanID
6265 Args:
6266 network_name - name of network/portgroup
6267 Returns:
6268 vlanID
6269 """
6270 vlan_id = None
6271 used_ids = []
6272 if self.config.get('vlanID_range') is None:
6273 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
6274 "in the config before creating an SRIOV network with a VLAN tag")
6275 if "used_vlanIDs" not in self.persistent_info:
6276 self.persistent_info["used_vlanIDs"] = {}
6277 else:
6278 used_ids = self.persistent_info["used_vlanIDs"].values()
6279 #For python3
6280 #used_ids = list(self.persistent_info["used_vlanIDs"].values())
6281
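# Each 'vlanID_range' entry is a string like "100-200"; pick the first ID in a range that is not already recorded in persistent_info.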
6282 for vlanID_range in self.config.get('vlanID_range'):
6283 start_vlanid, end_vlanid = vlanID_range.split("-")
6284 if int(start_vlanid) > int(end_vlanid):
6285 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
6286 vlanID_range))
6287
6288 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
6289 #For python3
6290 #for id in range(int(start_vlanid), int(end_vlanid) + 1):
6291 if id not in used_ids:
6292 vlan_id = id
6293 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
6294 return vlan_id
6295 if vlan_id is None:
6296 raise vimconn.vimconnConflictException("All VLAN IDs are in use")
6297
6298
6299 def get_obj(self, content, vimtype, name):
6300 """
6301 Get the vsphere object associated with a given text name
6302 """
6303 obj = None
6304 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
6305 for item in container.view:
6306 if item.name == name:
6307 obj = item
6308 break
6309 return obj
6310
6311
6312 def insert_media_to_vm(self, vapp, image_id):
6313 """
6314 Method to insert media CD-ROM (ISO image) from catalog to vm.
6315 vapp - vapp object to get vm id
6316 image_id - image id for CD-ROM to be inserted to vm
6317 """
6318 # create connection object
6319 vca = self.connect()
6320 try:
6321 # fetching catalog details
6322 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
6323 if vca._session:
6324 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6325 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6326 response = self.perform_request(req_type='GET',
6327 url=rest_url,
6328 headers=headers)
6329
6330 if response.status_code != 200:
6331 self.logger.error("REST call {} failed, reason: {} "\
6332 "status code: {}".format(rest_url,
6333 response.content,
6334 response.status_code))
6335 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
6336 "catalog details")
6337 # searching iso name and id
6338 iso_name,media_id = self.get_media_details(vca, response.content)
6339
6340 if iso_name and media_id:
6341 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6342 <ns6:MediaInsertOrEjectParams
6343 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
6344 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6345 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
6346 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6347 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
6348 xmlns:ns7="http://www.vmware.com/schema/ovf"
6349 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
6350 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
6351 <ns6:Media
6352 type="application/vnd.vmware.vcloud.media+xml"
6353 name="{}"
6354 id="urn:vcloud:media:{}"
6355 href="https://{}/api/media/{}"/>
6356 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
6357 self.url,media_id)
6358
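# Insert the ISO into every VM of the vApp; each insertMedia call is asynchronous (202 Accepted) and is waited on via the task monitor.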
6359 for vms in vapp.get_all_vms():
6360 vm_id = vms.get('id').split(':')[-1]
6361
6362 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
6363 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
6364
6365 response = self.perform_request(req_type='POST',
6366 url=rest_url,
6367 data=data,
6368 headers=headers)
6369
6370 if response.status_code != 202:
6371 error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
6372 "Status code {}".format(response.text, response.status_code)
6373 self.logger.error(error_msg)
6374 raise vimconn.vimconnException(error_msg)
6375 else:
6376 task = self.get_task_from_response(response.content)
6377 result = self.client.get_task_monitor().wait_for_success(task=task)
6378 if result.get('status') == 'success':
6379 self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
6380 " image to vm {}".format(vm_id))
6381
6382 except Exception as exp:
6383 self.logger.error("insert_media_to_vm() : exception occurred "\
6384 "while inserting media CD-ROM")
6385 raise vimconn.vimconnException(message=exp)
6386
6387
6388 def get_media_details(self, vca, content):
6389 """
6390 Method to get catalog item details
6391 vca - connection object
6392 content - Catalog details
6393 Return - Media name, media id
6394 """
6395 cataloghref_list = []
6396 try:
6397 if content:
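# Collect the href of every CatalogItem in the catalog, then resolve each one looking for a media entity.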
6398 vm_list_xmlroot = XmlElementTree.fromstring(content)
6399 for child in vm_list_xmlroot.iter():
6400 if 'CatalogItem' in child.tag:
6401 cataloghref_list.append(child.attrib.get('href'))
6402 if cataloghref_list is not None:
6403 for href in cataloghref_list:
6404 if href:
6405 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6406 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6407 response = self.perform_request(req_type='GET',
6408 url=href,
6409 headers=headers)
6410 if response.status_code != 200:
6411 self.logger.error("REST call {} failed, reason: {} "\
6412 "status code: {}".format(href,
6413 response.content,
6414 response.status_code))
6415 raise vimconn.vimconnException("get_media_details : Failed to get "\
6416 "catalogitem details")
6417 list_xmlroot = XmlElementTree.fromstring(response.content)
6418 for child in list_xmlroot.iter():
6419 if 'Entity' in child.tag:
6420 if 'media' in child.attrib.get('href'):
6421 name = child.attrib.get('name')
6422 media_id = child.attrib.get('href').split('/').pop()
6423 return name,media_id
6424 else:
6425 self.logger.debug("Media name and id not found")
6426 return False,False
6427 except Exception as exp:
6428 self.logger.error("get_media_details : exception occurred "\
6429 "getting media details")
6430 raise vimconn.vimconnException(message=exp)
6431
6432
6433 def retry_rest(self, method, url, add_headers=None, data=None):
6434 """ Method to get Token & retry respective REST request
6435 Args:
6436 method - REST method, one of 'GET', 'PUT', 'POST' or 'DELETE'
6437 url - request url to be used
6438 add_headers - Additional headers (optional)
6439 data - Request payload data to be passed in request
6440 Returns:
6441 response - Response of request
6442 """
6443 response = None
6444
6445 #Get token
6446 self.get_token()
6447
6448 if self.client._session:
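# Rebuild the x-vcloud-authorization header from the freshly obtained session and replay the original request.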
6449 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6450 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6451
6452 if add_headers:
6453 headers.update(add_headers)
6454
6455 if method == 'GET':
6456 response = self.perform_request(req_type='GET',
6457 url=url,
6458 headers=headers)
6459 elif method == 'PUT':
6460 response = self.perform_request(req_type='PUT',
6461 url=url,
6462 headers=headers,
6463 data=data)
6464 elif method == 'POST':
6465 response = self.perform_request(req_type='POST',
6466 url=url,
6467 headers=headers,
6468 data=data)
6469 elif method == 'DELETE':
6470 response = self.perform_request(req_type='DELETE',
6471 url=url,
6472 headers=headers)
6473 return response
6474
6475
6476 def get_token(self):
6477 """ Generate a new token if expired
6478
6479 Returns:
6480 None; the refreshed client object is stored in self.client and is later used to connect to vCloud Director as admin for the VDC
6481 """
6482 try:
6483 self.logger.debug("Generating token for org {} as user {}.".format(self.org_name,
6484 self.user))
6486 host = self.url
6487 client = Client(host, verify_ssl_certs=False)
6488 client.set_highest_supported_version()
6489 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
6490 # connection object
6491 self.client = client
6492
6493 except Exception:
6494 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
6495 "{} as user: {}".format(self.org_name, self.user))
6496
6497 if not client:
6498 raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
6499
6500
6501 def get_vdc_details(self):
6502 """ Get VDC details using pyVcloud Lib
6503
6504 Returns org and vdc object
6505 """
6506 vdc = None
6507 try:
6508 org = Org(self.client, resource=self.client.get_org())
6509 vdc = org.get_vdc(self.tenant_name)
6510 except Exception as e:
6511 # pyvcloud does not raise a specific exception here, refresh the token nevertheless
6512 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
6513
6514 #Retry once, if failed by refreshing token
6515 if vdc is None:
6516 self.get_token()
6517 org = Org(self.client, resource=self.client.get_org())
6518 vdc = org.get_vdc(self.tenant_name)
6519
6520 return org, vdc
6521
6522
6523 def perform_request(self, req_type, url, headers=None, data=None):
6524 """Perform the POST/PUT/GET/DELETE request."""
6525
6526 #Log REST request details
6527 self.log_request(req_type, url=url, headers=headers, data=data)
6528 # perform request and return its result
6529 if req_type == 'GET':
6530 response = requests.get(url=url,
6531 headers=headers,
6532 verify=False)
6533 elif req_type == 'PUT':
6534 response = requests.put(url=url,
6535 headers=headers,
6536 data=data,
6537 verify=False)
6538 elif req_type == 'POST':
6539 response = requests.post(url=url,
6540 headers=headers,
6541 data=data,
6542 verify=False)
6543 elif req_type == 'DELETE':
6544 response = requests.delete(url=url,
6545 headers=headers,
6546 verify=False)
6547 #Log the REST response
6548 self.log_response(response)
6549
6550 return response
6551
6552
6553 def log_request(self, req_type, url=None, headers=None, data=None):
6554 """Logs REST request details"""
6555
6556 if req_type is not None:
6557 self.logger.debug("Request type: {}".format(req_type))
6558
6559 if url is not None:
6560 self.logger.debug("Request url: {}".format(url))
6561
6562 if headers is not None:
6563 for header in headers:
6564 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
6565
6566 if data is not None:
6567 self.logger.debug("Request data: {}".format(data))
6568
6569
6570 def log_response(self, response):
6571 """Logs REST response details"""
6572
6573 self.logger.debug("Response status code: {} ".format(response.status_code))
6574
6575
6576 def get_task_from_response(self, content):
6577 """
6578 content - API response content(response.content)
6579 return task object
6580 """
6581 xmlroot = XmlElementTree.fromstring(content)
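# The response is either a Task element itself or contains a Tasks collection; return the first task found.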
6582 if xmlroot.tag.split('}')[1] == "Task":
6583 return xmlroot
6584 else:
6585 for ele in xmlroot:
6586 if ele.tag.split("}")[1] == "Tasks":
6587 task = ele[0]
6588 break
6589 return task
6590
6591
6592 def power_on_vapp(self,vapp_id, vapp_name):
6593 """
6594 vapp_id - vApp uuid
6595 vapp_name - vApp name
6596 return - Task object
6597 """
6598 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6599 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6600
6601 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
6602 vapp_id)
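# powerOn is asynchronous: vCD responds 202 Accepted and the body contains the Task to monitor.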
6603 response = self.perform_request(req_type='POST',
6604 url=poweron_href,
6605 headers=headers)
6606
6607 if response.status_code != 202:
6608 self.logger.error("REST call {} failed, reason: {} "\
6609 "status code: {}".format(poweron_href,
6610 response.content,
6611 response.status_code))
6612 raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
6613 "vApp {}".format(vapp_name))
6614 else:
6615 poweron_task = self.get_task_from_response(response.content)
6616 return poweron_task
6617
6618