b46bfd3210022e34c013679482677481ecfa18cd
[osm/RO.git] / RO-VIM-vmware / osm_rovim_vmware / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 from osm_ro import vimconn
31 import os
32 import shutil
33 import subprocess
34 import tempfile
35 import traceback
36 import requests
37 import ssl
38 import atexit
39
40 from pyVmomi import vim, vmodl
41 from pyVim.connect import SmartConnect, Disconnect
42
43 from xml.etree import ElementTree as XmlElementTree
44 from lxml import etree as lxmlElementTree
45
46 import yaml
47 from pyvcloud.vcd.client import BasicLoginCredentials,Client,VcdTaskException
48 from pyvcloud.vcd.vdc import VDC
49 from pyvcloud.vcd.org import Org
50 import re
51 from pyvcloud.vcd.vapp import VApp
52 from xml.sax.saxutils import escape
53 import logging
54 import json
55 import time
56 import uuid
57 # import httplib
58 #For python3
59 #import http.client # TODO py3 check
60 import hashlib
61 import socket
62 import struct
63 import netaddr
64 import random
65
66 # global variable for vcd connector type
67 STANDALONE = 'standalone'
68
69 # key for flavor dicts
70 FLAVOR_RAM_KEY = 'ram'
71 FLAVOR_VCPUS_KEY = 'vcpus'
72 FLAVOR_DISK_KEY = 'disk'
73 DEFAULT_IP_PROFILE = {'dhcp_count':50,
74 'dhcp_enabled':True,
75 'ip_version':"IPv4"
76 }
77 # global variable for wait time
78 INTERVAL_TIME = 5
79 MAX_WAIT_TIME = 1800
80
81 API_VERSION = '27.0'
82
83 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
84 __date__ = "$09-Mar-2018 11:09:29$"
85 __version__ = '0.2'
86
87 # -1: "Could not be created",
88 # 0: "Unresolved",
89 # 1: "Resolved",
90 # 2: "Deployed",
91 # 3: "Suspended",
92 # 4: "Powered on",
93 # 5: "Waiting for user input",
94 # 6: "Unknown state",
95 # 7: "Unrecognized state",
96 # 8: "Powered off",
97 # 9: "Inconsistent state",
98 # 10: "Children do not all have the same status",
99 # 11: "Upload initiated, OVF descriptor pending",
100 # 12: "Upload initiated, copying contents",
101 # 13: "Upload initiated , disk contents pending",
102 # 14: "Upload has been quarantined",
103 # 15: "Upload quarantine period has expired"
104
105 # mapping vCD status to MANO
106 vcdStatusCode2manoFormat = {4: 'ACTIVE',
107 7: 'PAUSED',
108 3: 'SUSPENDED',
109 8: 'INACTIVE',
110 12: 'BUILD',
111 -1: 'ERROR',
112 14: 'DELETED'}
113
114 #
115 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
116 'ERROR': 'ERROR', 'DELETED': 'DELETED'
117 }
118
119
120 class vimconnector(vimconn.vimconnector):
121 # dict used to store flavor in memory
122 flavorlist = {}
123
124 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
125 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
126 """
127 Constructor create vmware connector to vCloud director.
128
129 By default construct doesn't validate connection state. So client can create object with None arguments.
130 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
131
132 a) It initialize organization UUID
133 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
134
135 Args:
136 uuid - is organization uuid.
137 name - is organization name that must be presented in vCloud director.
138 tenant_id - is VDC uuid it must be presented in vCloud director
139 tenant_name - is VDC name.
140 url - is hostname or ip address of vCloud director
141 url_admin - same as above.
142 user - is user that administrator for organization. Caller must make sure that
143 username has right privileges.
144
145 password - is password for a user.
146
147 VMware connector also requires PVDC administrative privileges and separate account.
148 This variables must be passed via config argument dict contains keys
149
150 dict['admin_username']
151 dict['admin_password']
152 config - Provide NSX and vCenter information
153
154 Returns:
155 Nothing.
156 """
157
158 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
159 url_admin, user, passwd, log_level, config)
160
161 self.logger = logging.getLogger('openmano.vim.vmware')
162 self.logger.setLevel(10)
163 self.persistent_info = persistent_info
164
165 self.name = name
166 self.id = uuid
167 self.url = url
168 self.url_admin = url_admin
169 self.tenant_id = tenant_id
170 self.tenant_name = tenant_name
171 self.user = user
172 self.passwd = passwd
173 self.config = config
174 self.admin_password = None
175 self.admin_user = None
176 self.org_name = ""
177 self.nsx_manager = None
178 self.nsx_user = None
179 self.nsx_password = None
180 self.availability_zone = None
181
182 # Disable warnings from self-signed certificates.
183 requests.packages.urllib3.disable_warnings()
184
185 if tenant_name is not None:
186 orgnameandtenant = tenant_name.split(":")
187 if len(orgnameandtenant) == 2:
188 self.tenant_name = orgnameandtenant[1]
189 self.org_name = orgnameandtenant[0]
190 else:
191 self.tenant_name = tenant_name
192 if "orgname" in config:
193 self.org_name = config['orgname']
194
195 if log_level:
196 self.logger.setLevel(getattr(logging, log_level))
197
198 try:
199 self.admin_user = config['admin_username']
200 self.admin_password = config['admin_password']
201 except KeyError:
202 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
203
204 try:
205 self.nsx_manager = config['nsx_manager']
206 self.nsx_user = config['nsx_user']
207 self.nsx_password = config['nsx_password']
208 except KeyError:
209 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
210
211 self.vcenter_ip = config.get("vcenter_ip", None)
212 self.vcenter_port = config.get("vcenter_port", None)
213 self.vcenter_user = config.get("vcenter_user", None)
214 self.vcenter_password = config.get("vcenter_password", None)
215
216 #Set availability zone for Affinity rules
217 self.availability_zone = self.set_availability_zones()
218
219 # ############# Stub code for SRIOV #################
220 # try:
221 # self.dvs_name = config['dv_switch_name']
222 # except KeyError:
223 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
224 #
225 # self.vlanID_range = config.get("vlanID_range", None)
226
227 self.org_uuid = None
228 self.client = None
229
230 if not url:
231 raise vimconn.vimconnException('url param can not be NoneType')
232
233 if not self.url_admin: # try to use normal url
234 self.url_admin = self.url
235
236 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
237 self.tenant_id, self.tenant_name))
238 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
239 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
240
241 # initialize organization
242 if self.user is not None and self.passwd is not None and self.url:
243 self.init_organization()
244
245 def __getitem__(self, index):
246 if index == 'name':
247 return self.name
248 if index == 'tenant_id':
249 return self.tenant_id
250 if index == 'tenant_name':
251 return self.tenant_name
252 elif index == 'id':
253 return self.id
254 elif index == 'org_name':
255 return self.org_name
256 elif index == 'org_uuid':
257 return self.org_uuid
258 elif index == 'user':
259 return self.user
260 elif index == 'passwd':
261 return self.passwd
262 elif index == 'url':
263 return self.url
264 elif index == 'url_admin':
265 return self.url_admin
266 elif index == "config":
267 return self.config
268 else:
269 raise KeyError("Invalid key '{}'".format(index))
270
271 def __setitem__(self, index, value):
272 if index == 'name':
273 self.name = value
274 if index == 'tenant_id':
275 self.tenant_id = value
276 if index == 'tenant_name':
277 self.tenant_name = value
278 elif index == 'id':
279 self.id = value
280 elif index == 'org_name':
281 self.org_name = value
282 elif index == 'org_uuid':
283 self.org_uuid = value
284 elif index == 'user':
285 self.user = value
286 elif index == 'passwd':
287 self.passwd = value
288 elif index == 'url':
289 self.url = value
290 elif index == 'url_admin':
291 self.url_admin = value
292 else:
293 raise KeyError("Invalid key '{}'".format(index))
294
295 def connect_as_admin(self):
296 """ Method connect as pvdc admin user to vCloud director.
297 There are certain action that can be done only by provider vdc admin user.
298 Organization creation / provider network creation etc.
299
300 Returns:
301 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
302 """
303 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
304
305 try:
306 host = self.url
307 org = 'System'
308 client_as_admin = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
309 client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
310 except Exception as e:
311 raise vimconn.vimconnException(
312 "Can't connect to vCloud director as: {} with exception {}".format(self.admin_user, e))
313
314 return client_as_admin
315
316 def connect(self):
317 """ Method connect as normal user to vCloud director.
318
319 Returns:
320 The return client object that latter can be used to connect to vCloud director as admin for VDC
321 """
322 try:
323 self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
324 self.user,
325 self.org_name))
326 host = self.url
327 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
328 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
329 except Exception as e:
330 raise vimconn.vimconnConnectionException("Can't connect to vCloud director org: "
331 "{} as user {} with exception: {}".format(self.org_name,
332 self.user,
333 e))
334
335 return client
336
337 def init_organization(self):
338 """ Method initialize organization UUID and VDC parameters.
339
340 At bare minimum client must provide organization name that present in vCloud director and VDC.
341
342 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
343 The Org - UUID will be initialized at the run time if data center present in vCloud director.
344
345 Returns:
346 The return vca object that letter can be used to connect to vcloud direct as admin
347 """
348 client = self.connect()
349 if not client:
350 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
351
352 self.client = client
353 try:
354 if self.org_uuid is None:
355 org_list = client.get_org_list()
356 for org in org_list.Org:
357 # we set org UUID at the init phase but we can do it only when we have valid credential.
358 if org.get('name') == self.org_name:
359 self.org_uuid = org.get('href').split('/')[-1]
360 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
361 break
362 else:
363 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
364
365 # if well good we require for org details
366 org_details_dict = self.get_org(org_uuid=self.org_uuid)
367
368 # we have two case if we want to initialize VDC ID or VDC name at run time
369 # tenant_name provided but no tenant id
370 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
371 vdcs_dict = org_details_dict['vdcs']
372 for vdc in vdcs_dict:
373 if vdcs_dict[vdc] == self.tenant_name:
374 self.tenant_id = vdc
375 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
376 self.org_name))
377 break
378 else:
379 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
380 # case two we have tenant_id but we don't have tenant name so we find and set it.
381 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
382 vdcs_dict = org_details_dict['vdcs']
383 for vdc in vdcs_dict:
384 if vdc == self.tenant_id:
385 self.tenant_name = vdcs_dict[vdc]
386 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
387 self.org_name))
388 break
389 else:
390 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
391 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
392 except Exception as e:
393 self.logger.debug("Failed initialize organization UUID for org {}: {}".format(self.org_name), e)
394 self.logger.debug(traceback.format_exc())
395 self.org_uuid = None
396
397 def new_tenant(self, tenant_name=None, tenant_description=None):
398 """ Method adds a new tenant to VIM with this name.
399 This action requires access to create VDC action in vCloud director.
400
401 Args:
402 tenant_name is tenant_name to be created.
403 tenant_description not used for this call
404
405 Return:
406 returns the tenant identifier in UUID format.
407 If action is failed method will throw vimconn.vimconnException method
408 """
409 vdc_task = self.create_vdc(vdc_name=tenant_name)
410 if vdc_task is not None:
411 vdc_uuid, value = vdc_task.popitem()
412 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
413 return vdc_uuid
414 else:
415 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
416
417 def delete_tenant(self, tenant_id=None):
418 """ Delete a tenant from VIM
419 Args:
420 tenant_id is tenant_id to be deleted.
421
422 Return:
423 returns the tenant identifier in UUID format.
424 If action is failed method will throw exception
425 """
426 vca = self.connect_as_admin()
427 if not vca:
428 raise vimconn.vimconnConnectionException("Failed to connect vCD")
429
430 if tenant_id is not None:
431 if vca._session:
432 #Get OrgVDC
433 url_list = [self.url, '/api/vdc/', tenant_id]
434 orgvdc_herf = ''.join(url_list)
435
436 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
437 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
438 response = self.perform_request(req_type='GET',
439 url=orgvdc_herf,
440 headers=headers)
441
442 if response.status_code != requests.codes.ok:
443 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
444 "Return status code {}".format(orgvdc_herf,
445 response.status_code))
446 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
447
448 lxmlroot_respond = lxmlElementTree.fromstring(response.text)
449 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
450 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
451 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
452 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
453
454 response = self.perform_request(req_type='DELETE',
455 url=vdc_remove_href,
456 headers=headers)
457
458 if response.status_code == 202:
459 time.sleep(5)
460 return tenant_id
461 else:
462 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
463 "Return status code {}".format(vdc_remove_href,
464 response.status_code))
465 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
466 else:
467 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
468 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
469
470
471 def get_tenant_list(self, filter_dict={}):
472 """Obtain tenants of VIM
473 filter_dict can contain the following keys:
474 name: filter by tenant name
475 id: filter by tenant uuid/id
476 <other VIM specific>
477 Returns the tenant list of dictionaries:
478 [{'name':'<name>, 'id':'<id>, ...}, ...]
479
480 """
481 org_dict = self.get_org(self.org_uuid)
482 vdcs_dict = org_dict['vdcs']
483
484 vdclist = []
485 try:
486 for k in vdcs_dict:
487 entry = {'name': vdcs_dict[k], 'id': k}
488 # if caller didn't specify dictionary we return all tenants.
489 if filter_dict is not None and filter_dict:
490 filtered_entry = entry.copy()
491 filtered_dict = set(entry.keys()) - set(filter_dict)
492 for unwanted_key in filtered_dict: del entry[unwanted_key]
493 if filter_dict == entry:
494 vdclist.append(filtered_entry)
495 else:
496 vdclist.append(entry)
497 except:
498 self.logger.debug("Error in get_tenant_list()")
499 self.logger.debug(traceback.format_exc())
500 raise vimconn.vimconnException("Incorrect state. {}")
501
502 return vdclist
503
504 def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
505 """Adds a tenant network to VIM
506 Params:
507 'net_name': name of the network
508 'net_type': one of:
509 'bridge': overlay isolated network
510 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
511 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
512 'ip_profile': is a dict containing the IP parameters of the network
513 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
514 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
515 'gateway_address': (Optional) ip_schema, that is X.X.X.X
516 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
517 'dhcp_enabled': True or False
518 'dhcp_start_address': ip_schema, first IP to grant
519 'dhcp_count': number of IPs to grant.
520 'shared': if this network can be seen/use by other tenants/organization
521 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_netowrk}
522 Returns a tuple with the network identifier and created_items, or raises an exception on error
523 created_items can be None or a dictionary where this method can include key-values that will be passed to
524 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
525 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
526 as not present.
527 """
528
529 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}"
530 .format(net_name, net_type, ip_profile, shared, provider_network_profile))
531 vlan = None
532 if provider_network_profile:
533 vlan = provider_network_profile.get("segmentation-id")
534
535 created_items = {}
536 isshared = 'false'
537 if shared:
538 isshared = 'true'
539
540 # ############# Stub code for SRIOV #################
541 # if net_type == "data" or net_type == "ptp":
542 # if self.config.get('dv_switch_name') == None:
543 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
544 # network_uuid = self.create_dvPort_group(net_name)
545 parent_network_uuid = None
546
547 if provider_network_profile is not None:
548 for k, v in provider_network_profile.items():
549 if k == 'physical_network':
550 parent_network_uuid = self.get_physical_network_by_name(v)
551
552 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
553 ip_profile=ip_profile, isshared=isshared,
554 parent_network_uuid=parent_network_uuid)
555 if network_uuid is not None:
556 return network_uuid, created_items
557 else:
558 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
559
560 def get_vcd_network_list(self):
561 """ Method available organization for a logged in tenant
562
563 Returns:
564 The return vca object that letter can be used to connect to vcloud direct as admin
565 """
566
567 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
568
569 if not self.tenant_name:
570 raise vimconn.vimconnConnectionException("Tenant name is empty.")
571
572 org, vdc = self.get_vdc_details()
573 if vdc is None:
574 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
575
576 vdc_uuid = vdc.get('id').split(":")[3]
577 if self.client._session:
578 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
579 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
580 response = self.perform_request(req_type='GET',
581 url=vdc.get('href'),
582 headers=headers)
583 if response.status_code != 200:
584 self.logger.error("Failed to get vdc content")
585 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
586 else:
587 content = XmlElementTree.fromstring(response.text)
588
589 network_list = []
590 try:
591 for item in content:
592 if item.tag.split('}')[-1] == 'AvailableNetworks':
593 for net in item:
594 response = self.perform_request(req_type='GET',
595 url=net.get('href'),
596 headers=headers)
597
598 if response.status_code != 200:
599 self.logger.error("Failed to get network content")
600 raise vimconn.vimconnNotFoundException("Failed to get network content")
601 else:
602 net_details = XmlElementTree.fromstring(response.text)
603
604 filter_dict = {}
605 net_uuid = net_details.get('id').split(":")
606 if len(net_uuid) != 4:
607 continue
608 else:
609 net_uuid = net_uuid[3]
610 # create dict entry
611 self.logger.debug("get_vcd_network_list(): Adding network {} "
612 "to a list vcd id {} network {}".format(net_uuid,
613 vdc_uuid,
614 net_details.get('name')))
615 filter_dict["name"] = net_details.get('name')
616 filter_dict["id"] = net_uuid
617 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
618 shared = True
619 else:
620 shared = False
621 filter_dict["shared"] = shared
622 filter_dict["tenant_id"] = vdc_uuid
623 if int(net_details.get('status')) == 1:
624 filter_dict["admin_state_up"] = True
625 else:
626 filter_dict["admin_state_up"] = False
627 filter_dict["status"] = "ACTIVE"
628 filter_dict["type"] = "bridge"
629 network_list.append(filter_dict)
630 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
631 except:
632 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
633 pass
634
635 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
636 return network_list
637
638 def get_network_list(self, filter_dict={}):
639 """Obtain tenant networks of VIM
640 Filter_dict can be:
641 name: network name OR/AND
642 id: network uuid OR/AND
643 shared: boolean OR/AND
644 tenant_id: tenant OR/AND
645 admin_state_up: boolean
646 status: 'ACTIVE'
647
648 [{key : value , key : value}]
649
650 Returns the network list of dictionaries:
651 [{<the fields at Filter_dict plus some VIM specific>}, ...]
652 List can be empty
653 """
654
655 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
656
657 if not self.tenant_name:
658 raise vimconn.vimconnConnectionException("Tenant name is empty.")
659
660 org, vdc = self.get_vdc_details()
661 if vdc is None:
662 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
663
664 try:
665 vdcid = vdc.get('id').split(":")[3]
666
667 if self.client._session:
668 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
669 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
670 response = self.perform_request(req_type='GET',
671 url=vdc.get('href'),
672 headers=headers)
673 if response.status_code != 200:
674 self.logger.error("Failed to get vdc content")
675 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
676 else:
677 content = XmlElementTree.fromstring(response.text)
678
679 network_list = []
680 for item in content:
681 if item.tag.split('}')[-1] == 'AvailableNetworks':
682 for net in item:
683 response = self.perform_request(req_type='GET',
684 url=net.get('href'),
685 headers=headers)
686
687 if response.status_code != 200:
688 self.logger.error("Failed to get network content")
689 raise vimconn.vimconnNotFoundException("Failed to get network content")
690 else:
691 net_details = XmlElementTree.fromstring(response.text)
692
693 filter_entry = {}
694 net_uuid = net_details.get('id').split(":")
695 if len(net_uuid) != 4:
696 continue
697 else:
698 net_uuid = net_uuid[3]
699 # create dict entry
700 self.logger.debug("get_network_list(): Adding net {}"
701 " to a list vcd id {} network {}".format(net_uuid,
702 vdcid,
703 net_details.get('name')))
704 filter_entry["name"] = net_details.get('name')
705 filter_entry["id"] = net_uuid
706 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
707 shared = True
708 else:
709 shared = False
710 filter_entry["shared"] = shared
711 filter_entry["tenant_id"] = vdcid
712 if int(net_details.get('status')) == 1:
713 filter_entry["admin_state_up"] = True
714 else:
715 filter_entry["admin_state_up"] = False
716 filter_entry["status"] = "ACTIVE"
717 filter_entry["type"] = "bridge"
718 filtered_entry = filter_entry.copy()
719
720 if filter_dict is not None and filter_dict:
721 # we remove all the key : value we don't care and match only
722 # respected field
723 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
724 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
725 if filter_dict == filter_entry:
726 network_list.append(filtered_entry)
727 else:
728 network_list.append(filtered_entry)
729 except Exception as e:
730 self.logger.debug("Error in get_network_list",exc_info=True)
731 if isinstance(e, vimconn.vimconnException):
732 raise
733 else:
734 raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
735
736 self.logger.debug("Returning {}".format(network_list))
737 return network_list
738
739 def get_network(self, net_id):
740 """Method obtains network details of net_id VIM network
741 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
742
743 try:
744 org, vdc = self.get_vdc_details()
745 vdc_id = vdc.get('id').split(":")[3]
746 if self.client._session:
747 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
748 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
749 response = self.perform_request(req_type='GET',
750 url=vdc.get('href'),
751 headers=headers)
752 if response.status_code != 200:
753 self.logger.error("Failed to get vdc content")
754 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
755 else:
756 content = XmlElementTree.fromstring(response.text)
757
758 filter_dict = {}
759
760 for item in content:
761 if item.tag.split('}')[-1] == 'AvailableNetworks':
762 for net in item:
763 response = self.perform_request(req_type='GET',
764 url=net.get('href'),
765 headers=headers)
766
767 if response.status_code != 200:
768 self.logger.error("Failed to get network content")
769 raise vimconn.vimconnNotFoundException("Failed to get network content")
770 else:
771 net_details = XmlElementTree.fromstring(response.text)
772
773 vdc_network_id = net_details.get('id').split(":")
774 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
775 filter_dict["name"] = net_details.get('name')
776 filter_dict["id"] = vdc_network_id[3]
777 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
778 shared = True
779 else:
780 shared = False
781 filter_dict["shared"] = shared
782 filter_dict["tenant_id"] = vdc_id
783 if int(net_details.get('status')) == 1:
784 filter_dict["admin_state_up"] = True
785 else:
786 filter_dict["admin_state_up"] = False
787 filter_dict["status"] = "ACTIVE"
788 filter_dict["type"] = "bridge"
789 self.logger.debug("Returning {}".format(filter_dict))
790 return filter_dict
791 else:
792 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
793 except Exception as e:
794 self.logger.debug("Error in get_network")
795 self.logger.debug(traceback.format_exc())
796 if isinstance(e, vimconn.vimconnException):
797 raise
798 else:
799 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
800
801 return filter_dict
802
803 def delete_network(self, net_id, created_items=None):
804 """
805 Removes a tenant network from VIM and its associated elements
806 :param net_id: VIM identifier of the network, provided by method new_network
807 :param created_items: dictionary with extra items to be deleted. provided by method new_network
808 Returns the network identifier or raises an exception upon error or when network is not found
809 """
810
811 # ############# Stub code for SRIOV #################
812 # dvport_group = self.get_dvport_group(net_id)
813 # if dvport_group:
814 # #delete portgroup
815 # status = self.destroy_dvport_group(net_id)
816 # if status:
817 # # Remove vlanID from persistent info
818 # if net_id in self.persistent_info["used_vlanIDs"]:
819 # del self.persistent_info["used_vlanIDs"][net_id]
820 #
821 # return net_id
822
823 vcd_network = self.get_vcd_network(network_uuid=net_id)
824 if vcd_network is not None and vcd_network:
825 if self.delete_network_action(network_uuid=net_id):
826 return net_id
827 else:
828 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
829
830 def refresh_nets_status(self, net_list):
831 """Get the status of the networks
832 Params: the list of network identifiers
833 Returns a dictionary with:
834 net_id: #VIM id of this network
835 status: #Mandatory. Text with one of:
836 # DELETED (not found at vim)
837 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
838 # OTHER (Vim reported other status not understood)
839 # ERROR (VIM indicates an ERROR status)
840 # ACTIVE, INACTIVE, DOWN (admin down),
841 # BUILD (on building process)
842 #
843 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
844 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
845
846 """
847
848 dict_entry = {}
849 try:
850 for net in net_list:
851 errormsg = ''
852 vcd_network = self.get_vcd_network(network_uuid=net)
853 if vcd_network is not None and vcd_network:
854 if vcd_network['status'] == '1':
855 status = 'ACTIVE'
856 else:
857 status = 'DOWN'
858 else:
859 status = 'DELETED'
860 errormsg = 'Network not found.'
861
862 dict_entry[net] = {'status': status, 'error_msg': errormsg,
863 'vim_info': yaml.safe_dump(vcd_network)}
864 except:
865 self.logger.debug("Error in refresh_nets_status")
866 self.logger.debug(traceback.format_exc())
867
868 return dict_entry
869
870 def get_flavor(self, flavor_id):
871 """Obtain flavor details from the VIM
872 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
873 """
874 if flavor_id not in vimconnector.flavorlist:
875 raise vimconn.vimconnNotFoundException("Flavor not found.")
876 return vimconnector.flavorlist[flavor_id]
877
878 def new_flavor(self, flavor_data):
879 """Adds a tenant flavor to VIM
880 flavor_data contains a dictionary with information, keys:
881 name: flavor name
882 ram: memory (cloud type) in MBytes
883 vpcus: cpus (cloud type)
884 extended: EPA parameters
885 - numas: #items requested in same NUMA
886 memory: number of 1G huge pages memory
887 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
888 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
889 - name: interface name
890 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
891 bandwidth: X Gbps; requested guarantee bandwidth
892 vpci: requested virtual PCI address
893 disk: disk size
894 is_public:
895 #TODO to concrete
896 Returns the flavor identifier"""
897
898 # generate a new uuid put to internal dict and return it.
899 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
900 new_flavor=flavor_data
901 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
902 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
903 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
904
905 if not isinstance(ram, int):
906 raise vimconn.vimconnException("Non-integer value for ram")
907 elif not isinstance(cpu, int):
908 raise vimconn.vimconnException("Non-integer value for cpu")
909 elif not isinstance(disk, int):
910 raise vimconn.vimconnException("Non-integer value for disk")
911
912 extended_flv = flavor_data.get("extended")
913 if extended_flv:
914 numas=extended_flv.get("numas")
915 if numas:
916 for numa in numas:
917 #overwrite ram and vcpus
918 if 'memory' in numa:
919 ram = numa['memory']*1024
920 if 'paired-threads' in numa:
921 cpu = numa['paired-threads']*2
922 elif 'cores' in numa:
923 cpu = numa['cores']
924 elif 'threads' in numa:
925 cpu = numa['threads']
926
927 new_flavor[FLAVOR_RAM_KEY] = ram
928 new_flavor[FLAVOR_VCPUS_KEY] = cpu
929 new_flavor[FLAVOR_DISK_KEY] = disk
930 # generate a new uuid put to internal dict and return it.
931 flavor_id = uuid.uuid4()
932 vimconnector.flavorlist[str(flavor_id)] = new_flavor
933 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
934
935 return str(flavor_id)
936
937 def delete_flavor(self, flavor_id):
938 """Deletes a tenant flavor from VIM identify by its id
939
940 Returns the used id or raise an exception
941 """
942 if flavor_id not in vimconnector.flavorlist:
943 raise vimconn.vimconnNotFoundException("Flavor not found.")
944
945 vimconnector.flavorlist.pop(flavor_id, None)
946 return flavor_id
947
948 def new_image(self, image_dict):
949 """
950 Adds a tenant image to VIM
951 Returns:
952 200, image-id if the image is created
953 <0, message if there is an error
954 """
955
956 return self.get_image_id_from_path(image_dict['location'])
957
958 def delete_image(self, image_id):
959 """
960 Deletes a tenant image from VIM
961 Args:
962 image_id is ID of Image to be deleted
963 Return:
964 returns the image identifier in UUID format or raises an exception on error
965 """
966 conn = self.connect_as_admin()
967 if not conn:
968 raise vimconn.vimconnConnectionException("Failed to connect vCD")
969 # Get Catalog details
970 url_list = [self.url, '/api/catalog/', image_id]
971 catalog_herf = ''.join(url_list)
972
973 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
974 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
975
976 response = self.perform_request(req_type='GET',
977 url=catalog_herf,
978 headers=headers)
979
980 if response.status_code != requests.codes.ok:
981 self.logger.debug("delete_image():GET REST API call {} failed. "\
982 "Return status code {}".format(catalog_herf,
983 response.status_code))
984 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
985
986 lxmlroot_respond = lxmlElementTree.fromstring(response.text)
987 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
988 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
989
990 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
991 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
992 for catalogItem in catalogItems:
993 catalogItem_href = catalogItem.attrib['href']
994
995 response = self.perform_request(req_type='GET',
996 url=catalogItem_href,
997 headers=headers)
998
999 if response.status_code != requests.codes.ok:
1000 self.logger.debug("delete_image():GET REST API call {} failed. "\
1001 "Return status code {}".format(catalog_herf,
1002 response.status_code))
1003 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
1004 catalogItem,
1005 image_id))
1006
1007 lxmlroot_respond = lxmlElementTree.fromstring(response.text)
1008 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
1009 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
1010 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
1011
1012 #Remove catalogItem
1013 response = self.perform_request(req_type='DELETE',
1014 url=catalogitem_remove_href,
1015 headers=headers)
1016 if response.status_code == requests.codes.no_content:
1017 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1018 else:
1019 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
1020
1021 #Remove catalog
1022 url_list = [self.url, '/api/admin/catalog/', image_id]
1023 catalog_remove_herf = ''.join(url_list)
1024 response = self.perform_request(req_type='DELETE',
1025 url=catalog_remove_herf,
1026 headers=headers)
1027
1028 if response.status_code == requests.codes.no_content:
1029 self.logger.debug("Deleted Catalog {}".format(image_id))
1030 return image_id
1031 else:
1032 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
1033
1034
1035 def catalog_exists(self, catalog_name, catalogs):
1036 """
1037
1038 :param catalog_name:
1039 :param catalogs:
1040 :return:
1041 """
1042 for catalog in catalogs:
1043 if catalog['name'] == catalog_name:
1044 return catalog['id']
1045
1046 def create_vimcatalog(self, vca=None, catalog_name=None):
1047 """ Create new catalog entry in vCloud director.
1048
1049 Args
1050 vca: vCloud director.
1051 catalog_name catalog that client wish to create. Note no validation done for a name.
1052 Client must make sure that provide valid string representation.
1053
1054 Returns catalog id if catalog created else None.
1055
1056 """
1057 try:
1058 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1059 if lxml_catalog_element:
1060 id_attr_value = lxml_catalog_element.get('id') # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
1061 return id_attr_value.split(':')[-1]
1062 catalogs = vca.list_catalogs()
1063 except Exception as ex:
1064 self.logger.error(
1065 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
1066 raise
1067 return self.catalog_exists(catalog_name, catalogs)
1068
1069 # noinspection PyIncorrectDocstring
1070 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
1071 description='', progress=False, chunk_bytes=128 * 1024):
1072 """
1073 Uploads a OVF file to a vCloud catalog
1074
1075 :param chunk_bytes:
1076 :param progress:
1077 :param description:
1078 :param image_name:
1079 :param vca:
1080 :param catalog_name: (str): The name of the catalog to upload the media.
1081 :param media_file_name: (str): The name of the local media file to upload.
1082 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1083 """
1084 os.path.isfile(media_file_name)
1085 statinfo = os.stat(media_file_name)
1086
1087 # find a catalog entry where we upload OVF.
1088 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
1089 # status change.
1090 # if VCD can parse OVF we upload VMDK file
1091 try:
1092 for catalog in vca.list_catalogs():
1093 if catalog_name != catalog['name']:
1094 continue
1095 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
1096 data = """
1097 <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
1098 """.format(catalog_name, description)
1099
1100 if self.client:
1101 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1102 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1103 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1104
1105 response = self.perform_request(req_type='POST',
1106 url=catalog_href,
1107 headers=headers,
1108 data=data)
1109
1110 if response.status_code == requests.codes.created:
1111 catalogItem = XmlElementTree.fromstring(response.text)
1112 entity = [child for child in catalogItem if
1113 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1114 href = entity.get('href')
1115 template = href
1116
1117 response = self.perform_request(req_type='GET',
1118 url=href,
1119 headers=headers)
1120
1121 if response.status_code == requests.codes.ok:
1122 headers['Content-Type'] = 'Content-Type text/xml'
1123 result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"', response.text)
1124 if result:
1125 transfer_href = result.group(1)
1126
1127 response = self.perform_request(req_type='PUT',
1128 url=transfer_href,
1129 headers=headers,
1130 data=open(media_file_name, 'rb'))
1131 if response.status_code != requests.codes.ok:
1132 self.logger.debug(
1133 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1134 media_file_name))
1135 return False
1136
1137 # TODO fix this with aync block
1138 time.sleep(5)
1139
1140 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1141
1142 # uploading VMDK file
1143 # check status of OVF upload and upload remaining files.
1144 response = self.perform_request(req_type='GET',
1145 url=template,
1146 headers=headers)
1147
1148 if response.status_code == requests.codes.ok:
1149 result = re.search('rel="upload:default"\s*href="(.*?vmdk)"', response.text)
1150 if result:
1151 link_href = result.group(1)
1152 # we skip ovf since it already uploaded.
1153 if 'ovf' in link_href:
1154 continue
1155 # The OVF file and VMDK must be in a same directory
1156 head, tail = os.path.split(media_file_name)
1157 file_vmdk = head + '/' + link_href.split("/")[-1]
1158 if not os.path.isfile(file_vmdk):
1159 return False
1160 statinfo = os.stat(file_vmdk)
1161 if statinfo.st_size == 0:
1162 return False
1163 hrefvmdk = link_href
1164
1165 if progress:
1166 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1167 FileTransferSpeed()]
1168 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1169
1170 bytes_transferred = 0
1171 f = open(file_vmdk, 'rb')
1172 while bytes_transferred < statinfo.st_size:
1173 my_bytes = f.read(chunk_bytes)
1174 if len(my_bytes) <= chunk_bytes:
1175 headers['Content-Range'] = 'bytes {}-{}/{}'.format(
1176 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1177 headers['Content-Length'] = str(len(my_bytes))
1178 response = requests.put(url=hrefvmdk,
1179 headers=headers,
1180 data=my_bytes,
1181 verify=False)
1182 if response.status_code == requests.codes.ok:
1183 bytes_transferred += len(my_bytes)
1184 if progress:
1185 progress_bar.update(bytes_transferred)
1186 else:
1187 self.logger.debug(
1188 'file upload failed with error: [{}] {}'.format(response.status_code,
1189 response.text))
1190
1191 f.close()
1192 return False
1193 f.close()
1194 if progress:
1195 progress_bar.finish()
1196 time.sleep(10)
1197 return True
1198 else:
1199 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1200 format(catalog_name, media_file_name))
1201 return False
1202 except Exception as exp:
1203 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1204 .format(catalog_name,media_file_name, exp))
1205 raise vimconn.vimconnException(
1206 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1207 .format(catalog_name,media_file_name, exp))
1208
1209 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1210 return False
1211
1212 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1213 """Upload media file"""
1214 # TODO add named parameters for readability
1215
1216 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1217 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1218
1219 def validate_uuid4(self, uuid_string=None):
1220 """ Method validate correct format of UUID.
1221
1222 Return: true if string represent valid uuid
1223 """
1224 try:
1225 val = uuid.UUID(uuid_string, version=4)
1226 except ValueError:
1227 return False
1228 return True
1229
1230 def get_catalogid(self, catalog_name=None, catalogs=None):
1231 """ Method check catalog and return catalog ID in UUID format.
1232
1233 Args
1234 catalog_name: catalog name as string
1235 catalogs: list of catalogs.
1236
1237 Return: catalogs uuid
1238 """
1239
1240 for catalog in catalogs:
1241 if catalog['name'] == catalog_name:
1242 catalog_id = catalog['id']
1243 return catalog_id
1244 return None
1245
1246 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1247 """ Method check catalog and return catalog name lookup done by catalog UUID.
1248
1249 Args
1250 catalog_name: catalog name as string
1251 catalogs: list of catalogs.
1252
1253 Return: catalogs name or None
1254 """
1255
1256 if not self.validate_uuid4(uuid_string=catalog_uuid):
1257 return None
1258
1259 for catalog in catalogs:
1260 catalog_id = catalog.get('id')
1261 if catalog_id == catalog_uuid:
1262 return catalog.get('name')
1263 return None
1264
1265 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1266 """ Method check catalog and return catalog name lookup done by catalog UUID.
1267
1268 Args
1269 catalog_name: catalog name as string
1270 catalogs: list of catalogs.
1271
1272 Return: catalogs name or None
1273 """
1274
1275 if not self.validate_uuid4(uuid_string=catalog_uuid):
1276 return None
1277
1278 for catalog in catalogs:
1279 catalog_id = catalog.get('id')
1280 if catalog_id == catalog_uuid:
1281 return catalog
1282 return None
1283
1284 def get_image_id_from_path(self, path=None, progress=False):
1285 """ Method upload OVF image to vCloud director.
1286
1287 Each OVF image represented as single catalog entry in vcloud director.
1288 The method check for existing catalog entry. The check done by file name without file extension.
1289
1290 if given catalog name already present method will respond with existing catalog uuid otherwise
1291 it will create new catalog entry and upload OVF file to newly created catalog.
1292
1293 If method can't create catalog entry or upload a file it will throw exception.
1294
1295 Method accept boolean flag progress that will output progress bar. It useful method
1296 for standalone upload use case. In case to test large file upload.
1297
1298 Args
1299 path: - valid path to OVF file.
1300 progress - boolean progress bar show progress bar.
1301
1302 Return: if image uploaded correct method will provide image catalog UUID.
1303 """
1304
1305 if not path:
1306 raise vimconn.vimconnException("Image path can't be None.")
1307
1308 if not os.path.isfile(path):
1309 raise vimconn.vimconnException("Can't read file. File not found.")
1310
1311 if not os.access(path, os.R_OK):
1312 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1313
1314 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1315
1316 dirpath, filename = os.path.split(path)
1317 flname, file_extension = os.path.splitext(path)
1318 if file_extension != '.ovf':
1319 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1320 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1321
1322 catalog_name = os.path.splitext(filename)[0]
1323 catalog_md5_name = hashlib.md5(path.encode('utf-8')).hexdigest()
1324 self.logger.debug("File name {} Catalog Name {} file path {} "
1325 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1326
1327 try:
1328 org,vdc = self.get_vdc_details()
1329 catalogs = org.list_catalogs()
1330 except Exception as exp:
1331 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1332 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1333
1334 if len(catalogs) == 0:
1335 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1336 if self.create_vimcatalog(org, catalog_md5_name) is None:
1337 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1338
1339 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1340 media_name=filename, medial_file_name=path, progress=progress)
1341 if not result:
1342 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1343 return self.get_catalogid(catalog_name, catalogs)
1344 else:
1345 for catalog in catalogs:
1346 # search for existing catalog if we find same name we return ID
1347 # TODO optimize this
1348 if catalog['name'] == catalog_md5_name:
1349 self.logger.debug("Found existing catalog entry for {} "
1350 "catalog id {}".format(catalog_name,
1351 self.get_catalogid(catalog_md5_name, catalogs)))
1352 return self.get_catalogid(catalog_md5_name, catalogs)
1353
1354 # if we didn't find existing catalog we create a new one and upload image.
1355 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1356 if self.create_vimcatalog(org, catalog_md5_name) is None:
1357 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1358
1359 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1360 media_name=filename, medial_file_name=path, progress=progress)
1361 if not result:
1362 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1363
1364 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1365
1366 def get_image_list(self, filter_dict={}):
1367 '''Obtain tenant images from VIM
1368 Filter_dict can be:
1369 name: image name
1370 id: image uuid
1371 checksum: image checksum
1372 location: image path
1373 Returns the image list of dictionaries:
1374 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1375 List can be empty
1376 '''
1377
1378 try:
1379 org, vdc = self.get_vdc_details()
1380 image_list = []
1381 catalogs = org.list_catalogs()
1382 if len(catalogs) == 0:
1383 return image_list
1384 else:
1385 for catalog in catalogs:
1386 catalog_uuid = catalog.get('id')
1387 name = catalog.get('name')
1388 filtered_dict = {}
1389 if filter_dict.get("name") and filter_dict["name"] != name:
1390 continue
1391 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1392 continue
1393 filtered_dict ["name"] = name
1394 filtered_dict ["id"] = catalog_uuid
1395 image_list.append(filtered_dict)
1396
1397 self.logger.debug("List of already created catalog items: {}".format(image_list))
1398 return image_list
1399 except Exception as exp:
1400 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1401
1402 def get_vappid(self, vdc=None, vapp_name=None):
1403 """ Method takes vdc object and vApp name and returns vapp uuid or None
1404
1405 Args:
1406 vdc: The VDC object.
1407 vapp_name: is application vappp name identifier
1408
1409 Returns:
1410 The return vApp name otherwise None
1411 """
1412 if vdc is None or vapp_name is None:
1413 return None
1414 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1415 try:
1416 refs = [ref for ref in vdc.ResourceEntities.ResourceEntity \
1417 if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1418 if len(refs) == 1:
1419 return refs[0].href.split("vapp")[1][1:]
1420 except Exception as e:
1421 self.logger.exception(e)
1422 return False
1423 return None
1424
1425 def check_vapp(self, vdc=None, vapp_uuid=None):
1426 """ Method Method returns True or False if vapp deployed in vCloud director
1427
1428 Args:
1429 vca: Connector to VCA
1430 vdc: The VDC object.
1431 vappid: vappid is application identifier
1432
1433 Returns:
1434 The return True if vApp deployed
1435 :param vdc:
1436 :param vapp_uuid:
1437 """
1438 try:
1439 refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1440 if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1441 for ref in refs:
1442 vappid = ref.href.split("vapp")[1][1:]
1443 # find vapp with respected vapp uuid
1444 if vappid == vapp_uuid:
1445 return True
1446 except Exception as e:
1447 self.logger.exception(e)
1448 return False
1449 return False
1450
1451 def get_namebyvappid(self, vapp_uuid=None):
1452 """Method returns vApp name from vCD and lookup done by vapp_id.
1453
1454 Args:
1455 vapp_uuid: vappid is application identifier
1456
1457 Returns:
1458 The return vApp name otherwise None
1459 """
1460 try:
1461 if self.client and vapp_uuid:
1462 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1463 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1464 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1465
1466 response = self.perform_request(req_type='GET',
1467 url=vapp_call,
1468 headers=headers)
1469 #Retry login if session expired & retry sending request
1470 if response.status_code == 403:
1471 response = self.retry_rest('GET', vapp_call)
1472
1473 tree = XmlElementTree.fromstring(response.text)
1474 return tree.attrib['name'] if 'name' in tree.attrib else None
1475 except Exception as e:
1476 self.logger.exception(e)
1477 return None
1478 return None
1479
1480 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1481 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1482 """Adds a VM instance to VIM
1483 Params:
1484 'start': (boolean) indicates if VM must start or created in pause mode.
1485 'image_id','flavor_id': image and flavor VIM id to use for the VM
1486 'net_list': list of interfaces, each one is a dictionary with:
1487 'name': (optional) name for the interface.
1488 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1489 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1490 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1491 'mac_address': (optional) mac address to assign to this interface
1492 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1493 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1494 'type': (mandatory) can be one of:
1495 'virtual', in this case always connected to a network of type 'net_type=bridge'
1496 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
1497 can created unconnected
1498 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1499 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1500 are allocated on the same physical NIC
1501 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1502 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1503 or True, it must apply the default VIM behaviour
1504 After execution the method will add the key:
1505 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1506 interface. 'net_list' is modified
1507 'cloud_config': (optional) dictionary with:
1508 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1509 'users': (optional) list of users to be inserted, each item is a dict with:
1510 'name': (mandatory) user name,
1511 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1512 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1513 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1514 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1515 'dest': (mandatory) string with the destination absolute path
1516 'encoding': (optional, by default text). Can be one of:
1517 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1518 'content' (mandatory): string with the content of the file
1519 'permissions': (optional) string with file permissions, typically octal notation '0644'
1520 'owner': (optional) file owner, string with the format 'owner:group'
1521 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1522 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1523 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1524 'size': (mandatory) string with the size of the disk in GB
1525 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1526 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1527 availability_zone_index is None
1528 Returns a tuple with the instance identifier and created_items or raises an exception on error
1529 created_items can be None or a dictionary where this method can include key-values that will be passed to
1530 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1531 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1532 as not present.
1533 """
1534 self.logger.info("Creating new instance for entry {}".format(name))
1535 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
1536 "availability_zone_index {} availability_zone_list {}"\
1537 .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
1538 availability_zone_index, availability_zone_list))
1539
1540 #new vm name = vmname + tenant_id + uuid
1541 new_vm_name = [name, '-', str(uuid.uuid4())]
1542 vmname_andid = ''.join(new_vm_name)
1543
1544 for net in net_list:
1545 if net['type'] == "PCI-PASSTHROUGH":
1546 raise vimconn.vimconnNotSupportedException(
1547 "Current vCD version does not support type : {}".format(net['type']))
1548
1549 if len(net_list) > 10:
1550 raise vimconn.vimconnNotSupportedException(
1551 "The VM hardware versions 7 and above support upto 10 NICs only")
1552
1553 # if vm already deployed we return existing uuid
1554 # we check for presence of VDC, Catalog entry and Flavor.
1555 org, vdc = self.get_vdc_details()
1556 if vdc is None:
1557 raise vimconn.vimconnNotFoundException(
1558 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1559 catalogs = org.list_catalogs()
1560 if catalogs is None:
1561 #Retry once, if failed by refreshing token
1562 self.get_token()
1563 org = Org(self.client, resource=self.client.get_org())
1564 catalogs = org.list_catalogs()
1565 if catalogs is None:
1566 raise vimconn.vimconnNotFoundException(
1567 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1568
1569 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1570 if catalog_hash_name:
1571 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1572 else:
1573 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1574 "(Failed retrieve catalog information {})".format(name, image_id))
1575
1576 # Set vCPU and Memory based on flavor.
1577 vm_cpus = None
1578 vm_memory = None
1579 vm_disk = None
1580 numas = None
1581
1582 if flavor_id is not None:
1583 if flavor_id not in vimconnector.flavorlist:
1584 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1585 "Failed retrieve flavor information "
1586 "flavor id {}".format(name, flavor_id))
1587 else:
1588 try:
1589 flavor = vimconnector.flavorlist[flavor_id]
1590 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1591 vm_memory = flavor[FLAVOR_RAM_KEY]
1592 vm_disk = flavor[FLAVOR_DISK_KEY]
1593 extended = flavor.get("extended", None)
1594 if extended:
1595 numas=extended.get("numas", None)
1596
1597 except Exception as exp:
1598 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1599
1600 # image upload creates template name as catalog name space Template.
1601 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1602 power_on = 'false'
1603 if start:
1604 power_on = 'true'
1605
1606 # client must provide at least one entry in net_list if not we report error
1607 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1608 # If no mgmt, then the 1st NN in netlist is considered as primary net.
1609 primary_net = None
1610 primary_netname = None
1611 primary_net_href = None
1612 network_mode = 'bridged'
1613 if net_list is not None and len(net_list) > 0:
1614 for net in net_list:
1615 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1616 primary_net = net
1617 if primary_net is None:
1618 primary_net = net_list[0]
1619
1620 try:
1621 primary_net_id = primary_net['net_id']
1622 url_list = [self.url, '/api/network/', primary_net_id]
1623 primary_net_href = ''.join(url_list)
1624 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1625 if 'name' in network_dict:
1626 primary_netname = network_dict['name']
1627
1628 except KeyError:
1629 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1630 else:
1631 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
1632
1633 # use: 'data', 'bridge', 'mgmt'
1634 # create vApp. Set vcpu and ram based on flavor id.
1635 try:
1636 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1637 if not vdc_obj:
1638 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
1639
1640 for retry in (1, 2):
1641 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1642 catalog_items = [items.attrib]
1643
1644 if len(catalog_items) == 1:
1645 if self.client:
1646 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1647 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1648
1649 response = self.perform_request(req_type='GET',
1650 url=catalog_items[0].get('href'),
1651 headers=headers)
1652 catalogItem = XmlElementTree.fromstring(response.text)
1653 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1654 vapp_tempalte_href = entity.get("href")
1655
1656 response = self.perform_request(req_type='GET',
1657 url=vapp_tempalte_href,
1658 headers=headers)
1659 if response.status_code != requests.codes.ok:
1660 self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
1661 response.status_code))
1662 else:
1663 result = (response.text).replace("\n", " ")
1664
1665 vapp_template_tree = XmlElementTree.fromstring(response.text)
1666 children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
1667 vm_element = [child for child in children_element if 'Vm' in child.tag][0]
1668 vm_name = vm_element.get('name')
1669 vm_id = vm_element.get('id')
1670 vm_href = vm_element.get('href')
1671
1672 cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1673 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1674 cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
1675
1676 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
1677 vdc_id = vdc.get('id').split(':')[-1]
1678 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
1679 vdc_id)
1680 with open(os.path.join(os.path.dirname(__file__), 'InstantiateVAppTemplateParams.xml'), 'r') as f:
1681 template = f.read()
1682
1683 data = template.format(vmname_andid,
1684 primary_netname,
1685 primary_net_href,
1686 vapp_tempalte_href,
1687 vm_href,
1688 vm_id,
1689 vm_name,
1690 primary_netname,
1691 cpu=vm_cpus,
1692 core=1,
1693 memory=vm_memory)
1694
1695 response = self.perform_request(req_type='POST',
1696 url=instantiate_vapp_href,
1697 headers=headers,
1698 data=data)
1699
1700 if response.status_code != 201:
1701 self.logger.error("REST call {} failed reason : {}"\
1702 "status code : {}".format(instantiate_vapp_href,
1703 response.text,
1704 response.status_code))
1705 raise vimconn.vimconnException("new_vminstance(): Failed to create"\
1706 "vAapp {}".format(vmname_andid))
1707 else:
1708 vapptask = self.get_task_from_response(response.text)
1709
1710 if vapptask is None and retry==1:
1711 self.get_token() # Retry getting token
1712 continue
1713 else:
1714 break
1715
1716 if vapptask is None or vapptask is False:
1717 raise vimconn.vimconnUnexpectedResponse(
1718 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1719
1720 # wait for task to complete
1721 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
1722
1723 if result.get('status') == 'success':
1724 self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
1725 else:
1726 raise vimconn.vimconnUnexpectedResponse(
1727 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1728
1729 except Exception as exp:
1730 raise vimconn.vimconnUnexpectedResponse(
1731 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1732
1733 # we should have now vapp in undeployed state.
1734 try:
1735 vdc_obj = VDC(self.client, href=vdc.get('href'))
1736 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1737 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1738 vapp = VApp(self.client, resource=vapp_resource)
1739
1740 except Exception as exp:
1741 raise vimconn.vimconnUnexpectedResponse(
1742 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1743 .format(vmname_andid, exp))
1744
1745 if vapp_uuid is None:
1746 raise vimconn.vimconnUnexpectedResponse(
1747 "new_vminstance(): Failed to retrieve vApp {} after creation".format(vmname_andid))
1748
1749 # Add PCI passthrough/SRIOV configrations
1750 pci_devices_info = []
1751 reserve_memory = False
1752
1753 for net in net_list:
1754 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1755 pci_devices_info.append(net)
1756 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
1757 reserve_memory = True
1758
1759 # Add PCI
1760 if len(pci_devices_info) > 0:
1761 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1762 vmname_andid))
1763 PCI_devices_status, _, _ = self.add_pci_devices(vapp_uuid,
1764 pci_devices_info,
1765 vmname_andid)
1766 if PCI_devices_status:
1767 self.logger.info("Added PCI devives {} to VM {}".format(
1768 pci_devices_info,
1769 vmname_andid))
1770 reserve_memory = True
1771 else:
1772 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1773 pci_devices_info,
1774 vmname_andid))
1775
1776 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
1777 self.add_serial_device(vapp_uuid)
1778
1779 if vm_disk:
1780 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1781 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1782 if result:
1783 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1784
1785 #Add new or existing disks to vApp
1786 if disk_list:
1787 added_existing_disk = False
1788 for disk in disk_list:
1789 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1790 image_id = disk['image_id']
1791 # Adding CD-ROM to VM
1792 # will revisit code once specification ready to support this feature
1793 self.insert_media_to_vm(vapp, image_id)
1794 elif "image_id" in disk and disk["image_id"] is not None:
1795 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1796 disk["image_id"] , vapp_uuid))
1797 self.add_existing_disk(catalogs=catalogs,
1798 image_id=disk["image_id"],
1799 size = disk["size"],
1800 template_name=templateName,
1801 vapp_uuid=vapp_uuid
1802 )
1803 added_existing_disk = True
1804 else:
1805 #Wait till added existing disk gets reflected into vCD database/API
1806 if added_existing_disk:
1807 time.sleep(5)
1808 added_existing_disk = False
1809 self.add_new_disk(vapp_uuid, disk['size'])
1810
1811 if numas:
1812 # Assigning numa affinity setting
1813 for numa in numas:
1814 if 'paired-threads-id' in numa:
1815 paired_threads_id = numa['paired-threads-id']
1816 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1817
1818 # add NICs & connect to networks in netlist
1819 try:
1820 vdc_obj = VDC(self.client, href=vdc.get('href'))
1821 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1822 vapp = VApp(self.client, resource=vapp_resource)
1823 vapp_id = vapp_resource.get('id').split(':')[-1]
1824
1825 self.logger.info("Removing primary NIC: ")
1826 # First remove all NICs so that NIC properties can be adjusted as needed
1827 self.remove_primary_network_adapter_from_all_vms(vapp)
1828
1829 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1830 primary_nic_index = 0
1831 nicIndex = 0
1832 for net in net_list:
1833 # openmano uses network id in UUID format.
1834 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
1835 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1836 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1837
1838 if 'net_id' not in net:
1839 continue
1840
1841 #Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
1842 #Same will be returned in refresh_vms_status() as vim_interface_id
1843 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1844
1845 interface_net_id = net['net_id']
1846 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1847 interface_network_mode = net['use']
1848
1849 if interface_network_mode == 'mgmt':
1850 primary_nic_index = nicIndex
1851
1852 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1853 - DHCP (The IP address is obtained from a DHCP service.)
1854 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1855 - NONE (No IP addressing mode specified.)"""
1856
1857 if primary_netname is not None:
1858 self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
1859 nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
1860 if len(nets) == 1:
1861 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
1862
1863 if interface_net_name != primary_netname:
1864 # connect network to VM - with all DHCP by default
1865 self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
1866 self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
1867
1868 type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
1869 nic_type = 'VMXNET3'
1870 if 'type' in net and net['type'] not in type_list:
1871 # fetching nic type from vnf
1872 if 'model' in net:
1873 if net['model'] is not None:
1874 if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
1875 nic_type = 'VMXNET3'
1876 else:
1877 nic_type = net['model']
1878
1879 self.logger.info("new_vminstance(): adding network adapter "\
1880 "to a network {}".format(nets[0].get('name')))
1881 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1882 primary_nic_index,
1883 nicIndex,
1884 net,
1885 nic_type=nic_type)
1886 else:
1887 self.logger.info("new_vminstance(): adding network adapter "\
1888 "to a network {}".format(nets[0].get('name')))
1889 if net['type'] in ['SR-IOV', 'VF']:
1890 nic_type = net['type']
1891 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1892 primary_nic_index,
1893 nicIndex,
1894 net,
1895 nic_type=nic_type)
1896 nicIndex += 1
1897
1898 # cloud-init for ssh-key injection
1899 if cloud_config:
1900 # Create a catalog which will be carrying the config drive ISO
1901 # This catalog is deleted during vApp deletion. The catalog name carries
1902 # vApp UUID and thats how it gets identified during its deletion.
1903 config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
1904 self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
1905 config_drive_catalog_name))
1906 config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
1907 if config_drive_catalog_id is None:
1908 error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
1909 "ISO".format(config_drive_catalog_name)
1910 raise Exception(error_msg)
1911
1912 # Create config-drive ISO
1913 _, userdata = self._create_user_data(cloud_config)
1914 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
1915 iso_path = self.create_config_drive_iso(userdata)
1916 self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
1917
1918 self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
1919 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
1920 # Attach the config-drive ISO to the VM
1921 self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
1922 # The ISO remains in INVALID_STATE right after the PUT request (its a blocking call though)
1923 time.sleep(5)
1924 self.insert_media_to_vm(vapp, config_drive_catalog_id)
1925 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
1926
1927 # If VM has PCI devices or SRIOV reserve memory for VM
1928 if reserve_memory:
1929 self.reserve_memory_for_all_vms(vapp, memory_mb)
1930
1931 self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
1932
1933 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
1934 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
1935 if result.get('status') == 'success':
1936 self.logger.info("new_vminstance(): Successfully power on "\
1937 "vApp {}".format(vmname_andid))
1938 else:
1939 self.logger.error("new_vminstance(): failed to power on vApp "\
1940 "{}".format(vmname_andid))
1941
1942 except Exception as exp:
1943 try:
1944 self.delete_vminstance(vapp_uuid)
1945 except Exception as exp2:
1946 self.logger.error("new_vminstance rollback fail {}".format(exp2))
1947 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
1948 self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
1949 .format(name, exp))
1950 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
1951 .format(name, exp))
1952 # check if vApp deployed and if that the case return vApp UUID otherwise -1
1953 wait_time = 0
1954 vapp_uuid = None
1955 while wait_time <= MAX_WAIT_TIME:
1956 try:
1957 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1958 vapp = VApp(self.client, resource=vapp_resource)
1959 except Exception as exp:
1960 raise vimconn.vimconnUnexpectedResponse(
1961 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1962 .format(vmname_andid, exp))
1963
1964 #if vapp and vapp.me.deployed:
1965 if vapp and vapp_resource.get('deployed') == 'true':
1966 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1967 break
1968 else:
1969 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1970 time.sleep(INTERVAL_TIME)
1971
1972 wait_time +=INTERVAL_TIME
1973
1974 #SET Affinity Rule for VM
1975 #Pre-requisites: User has created Hosh Groups in vCenter with respective Hosts to be used
1976 #While creating VIM account user has to pass the Host Group names in availability_zone list
1977 #"availability_zone" is a part of VIM "config" parameters
1978 #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
1979 #Host groups are referred as availability zones
1980 #With following procedure, deployed VM will be added into a VM group.
1981 #Then A VM to Host Affinity rule will be created using the VM group & Host group.
1982 if(availability_zone_list):
1983 self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
1984 #Admin access required for creating Affinity rules
1985 client = self.connect_as_admin()
1986 if not client:
1987 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
1988 else:
1989 self.client = client
1990 if self.client:
1991 headers = {'Accept':'application/*+xml;version=27.0',
1992 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1993 #Step1: Get provider vdc details from organization
1994 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
1995 if pvdc_href is not None:
1996 #Step2: Found required pvdc, now get resource pool information
1997 respool_href = self.get_resource_pool_details(pvdc_href, headers)
1998 if respool_href is None:
1999 #Raise error if respool_href not found
2000 msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
2001 .format(pvdc_href)
2002 self.log_message(msg)
2003
2004 #Step3: Verify requested availability zone(hostGroup) is present in vCD
2005 # get availability Zone
2006 vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
2007 # check if provided av zone(hostGroup) is present in vCD VIM
2008 status = self.check_availibility_zone(vm_az, respool_href, headers)
2009 if status is False:
2010 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
2011 "resource pool {} status: {}".format(vm_az,respool_href,status)
2012 self.log_message(msg)
2013 else:
2014 self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
2015
2016 #Step4: Find VM group references to create vm group
2017 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2018 if vmgrp_href == None:
2019 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2020 self.log_message(msg)
2021
2022 #Step5: Create a VmGroup with name az_VmGroup
2023 vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
2024 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2025 if status is not True:
2026 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
2027 self.log_message(msg)
2028
2029 #VM Group url to add vms to vm group
2030 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
2031
2032 #Step6: Add VM to VM Group
2033 #Find VM uuid from vapp_uuid
2034 vm_details = self.get_vapp_details_rest(vapp_uuid)
2035 vm_uuid = vm_details['vmuuid']
2036
2037 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2038 if status is not True:
2039 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
2040 self.log_message(msg)
2041
2042 #Step7: Create VM to Host affinity rule
2043 addrule_href = self.get_add_rule_reference (respool_href, headers)
2044 if addrule_href is None:
2045 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
2046 .format(respool_href)
2047 self.log_message(msg)
2048
2049 status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
2050 if status is False:
2051 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
2052 .format(name, vm_az)
2053 self.log_message(msg)
2054 else:
2055 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
2056 .format(name, vm_az))
2057 #Reset token to a normal user to perform other operations
2058 self.get_token()
2059
2060 if vapp_uuid is not None:
2061 return vapp_uuid, None
2062 else:
2063 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
2064
2065 def create_config_drive_iso(self, user_data):
2066 tmpdir = tempfile.mkdtemp()
2067 iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
2068 latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
2069 os.makedirs(latest_dir)
2070 with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
2071 open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
2072 userdata_file_obj.write(user_data)
2073 meta_file_obj.write(json.dumps({"availability_zone": "nova",
2074 "launch_index": 0,
2075 "name": "ConfigDrive",
2076 "uuid": str(uuid.uuid4())}
2077 )
2078 )
2079 genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
2080 iso_path=iso_path, source_dir_path=tmpdir)
2081 self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
2082 try:
2083 FNULL = open(os.devnull, 'w')
2084 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2085 except subprocess.CalledProcessError as e:
2086 shutil.rmtree(tmpdir, ignore_errors=True)
2087 error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
2088 self.logger.error(error_msg)
2089 raise Exception(error_msg)
2090 return iso_path
2091
2092 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
2093 if not os.path.isfile(iso_file_path):
2094 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
2095 self.logger.error(error_msg)
2096 raise Exception(error_msg)
2097 iso_file_stat = os.stat(iso_file_path)
2098 xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
2099 <Media
2100 xmlns="http://www.vmware.com/vcloud/v1.5"
2101 name="{iso_name}"
2102 size="{iso_size}"
2103 imageType="iso">
2104 <Description>ISO image for config-drive</Description>
2105 </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
2106 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2107 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2108 headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
2109 catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
2110 response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
2111
2112 if response.status_code != 201:
2113 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
2114 self.logger.error(error_msg)
2115 raise Exception(error_msg)
2116
2117 catalogItem = XmlElementTree.fromstring(response.text)
2118 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
2119 entity_href = entity.get('href')
2120
2121 response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
2122 if response.status_code != 200:
2123 raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
2124
2125 match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
2126 if match:
2127 media_upload_href = match.group(1)
2128 else:
2129 raise Exception('Could not parse the upload URL for the media file from the last response')
2130 upload_iso_task = self.get_task_from_response(response.text)
2131 headers['Content-Type'] = 'application/octet-stream'
2132 response = self.perform_request(req_type='PUT',
2133 url=media_upload_href,
2134 headers=headers,
2135 data=open(iso_file_path, 'rb'))
2136
2137 if response.status_code != 200:
2138 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2139 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2140 if result.get('status') != 'success':
2141 raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
2142
2143 def get_vcd_availibility_zones(self,respool_href, headers):
2144 """ Method to find presence of av zone is VIM resource pool
2145
2146 Args:
2147 respool_href - resource pool href
2148 headers - header information
2149
2150 Returns:
2151 vcd_az - list of azone present in vCD
2152 """
2153 vcd_az = []
2154 url=respool_href
2155 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2156
2157 if resp.status_code != requests.codes.ok:
2158 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2159 else:
2160 #Get the href to hostGroups and find provided hostGroup is present in it
2161 resp_xml = XmlElementTree.fromstring(resp.content)
2162 for child in resp_xml:
2163 if 'VMWProviderVdcResourcePool' in child.tag:
2164 for schild in child:
2165 if 'Link' in schild.tag:
2166 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2167 hostGroup = schild.attrib.get('href')
2168 hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
2169 if hg_resp.status_code != requests.codes.ok:
2170 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
2171 else:
2172 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2173 for hostGroup in hg_resp_xml:
2174 if 'HostGroup' in hostGroup.tag:
2175 #append host group name to the list
2176 vcd_az.append(hostGroup.attrib.get("name"))
2177 return vcd_az
2178
2179
2180 def set_availability_zones(self):
2181 """
2182 Set vim availability zone
2183 """
2184
2185 vim_availability_zones = None
2186 availability_zone = None
2187 if 'availability_zone' in self.config:
2188 vim_availability_zones = self.config.get('availability_zone')
2189 if isinstance(vim_availability_zones, str):
2190 availability_zone = [vim_availability_zones]
2191 elif isinstance(vim_availability_zones, list):
2192 availability_zone = vim_availability_zones
2193 else:
2194 return availability_zone
2195
2196 return availability_zone
2197
2198
2199 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2200 """
2201 Return the availability zone to be used by the created VM.
2202 returns: The VIM availability zone to be used or None
2203 """
2204 if availability_zone_index is None:
2205 if not self.config.get('availability_zone'):
2206 return None
2207 elif isinstance(self.config.get('availability_zone'), str):
2208 return self.config['availability_zone']
2209 else:
2210 return self.config['availability_zone'][0]
2211
2212 vim_availability_zones = self.availability_zone
2213
2214 # check if VIM offer enough availability zones describe in the VNFD
2215 if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
2216 # check if all the names of NFV AV match VIM AV names
2217 match_by_index = False
2218 for av in availability_zone_list:
2219 if av not in vim_availability_zones:
2220 match_by_index = True
2221 break
2222 if match_by_index:
2223 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
2224 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
2225 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
2226 self.logger.debug("VIM Availability zones will be used by index")
2227 return vim_availability_zones[availability_zone_index]
2228 else:
2229 return availability_zone_list[availability_zone_index]
2230 else:
2231 raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
2232
2233
2234 def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
2235 """ Method to create VM to Host Affinity rule in vCD
2236
2237 Args:
2238 addrule_href - href to make a POST request
2239 vmgrpname - name of the VM group created
2240 hostgrpnmae - name of the host group created earlier
2241 polarity - Affinity or Anti-affinity (default: Affinity)
2242 headers - headers to make REST call
2243
2244 Returns:
2245 True- if rule is created
2246 False- Failed to create rule due to some error
2247
2248 """
2249 task_status = False
2250 rule_name = polarity + "_" + vmgrpname
2251 payload = """<?xml version="1.0" encoding="UTF-8"?>
2252 <vmext:VMWVmHostAffinityRule
2253 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2254 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2255 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2256 <vcloud:Name>{}</vcloud:Name>
2257 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2258 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2259 <vcloud:Polarity>{}</vcloud:Polarity>
2260 <vmext:HostGroupName>{}</vmext:HostGroupName>
2261 <vmext:VmGroupName>{}</vmext:VmGroupName>
2262 </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
2263
2264 resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
2265
2266 if resp.status_code != requests.codes.accepted:
2267 self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
2268 task_status = False
2269 return task_status
2270 else:
2271 affinity_task = self.get_task_from_response(resp.content)
2272 self.logger.debug ("affinity_task: {}".format(affinity_task))
2273 if affinity_task is None or affinity_task is False:
2274 raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
2275 # wait for task to complete
2276 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2277 if result.get('status') == 'success':
2278 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
2279 return True
2280 else:
2281 raise vimconn.vimconnUnexpectedResponse(
2282 "failed to create affinity rule {}".format(rule_name))
2283
2284
2285 def get_add_rule_reference (self, respool_href, headers):
2286 """ This method finds href to add vm to host affinity rule to vCD
2287
2288 Args:
2289 respool_href- href to resource pool
2290 headers- header information to make REST call
2291
2292 Returns:
2293 None - if no valid href to add rule found or
2294 addrule_href - href to add vm to host affinity rule of resource pool
2295 """
2296 addrule_href = None
2297 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2298
2299 if resp.status_code != requests.codes.ok:
2300 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2301 else:
2302
2303 resp_xml = XmlElementTree.fromstring(resp.content)
2304 for child in resp_xml:
2305 if 'VMWProviderVdcResourcePool' in child.tag:
2306 for schild in child:
2307 if 'Link' in schild.tag:
2308 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
2309 schild.attrib.get('rel') == "add":
2310 addrule_href = schild.attrib.get('href')
2311 break
2312
2313 return addrule_href
2314
2315
2316 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2317 """ Method to add deployed VM to newly created VM Group.
2318 This is required to create VM to Host affinity in vCD
2319
2320 Args:
2321 vm_uuid- newly created vm uuid
2322 vmGroupNameURL- URL to VM Group name
2323 vmGroup_name- Name of VM group created
2324 headers- Headers for REST request
2325
2326 Returns:
2327 True- if VM added to VM group successfully
2328 False- if any error encounter
2329 """
2330
2331 addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
2332
2333 if addvm_resp.status_code != requests.codes.ok:
2334 self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
2335 .format(vmGroupNameURL, addvm_resp.status_code))
2336 return False
2337 else:
2338 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2339 for child in resp_xml:
2340 if child.tag.split('}')[1] == 'Link':
2341 if child.attrib.get("rel") == "addVms":
2342 addvmtogrpURL = child.attrib.get("href")
2343
2344 #Get vm details
2345 url_list = [self.url, '/api/vApp/vm-',vm_uuid]
2346 vmdetailsURL = ''.join(url_list)
2347
2348 resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
2349
2350 if resp.status_code != requests.codes.ok:
2351 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
2352 return False
2353
2354 #Parse VM details
2355 resp_xml = XmlElementTree.fromstring(resp.content)
2356 if resp_xml.tag.split('}')[1] == "Vm":
2357 vm_id = resp_xml.attrib.get("id")
2358 vm_name = resp_xml.attrib.get("name")
2359 vm_href = resp_xml.attrib.get("href")
2360 #print vm_id, vm_name, vm_href
2361 #Add VM into VMgroup
2362 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2363 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2364 xmlns="http://www.vmware.com/vcloud/versions" \
2365 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2366 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2367 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2368 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2369 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2370 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2371 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2372 <ns2:VmReference href="{}" id="{}" name="{}" \
2373 type="application/vnd.vmware.vcloud.vm+xml" />\
2374 </ns2:Vms>""".format(vm_href, vm_id, vm_name)
2375
2376 addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
2377
2378 if addvmtogrp_resp.status_code != requests.codes.accepted:
2379 self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
2380 return False
2381 else:
2382 self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
2383 return True
2384
2385
2386 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2387 """Method to create a VM group in vCD
2388
2389 Args:
2390 vmgroup_name : Name of VM group to be created
2391 vmgroup_href : href for vmgroup
2392 headers- Headers for REST request
2393 """
2394 #POST to add URL with required data
2395 vmgroup_status = False
2396 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2397 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2398 <vmCount>1</vmCount>\
2399 </VMWVmGroup>""".format(vmgroup_name)
2400 resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
2401
2402 if resp.status_code != requests.codes.accepted:
2403 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
2404 return vmgroup_status
2405 else:
2406 vmgroup_task = self.get_task_from_response(resp.content)
2407 if vmgroup_task is None or vmgroup_task is False:
2408 raise vimconn.vimconnUnexpectedResponse(
2409 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2410
2411 # wait for task to complete
2412 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2413
2414 if result.get('status') == 'success':
2415 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
2416 #time.sleep(10)
2417 vmgroup_status = True
2418 return vmgroup_status
2419 else:
2420 raise vimconn.vimconnUnexpectedResponse(\
2421 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2422
2423
2424 def find_vmgroup_reference(self, url, headers):
2425 """ Method to create a new VMGroup which is required to add created VM
2426 Args:
2427 url- resource pool href
2428 headers- header information
2429
2430 Returns:
2431 returns href to VM group to create VM group
2432 """
2433 #Perform GET on resource pool to find 'add' link to create VMGroup
2434 #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2435 vmgrp_href = None
2436 resp = self.perform_request(req_type='GET',url=url, headers=headers)
2437
2438 if resp.status_code != requests.codes.ok:
2439 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2440 else:
2441 #Get the href to add vmGroup to vCD
2442 resp_xml = XmlElementTree.fromstring(resp.content)
2443 for child in resp_xml:
2444 if 'VMWProviderVdcResourcePool' in child.tag:
2445 for schild in child:
2446 if 'Link' in schild.tag:
2447 #Find href with type VMGroup and rel with add
2448 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
2449 and schild.attrib.get('rel') == "add":
2450 vmgrp_href = schild.attrib.get('href')
2451 return vmgrp_href
2452
2453
2454 def check_availibility_zone(self, az, respool_href, headers):
2455 """ Method to verify requested av zone is present or not in provided
2456 resource pool
2457
2458 Args:
2459 az - name of hostgroup (availibility_zone)
2460 respool_href - Resource Pool href
2461 headers - Headers to make REST call
2462 Returns:
2463 az_found - True if availibility_zone is found else False
2464 """
2465 az_found = False
2466 headers['Accept']='application/*+xml;version=27.0'
2467 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2468
2469 if resp.status_code != requests.codes.ok:
2470 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2471 else:
2472 #Get the href to hostGroups and find provided hostGroup is present in it
2473 resp_xml = XmlElementTree.fromstring(resp.content)
2474
2475 for child in resp_xml:
2476 if 'VMWProviderVdcResourcePool' in child.tag:
2477 for schild in child:
2478 if 'Link' in schild.tag:
2479 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2480 hostGroup_href = schild.attrib.get('href')
2481 hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
2482 if hg_resp.status_code != requests.codes.ok:
2483 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
2484 else:
2485 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2486 for hostGroup in hg_resp_xml:
2487 if 'HostGroup' in hostGroup.tag:
2488 if hostGroup.attrib.get("name") == az:
2489 az_found = True
2490 break
2491 return az_found
2492
2493
2494 def get_pvdc_for_org(self, org_vdc, headers):
2495 """ This method gets provider vdc references from organisation
2496
2497 Args:
2498 org_vdc - name of the organisation VDC to find pvdc
2499 headers - headers to make REST call
2500
2501 Returns:
2502 None - if no pvdc href found else
2503 pvdc_href - href to pvdc
2504 """
2505
2506 #Get provider VDC references from vCD
2507 pvdc_href = None
2508 #url = '<vcd url>/api/admin/extension/providerVdcReferences'
2509 url_list = [self.url, '/api/admin/extension/providerVdcReferences']
2510 url = ''.join(url_list)
2511
2512 response = self.perform_request(req_type='GET',url=url, headers=headers)
2513 if response.status_code != requests.codes.ok:
2514 self.logger.debug ("REST API call {} failed. Return status code {}"\
2515 .format(url, response.status_code))
2516 else:
2517 xmlroot_response = XmlElementTree.fromstring(response.text)
2518 for child in xmlroot_response:
2519 if 'ProviderVdcReference' in child.tag:
2520 pvdc_href = child.attrib.get('href')
2521 #Get vdcReferences to find org
2522 pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2523 if pvdc_resp.status_code != requests.codes.ok:
2524 raise vimconn.vimconnException("REST API call {} failed. "\
2525 "Return status code {}"\
2526 .format(url, pvdc_resp.status_code))
2527
2528 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
2529 for child in pvdc_resp_xml:
2530 if 'Link' in child.tag:
2531 if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
2532 vdc_href = child.attrib.get('href')
2533
2534 #Check if provided org is present in vdc
2535 vdc_resp = self.perform_request(req_type='GET',
2536 url=vdc_href,
2537 headers=headers)
2538 if vdc_resp.status_code != requests.codes.ok:
2539 raise vimconn.vimconnException("REST API call {} failed. "\
2540 "Return status code {}"\
2541 .format(url, vdc_resp.status_code))
2542 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
2543 for child in vdc_resp_xml:
2544 if 'VdcReference' in child.tag:
2545 if child.attrib.get('name') == org_vdc:
2546 return pvdc_href
2547
2548
2549 def get_resource_pool_details(self, pvdc_href, headers):
2550 """ Method to get resource pool information.
2551 Host groups are property of resource group.
2552 To get host groups, we need to GET details of resource pool.
2553
2554 Args:
2555 pvdc_href: href to pvdc details
2556 headers: headers
2557
2558 Returns:
2559 respool_href - Returns href link reference to resource pool
2560 """
2561 respool_href = None
2562 resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2563
2564 if resp.status_code != requests.codes.ok:
2565 self.logger.debug ("REST API call {} failed. Return status code {}"\
2566 .format(pvdc_href, resp.status_code))
2567 else:
2568 respool_resp_xml = XmlElementTree.fromstring(resp.content)
2569 for child in respool_resp_xml:
2570 if 'Link' in child.tag:
2571 if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
2572 respool_href = child.attrib.get("href")
2573 break
2574 return respool_href
2575
2576
2577 def log_message(self, msg):
2578 """
2579 Method to log error messages related to Affinity rule creation
2580 in new_vminstance & raise Exception
2581 Args :
2582 msg - Error message to be logged
2583
2584 """
2585 #get token to connect vCD as a normal user
2586 self.get_token()
2587 self.logger.debug(msg)
2588 raise vimconn.vimconnException(msg)
2589
2590
2591 ##
2592 ##
2593 ## based on current discussion
2594 ##
2595 ##
2596 ## server:
2597 # created: '2016-09-08T11:51:58'
2598 # description: simple-instance.linux1.1
2599 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
2600 # hostId: e836c036-74e7-11e6-b249-0800273e724c
2601 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
2602 # status: ACTIVE
2603 # error_msg:
2604 # interfaces: …
2605 #
2606 def get_vminstance(self, vim_vm_uuid=None):
2607 """Returns the VM instance information from VIM"""
2608
2609 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
2610
2611 org, vdc = self.get_vdc_details()
2612 if vdc is None:
2613 raise vimconn.vimconnConnectionException(
2614 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2615
2616 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
2617 if not vm_info_dict:
2618 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2619 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2620
2621 status_key = vm_info_dict['status']
2622 error = ''
2623 try:
2624 vm_dict = {'created': vm_info_dict['created'],
2625 'description': vm_info_dict['name'],
2626 'status': vcdStatusCode2manoFormat[int(status_key)],
2627 'hostId': vm_info_dict['vmuuid'],
2628 'error_msg': error,
2629 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2630
2631 if 'interfaces' in vm_info_dict:
2632 vm_dict['interfaces'] = vm_info_dict['interfaces']
2633 else:
2634 vm_dict['interfaces'] = []
2635 except KeyError:
2636 vm_dict = {'created': '',
2637 'description': '',
2638 'status': vcdStatusCode2manoFormat[int(-1)],
2639 'hostId': vm_info_dict['vmuuid'],
2640 'error_msg': "Inconsistency state",
2641 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2642
2643 return vm_dict
2644
2645 def delete_vminstance(self, vm__vim_uuid, created_items=None):
2646 """Method poweroff and remove VM instance from vcloud director network.
2647
2648 Args:
2649 vm__vim_uuid: VM UUID
2650
2651 Returns:
2652 Returns the instance identifier
2653 """
2654
2655 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
2656
2657 org, vdc = self.get_vdc_details()
2658 vdc_obj = VDC(self.client, href=vdc.get('href'))
2659 if vdc_obj is None:
2660 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
2661 self.tenant_name))
2662 raise vimconn.vimconnException(
2663 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2664
2665 try:
2666 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2667 if vapp_name is None:
2668 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2669 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2670 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2671 vapp_resource = vdc_obj.get_vapp(vapp_name)
2672 vapp = VApp(self.client, resource=vapp_resource)
2673
2674 # Delete vApp and wait for status change if task executed and vApp is None.
2675
2676 if vapp:
2677 if vapp_resource.get('deployed') == 'true':
2678 self.logger.info("Powering off vApp {}".format(vapp_name))
2679 #Power off vApp
2680 powered_off = False
2681 wait_time = 0
2682 while wait_time <= MAX_WAIT_TIME:
2683 power_off_task = vapp.power_off()
2684 result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
2685
2686 if result.get('status') == 'success':
2687 powered_off = True
2688 break
2689 else:
2690 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
2691 time.sleep(INTERVAL_TIME)
2692
2693 wait_time +=INTERVAL_TIME
2694 if not powered_off:
2695 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
2696 else:
2697 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
2698
2699 #Undeploy vApp
2700 self.logger.info("Undeploy vApp {}".format(vapp_name))
2701 wait_time = 0
2702 undeployed = False
2703 while wait_time <= MAX_WAIT_TIME:
2704 vapp = VApp(self.client, resource=vapp_resource)
2705 if not vapp:
2706 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2707 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2708 undeploy_task = vapp.undeploy()
2709
2710 result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
2711 if result.get('status') == 'success':
2712 undeployed = True
2713 break
2714 else:
2715 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
2716 time.sleep(INTERVAL_TIME)
2717
2718 wait_time +=INTERVAL_TIME
2719
2720 if not undeployed:
2721 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
2722
2723 # delete vapp
2724 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
2725
2726 if vapp is not None:
2727 wait_time = 0
2728 result = False
2729
2730 while wait_time <= MAX_WAIT_TIME:
2731 vapp = VApp(self.client, resource=vapp_resource)
2732 if not vapp:
2733 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2734 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2735
2736 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
2737
2738 result = self.client.get_task_monitor().wait_for_success(task=delete_task)
2739 if result.get('status') == 'success':
2740 break
2741 else:
2742 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
2743 time.sleep(INTERVAL_TIME)
2744
2745 wait_time +=INTERVAL_TIME
2746
2747 if result is None:
2748 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
2749 else:
2750 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
2751 config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
2752 catalog_list = self.get_image_list()
2753 try:
2754 config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
2755 if catalog_['name'] == config_drive_catalog_name][0]
2756 except IndexError:
2757 pass
2758 if config_drive_catalog_id:
2759 self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
2760 'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
2761 self.delete_image(config_drive_catalog_id)
2762 return vm__vim_uuid
2763 except:
2764 self.logger.debug(traceback.format_exc())
2765 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
2766
2767
2768 def refresh_vms_status(self, vm_list):
2769 """Get the status of the virtual machines and their interfaces/ports
2770 Params: the list of VM identifiers
2771 Returns a dictionary with:
2772 vm_id: #VIM id of this Virtual Machine
2773 status: #Mandatory. Text with one of:
2774 # DELETED (not found at vim)
2775 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2776 # OTHER (Vim reported other status not understood)
2777 # ERROR (VIM indicates an ERROR status)
2778 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2779 # CREATING (on building process), ERROR
2780 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
2781 #
2782 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2783 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2784 interfaces:
2785 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2786 mac_address: #Text format XX:XX:XX:XX:XX:XX
2787 vim_net_id: #network id where this interface is connected
2788 vim_interface_id: #interface/port VIM id
2789 ip_address: #null, or text with IPv4, IPv6 address
2790 """
2791
2792 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
2793
2794 org,vdc = self.get_vdc_details()
2795 if vdc is None:
2796 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2797
2798 vms_dict = {}
2799 nsx_edge_list = []
2800 for vmuuid in vm_list:
2801 vapp_name = self.get_namebyvappid(vmuuid)
2802 if vapp_name is not None:
2803
2804 try:
2805 vm_pci_details = self.get_vm_pci_details(vmuuid)
2806 vdc_obj = VDC(self.client, href=vdc.get('href'))
2807 vapp_resource = vdc_obj.get_vapp(vapp_name)
2808 the_vapp = VApp(self.client, resource=vapp_resource)
2809
2810 vm_details = {}
2811 for vm in the_vapp.get_all_vms():
2812 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2813 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2814 response = self.perform_request(req_type='GET',
2815 url=vm.get('href'),
2816 headers=headers)
2817
2818 if response.status_code != 200:
2819 self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
2820 "status code : {}".format(vm.get('href'),
2821 response.text,
2822 response.status_code))
2823 raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
2824 "VM details")
2825 xmlroot = XmlElementTree.fromstring(response.text)
2826
2827 result = response.text.replace("\n", " ")
2828 hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
2829 if hdd_match:
2830 hdd_mb = hdd_match.group(1)
2831 vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
2832 cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
2833 if cpus_match:
2834 cpus = cpus_match.group(1)
2835 vm_details['cpus'] = int(cpus) if cpus else None
2836 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
2837 vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
2838 vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
2839 vm_details['id'] = xmlroot.get('id')
2840 vm_details['name'] = xmlroot.get('name')
2841 vm_info = [vm_details]
2842 if vm_pci_details:
2843 vm_info[0].update(vm_pci_details)
2844
2845 vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2846 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2847 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2848
2849 # get networks
2850 vm_ip = None
2851 vm_mac = None
2852 networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
2853 for network in networks:
2854 mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
2855 vm_mac = mac_s.group(1) if mac_s else None
2856 ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
2857 vm_ip = ip_s.group(1) if ip_s else None
2858
2859 if vm_ip is None:
2860 if not nsx_edge_list:
2861 nsx_edge_list = self.get_edge_details()
2862 if nsx_edge_list is None:
2863 raise vimconn.vimconnException("refresh_vms_status:"\
2864 "Failed to get edge details from NSX Manager")
2865 if vm_mac is not None:
2866 vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
2867
2868 net_s = re.search('network="(.*?)"',network)
2869 network_name = net_s.group(1) if net_s else None
2870
2871 vm_net_id = self.get_network_id_by_name(network_name)
2872 interface = {"mac_address": vm_mac,
2873 "vim_net_id": vm_net_id,
2874 "vim_interface_id": vm_net_id,
2875 "ip_address": vm_ip}
2876
2877 vm_dict["interfaces"].append(interface)
2878
2879 # add a vm to vm dict
2880 vms_dict.setdefault(vmuuid, vm_dict)
2881 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
2882 except Exception as exp:
2883 self.logger.debug("Error in response {}".format(exp))
2884 self.logger.debug(traceback.format_exc())
2885
2886 return vms_dict
2887
2888
2889 def get_edge_details(self):
2890 """Get the NSX edge list from NSX Manager
2891 Returns list of NSX edges
2892 """
2893 edge_list = []
2894 rheaders = {'Content-Type': 'application/xml'}
2895 nsx_api_url = '/api/4.0/edges'
2896
2897 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2898
2899 try:
2900 resp = requests.get(self.nsx_manager + nsx_api_url,
2901 auth = (self.nsx_user, self.nsx_password),
2902 verify = False, headers = rheaders)
2903 if resp.status_code == requests.codes.ok:
2904 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2905 for edge_pages in paged_Edge_List:
2906 if edge_pages.tag == 'edgePage':
2907 for edge_summary in edge_pages:
2908 if edge_summary.tag == 'pagingInfo':
2909 for element in edge_summary:
2910 if element.tag == 'totalCount' and element.text == '0':
2911 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2912 .format(self.nsx_manager))
2913
2914 if edge_summary.tag == 'edgeSummary':
2915 for element in edge_summary:
2916 if element.tag == 'id':
2917 edge_list.append(element.text)
2918 else:
2919 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2920 .format(self.nsx_manager))
2921
2922 if not edge_list:
2923 raise vimconn.vimconnException("get_edge_details: "\
2924 "No NSX edge details found: {}"
2925 .format(self.nsx_manager))
2926 else:
2927 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2928 return edge_list
2929 else:
2930 self.logger.debug("get_edge_details: "
2931 "Failed to get NSX edge details from NSX Manager: {}"
2932 .format(resp.content))
2933 return None
2934
2935 except Exception as exp:
2936 self.logger.debug("get_edge_details: "\
2937 "Failed to get NSX edge details from NSX Manager: {}"
2938 .format(exp))
2939 raise vimconn.vimconnException("get_edge_details: "\
2940 "Failed to get NSX edge details from NSX Manager: {}"
2941 .format(exp))
2942
2943
2944 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2945 """Get IP address details from NSX edges, using the MAC address
2946 PARAMS: nsx_edges : List of NSX edges
2947 mac_address : Find IP address corresponding to this MAC address
2948 Returns: IP address corrresponding to the provided MAC address
2949 """
2950
2951 ip_addr = None
2952 rheaders = {'Content-Type': 'application/xml'}
2953
2954 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2955
2956 try:
2957 for edge in nsx_edges:
2958 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2959
2960 resp = requests.get(self.nsx_manager + nsx_api_url,
2961 auth = (self.nsx_user, self.nsx_password),
2962 verify = False, headers = rheaders)
2963
2964 if resp.status_code == requests.codes.ok:
2965 dhcp_leases = XmlElementTree.fromstring(resp.text)
2966 for child in dhcp_leases:
2967 if child.tag == 'dhcpLeaseInfo':
2968 dhcpLeaseInfo = child
2969 for leaseInfo in dhcpLeaseInfo:
2970 for elem in leaseInfo:
2971 if (elem.tag)=='macAddress':
2972 edge_mac_addr = elem.text
2973 if (elem.tag)=='ipAddress':
2974 ip_addr = elem.text
2975 if edge_mac_addr is not None:
2976 if edge_mac_addr == mac_address:
2977 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2978 .format(ip_addr, mac_address,edge))
2979 return ip_addr
2980 else:
2981 self.logger.debug("get_ipaddr_from_NSXedge: "\
2982 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2983 .format(resp.content))
2984
2985 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2986 return None
2987
2988 except XmlElementTree.ParseError as Err:
2989 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2990
2991 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
2992 """Send and action over a VM instance from VIM
2993 Returns the vm_id if the action was successfully sent to the VIM"""
2994
2995 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2996 if vm__vim_uuid is None or action_dict is None:
2997 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2998
2999 org, vdc = self.get_vdc_details()
3000 if vdc is None:
3001 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
3002
3003 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3004 if vapp_name is None:
3005 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3006 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3007 else:
3008 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
3009
3010 try:
3011 vdc_obj = VDC(self.client, href=vdc.get('href'))
3012 vapp_resource = vdc_obj.get_vapp(vapp_name)
3013 vapp = VApp(self.client, resource=vapp_resource)
3014 if "start" in action_dict:
3015 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
3016 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3017 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3018 self.instance_actions_result("start", result, vapp_name)
3019 elif "rebuild" in action_dict:
3020 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
3021 rebuild_task = vapp.deploy(power_on=True)
3022 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
3023 self.instance_actions_result("rebuild", result, vapp_name)
3024 elif "pause" in action_dict:
3025 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
3026 pause_task = vapp.undeploy(action='suspend')
3027 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
3028 self.instance_actions_result("pause", result, vapp_name)
3029 elif "resume" in action_dict:
3030 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
3031 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3032 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3033 self.instance_actions_result("resume", result, vapp_name)
3034 elif "shutoff" in action_dict or "shutdown" in action_dict:
3035 action_name , value = list(action_dict.items())[0]
3036 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
3037 shutdown_task = vapp.shutdown()
3038 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
3039 if action_name == "shutdown":
3040 self.instance_actions_result("shutdown", result, vapp_name)
3041 else:
3042 self.instance_actions_result("shutoff", result, vapp_name)
3043 elif "forceOff" in action_dict:
3044 result = vapp.undeploy(action='powerOff')
3045 self.instance_actions_result("forceOff", result, vapp_name)
3046 elif "reboot" in action_dict:
3047 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
3048 reboot_task = vapp.reboot()
3049 self.client.get_task_monitor().wait_for_success(task=reboot_task)
3050 else:
3051 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
3052 return vm__vim_uuid
3053 except Exception as exp :
3054 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
3055 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
3056
3057 def instance_actions_result(self, action, result, vapp_name):
3058 if result.get('status') == 'success':
3059 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
3060 else:
3061 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
3062
3063 def get_vminstance_console(self, vm_id, console_type="novnc"):
3064 """
3065 Get a console for the virtual machine
3066 Params:
3067 vm_id: uuid of the VM
3068 console_type, can be:
3069 "novnc" (by default), "xvpvnc" for VNC types,
3070 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3071 Returns dict with the console parameters:
3072 protocol: ssh, ftp, http, https, ...
3073 server: usually ip address
3074 port: the http, ssh, ... port
3075 suffix: extra text, e.g. the http path and query string
3076 """
3077 console_dict = {}
3078
3079 if console_type==None or console_type=='novnc':
3080
3081 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
3082
3083 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3084 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3085 response = self.perform_request(req_type='POST',
3086 url=url_rest_call,
3087 headers=headers)
3088
3089 if response.status_code == 403:
3090 response = self.retry_rest('GET', url_rest_call)
3091
3092 if response.status_code != 200:
3093 self.logger.error("REST call {} failed reason : {}"\
3094 "status code : {}".format(url_rest_call,
3095 response.text,
3096 response.status_code))
3097 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3098 "VM Mks ticket details")
3099 s = re.search("<Host>(.*?)</Host>", response.text)
3100 console_dict['server'] = s.group(1) if s else None
3101 s1 = re.search("<Port>(\d+)</Port>", response.text)
3102 console_dict['port'] = s1.group(1) if s1 else None
3103
3104
3105 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
3106
3107 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3108 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3109 response = self.perform_request(req_type='POST',
3110 url=url_rest_call,
3111 headers=headers)
3112
3113 if response.status_code == 403:
3114 response = self.retry_rest('GET', url_rest_call)
3115
3116 if response.status_code != 200:
3117 self.logger.error("REST call {} failed reason : {}"\
3118 "status code : {}".format(url_rest_call,
3119 response.text,
3120 response.status_code))
3121 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3122 "VM console details")
3123 s = re.search(">.*?/(vm-\d+.*)</", response.text)
3124 console_dict['suffix'] = s.group(1) if s else None
3125 console_dict['protocol'] = "https"
3126
3127 return console_dict
3128
3129 # NOT USED METHODS in current version
3130
3131 def host_vim2gui(self, host, server_dict):
3132 """Transform host dictionary from VIM format to GUI format,
3133 and append to the server_dict
3134 """
3135 raise vimconn.vimconnNotImplemented("Should have implemented this")
3136
3137 def get_hosts_info(self):
3138 """Get the information of deployed hosts
3139 Returns the hosts content"""
3140 raise vimconn.vimconnNotImplemented("Should have implemented this")
3141
3142 def get_hosts(self, vim_tenant):
3143 """Get the hosts and deployed instances
3144 Returns the hosts content"""
3145 raise vimconn.vimconnNotImplemented("Should have implemented this")
3146
3147 def get_processor_rankings(self):
3148 """Get the processor rankings in the VIM database"""
3149 raise vimconn.vimconnNotImplemented("Should have implemented this")
3150
3151 def new_host(self, host_data):
3152 """Adds a new host to VIM"""
3153 '''Returns status code of the VIM response'''
3154 raise vimconn.vimconnNotImplemented("Should have implemented this")
3155
3156 def new_external_port(self, port_data):
3157 """Adds a external port to VIM"""
3158 '''Returns the port identifier'''
3159 raise vimconn.vimconnNotImplemented("Should have implemented this")
3160
3161 def new_external_network(self, net_name, net_type):
3162 """Adds a external network to VIM (shared)"""
3163 '''Returns the network identifier'''
3164 raise vimconn.vimconnNotImplemented("Should have implemented this")
3165
3166 def connect_port_network(self, port_id, network_id, admin=False):
3167 """Connects a external port to a network"""
3168 '''Returns status code of the VIM response'''
3169 raise vimconn.vimconnNotImplemented("Should have implemented this")
3170
3171 def new_vminstancefromJSON(self, vm_data):
3172 """Adds a VM instance to VIM"""
3173 '''Returns the instance identifier'''
3174 raise vimconn.vimconnNotImplemented("Should have implemented this")
3175
3176 def get_network_name_by_id(self, network_uuid=None):
3177 """Method gets vcloud director network named based on supplied uuid.
3178
3179 Args:
3180 network_uuid: network_id
3181
3182 Returns:
3183 The return network name.
3184 """
3185
3186 if not network_uuid:
3187 return None
3188
3189 try:
3190 org_dict = self.get_org(self.org_uuid)
3191 if 'networks' in org_dict:
3192 org_network_dict = org_dict['networks']
3193 for net_uuid in org_network_dict:
3194 if net_uuid == network_uuid:
3195 return org_network_dict[net_uuid]
3196 except:
3197 self.logger.debug("Exception in get_network_name_by_id")
3198 self.logger.debug(traceback.format_exc())
3199
3200 return None
3201
3202 def get_network_id_by_name(self, network_name=None):
3203 """Method gets vcloud director network uuid based on supplied name.
3204
3205 Args:
3206 network_name: network_name
3207 Returns:
3208 The return network uuid.
3209 network_uuid: network_id
3210 """
3211 if not network_name:
3212 self.logger.debug("get_network_id_by_name() : Network name is empty")
3213 return None
3214
3215 try:
3216 org_dict = self.get_org(self.org_uuid)
3217 if org_dict and 'networks' in org_dict:
3218 org_network_dict = org_dict['networks']
3219 for net_uuid, net_name in org_network_dict.items():
3220 if net_name == network_name:
3221 return net_uuid
3222
3223 except KeyError as exp:
3224 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
3225
3226 return None
3227
3228 def get_physical_network_by_name(self, physical_network_name):
3229 '''
3230 Methos returns uuid of physical network which passed
3231 Args:
3232 physical_network_name: physical network name
3233 Returns:
3234 UUID of physical_network_name
3235 '''
3236 try:
3237 client_as_admin = self.connect_as_admin()
3238 if not client_as_admin:
3239 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3240 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3241 vm_list_rest_call = ''.join(url_list)
3242
3243 if client_as_admin._session:
3244 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3245 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3246
3247 response = self.perform_request(req_type='GET',
3248 url=vm_list_rest_call,
3249 headers=headers)
3250
3251 provider_network = None
3252 available_network = None
3253 add_vdc_rest_url = None
3254
3255 if response.status_code != requests.codes.ok:
3256 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3257 response.status_code))
3258 return None
3259 else:
3260 try:
3261 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3262 for child in vm_list_xmlroot:
3263
3264 if child.tag.split("}")[1] == 'ProviderVdcReference':
3265 provider_network = child.attrib.get('href')
3266 # application/vnd.vmware.admin.providervdc+xml
3267 if child.tag.split("}")[1] == 'Link':
3268 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3269 and child.attrib.get('rel') == 'add':
3270 add_vdc_rest_url = child.attrib.get('href')
3271 except:
3272 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3273 self.logger.debug("Respond body {}".format(response.text))
3274 return None
3275
3276 # find pvdc provided available network
3277 response = self.perform_request(req_type='GET',
3278 url=provider_network,
3279 headers=headers)
3280
3281 if response.status_code != requests.codes.ok:
3282 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3283 response.status_code))
3284 return None
3285
3286 try:
3287 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3288 for child in vm_list_xmlroot.iter():
3289 if child.tag.split("}")[1] == 'AvailableNetworks':
3290 for networks in child.iter():
3291 if networks.attrib.get('href') is not None and networks.attrib.get('name') is not None:
3292 if networks.attrib.get('name') == physical_network_name:
3293 network_url = networks.attrib.get('href')
3294 available_network = network_url[network_url.rindex('/')+1:]
3295 break
3296 except Exception as e:
3297 return None
3298
3299 return available_network
3300 except Exception as e:
3301 self.logger.error("Error while getting physical network: {}".format(e))
3302
3303 def list_org_action(self):
3304 """
3305 Method leverages vCloud director and query for available organization for particular user
3306
3307 Args:
3308 vca - is active VCA connection.
3309 vdc_name - is a vdc name that will be used to query vms action
3310
3311 Returns:
3312 The return XML respond
3313 """
3314 url_list = [self.url, '/api/org']
3315 vm_list_rest_call = ''.join(url_list)
3316
3317 if self.client._session:
3318 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3319 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3320
3321 response = self.perform_request(req_type='GET',
3322 url=vm_list_rest_call,
3323 headers=headers)
3324
3325 if response.status_code == 403:
3326 response = self.retry_rest('GET', vm_list_rest_call)
3327
3328 if response.status_code == requests.codes.ok:
3329 return response.text
3330
3331 return None
3332
3333 def get_org_action(self, org_uuid=None):
3334 """
3335 Method leverages vCloud director and retrieve available object for organization.
3336
3337 Args:
3338 org_uuid - vCD organization uuid
3339 self.client - is active connection.
3340
3341 Returns:
3342 The return XML respond
3343 """
3344
3345 if org_uuid is None:
3346 return None
3347
3348 url_list = [self.url, '/api/org/', org_uuid]
3349 vm_list_rest_call = ''.join(url_list)
3350
3351 if self.client._session:
3352 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3353 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3354
3355 #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
3356 response = self.perform_request(req_type='GET',
3357 url=vm_list_rest_call,
3358 headers=headers)
3359 if response.status_code == 403:
3360 response = self.retry_rest('GET', vm_list_rest_call)
3361
3362 if response.status_code == requests.codes.ok:
3363 return response.text
3364 return None
3365
3366 def get_org(self, org_uuid=None):
3367 """
3368 Method retrieves available organization in vCloud Director
3369
3370 Args:
3371 org_uuid - is a organization uuid.
3372
3373 Returns:
3374 The return dictionary with following key
3375 "network" - for network list under the org
3376 "catalogs" - for network list under the org
3377 "vdcs" - for vdc list under org
3378 """
3379
3380 org_dict = {}
3381
3382 if org_uuid is None:
3383 return org_dict
3384
3385 content = self.get_org_action(org_uuid=org_uuid)
3386 try:
3387 vdc_list = {}
3388 network_list = {}
3389 catalog_list = {}
3390 vm_list_xmlroot = XmlElementTree.fromstring(content)
3391 for child in vm_list_xmlroot:
3392 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
3393 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3394 org_dict['vdcs'] = vdc_list
3395 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
3396 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3397 org_dict['networks'] = network_list
3398 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
3399 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3400 org_dict['catalogs'] = catalog_list
3401 except:
3402 pass
3403
3404 return org_dict
3405
3406 def get_org_list(self):
3407 """
3408 Method retrieves available organization in vCloud Director
3409
3410 Args:
3411 vca - is active VCA connection.
3412
3413 Returns:
3414 The return dictionary and key for each entry VDC UUID
3415 """
3416
3417 org_dict = {}
3418
3419 content = self.list_org_action()
3420 try:
3421 vm_list_xmlroot = XmlElementTree.fromstring(content)
3422 for vm_xml in vm_list_xmlroot:
3423 if vm_xml.tag.split("}")[1] == 'Org':
3424 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
3425 org_dict[org_uuid[0]] = vm_xml.attrib['name']
3426 except:
3427 pass
3428
3429 return org_dict
3430
3431 def vms_view_action(self, vdc_name=None):
3432 """ Method leverages vCloud director vms query call
3433
3434 Args:
3435 vca - is active VCA connection.
3436 vdc_name - is a vdc name that will be used to query vms action
3437
3438 Returns:
3439 The return XML respond
3440 """
3441 vca = self.connect()
3442 if vdc_name is None:
3443 return None
3444
3445 url_list = [vca.host, '/api/vms/query']
3446 vm_list_rest_call = ''.join(url_list)
3447
3448 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
3449 refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and
3450 ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
3451 if len(refs) == 1:
3452 response = Http.get(url=vm_list_rest_call,
3453 headers=vca.vcloud_session.get_vcloud_headers(),
3454 verify=vca.verify,
3455 logger=vca.logger)
3456 if response.status_code == requests.codes.ok:
3457 return response.text
3458
3459 return None
3460
3461 def get_vapp_list(self, vdc_name=None):
3462 """
3463 Method retrieves vApp list deployed vCloud director and returns a dictionary
3464 contains a list of all vapp deployed for queried VDC.
3465 The key for a dictionary is vApp UUID
3466
3467
3468 Args:
3469 vca - is active VCA connection.
3470 vdc_name - is a vdc name that will be used to query vms action
3471
3472 Returns:
3473 The return dictionary and key for each entry vapp UUID
3474 """
3475
3476 vapp_dict = {}
3477 if vdc_name is None:
3478 return vapp_dict
3479
3480 content = self.vms_view_action(vdc_name=vdc_name)
3481 try:
3482 vm_list_xmlroot = XmlElementTree.fromstring(content)
3483 for vm_xml in vm_list_xmlroot:
3484 if vm_xml.tag.split("}")[1] == 'VMRecord':
3485 if vm_xml.attrib['isVAppTemplate'] == 'true':
3486 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
3487 if 'vappTemplate-' in rawuuid[0]:
3488 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3489 # vm and use raw UUID as key
3490 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
3491 except:
3492 pass
3493
3494 return vapp_dict
3495
3496 def get_vm_list(self, vdc_name=None):
3497 """
3498 Method retrieves VM's list deployed vCloud director. It returns a dictionary
3499 contains a list of all VM's deployed for queried VDC.
3500 The key for a dictionary is VM UUID
3501
3502
3503 Args:
3504 vca - is active VCA connection.
3505 vdc_name - is a vdc name that will be used to query vms action
3506
3507 Returns:
3508 The return dictionary and key for each entry vapp UUID
3509 """
3510 vm_dict = {}
3511
3512 if vdc_name is None:
3513 return vm_dict
3514
3515 content = self.vms_view_action(vdc_name=vdc_name)
3516 try:
3517 vm_list_xmlroot = XmlElementTree.fromstring(content)
3518 for vm_xml in vm_list_xmlroot:
3519 if vm_xml.tag.split("}")[1] == 'VMRecord':
3520 if vm_xml.attrib['isVAppTemplate'] == 'false':
3521 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3522 if 'vm-' in rawuuid[0]:
3523 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3524 # vm and use raw UUID as key
3525 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3526 except:
3527 pass
3528
3529 return vm_dict
3530
3531 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
3532 """
3533 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
3534 contains a list of all VM's deployed for queried VDC.
3535 The key for a dictionary is VM UUID
3536
3537
3538 Args:
3539 vca - is active VCA connection.
3540 vdc_name - is a vdc name that will be used to query vms action
3541
3542 Returns:
3543 The return dictionary and key for each entry vapp UUID
3544 """
3545 vm_dict = {}
3546 vca = self.connect()
3547 if not vca:
3548 raise vimconn.vimconnConnectionException("self.connect() is failed")
3549
3550 if vdc_name is None:
3551 return vm_dict
3552
3553 content = self.vms_view_action(vdc_name=vdc_name)
3554 try:
3555 vm_list_xmlroot = XmlElementTree.fromstring(content)
3556 for vm_xml in vm_list_xmlroot:
3557 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
3558 # lookup done by UUID
3559 if isuuid:
3560 if vapp_name in vm_xml.attrib['container']:
3561 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3562 if 'vm-' in rawuuid[0]:
3563 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3564 break
3565 # lookup done by Name
3566 else:
3567 if vapp_name in vm_xml.attrib['name']:
3568 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3569 if 'vm-' in rawuuid[0]:
3570 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3571 break
3572 except:
3573 pass
3574
3575 return vm_dict
3576
3577 def get_network_action(self, network_uuid=None):
3578 """
3579 Method leverages vCloud director and query network based on network uuid
3580
3581 Args:
3582 vca - is active VCA connection.
3583 network_uuid - is a network uuid
3584
3585 Returns:
3586 The return XML respond
3587 """
3588
3589 if network_uuid is None:
3590 return None
3591
3592 url_list = [self.url, '/api/network/', network_uuid]
3593 vm_list_rest_call = ''.join(url_list)
3594
3595 if self.client._session:
3596 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3597 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3598
3599 response = self.perform_request(req_type='GET',
3600 url=vm_list_rest_call,
3601 headers=headers)
3602 #Retry login if session expired & retry sending request
3603 if response.status_code == 403:
3604 response = self.retry_rest('GET', vm_list_rest_call)
3605
3606 if response.status_code == requests.codes.ok:
3607 return response.text
3608
3609 return None
3610
3611 def get_vcd_network(self, network_uuid=None):
3612 """
3613 Method retrieves available network from vCloud Director
3614
3615 Args:
3616 network_uuid - is VCD network UUID
3617
3618 Each element serialized as key : value pair
3619
3620 Following keys available for access. network_configuration['Gateway'}
3621 <Configuration>
3622 <IpScopes>
3623 <IpScope>
3624 <IsInherited>true</IsInherited>
3625 <Gateway>172.16.252.100</Gateway>
3626 <Netmask>255.255.255.0</Netmask>
3627 <Dns1>172.16.254.201</Dns1>
3628 <Dns2>172.16.254.202</Dns2>
3629 <DnsSuffix>vmwarelab.edu</DnsSuffix>
3630 <IsEnabled>true</IsEnabled>
3631 <IpRanges>
3632 <IpRange>
3633 <StartAddress>172.16.252.1</StartAddress>
3634 <EndAddress>172.16.252.99</EndAddress>
3635 </IpRange>
3636 </IpRanges>
3637 </IpScope>
3638 </IpScopes>
3639 <FenceMode>bridged</FenceMode>
3640
3641 Returns:
3642 The return dictionary and key for each entry vapp UUID
3643 """
3644
3645 network_configuration = {}
3646 if network_uuid is None:
3647 return network_uuid
3648
3649 try:
3650 content = self.get_network_action(network_uuid=network_uuid)
3651 if content is not None:
3652 vm_list_xmlroot = XmlElementTree.fromstring(content)
3653
3654 network_configuration['status'] = vm_list_xmlroot.get("status")
3655 network_configuration['name'] = vm_list_xmlroot.get("name")
3656 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
3657
3658 for child in vm_list_xmlroot:
3659 if child.tag.split("}")[1] == 'IsShared':
3660 network_configuration['isShared'] = child.text.strip()
3661 if child.tag.split("}")[1] == 'Configuration':
3662 for configuration in child.iter():
3663 tagKey = configuration.tag.split("}")[1].strip()
3664 if tagKey != "":
3665 network_configuration[tagKey] = configuration.text.strip()
3666 except Exception as exp :
3667 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
3668 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
3669
3670 return network_configuration
3671
3672 def delete_network_action(self, network_uuid=None):
3673 """
3674 Method delete given network from vCloud director
3675
3676 Args:
3677 network_uuid - is a network uuid that client wish to delete
3678
3679 Returns:
3680 The return None or XML respond or false
3681 """
3682 client = self.connect_as_admin()
3683 if not client:
3684 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
3685 if network_uuid is None:
3686 return False
3687
3688 url_list = [self.url, '/api/admin/network/', network_uuid]
3689 vm_list_rest_call = ''.join(url_list)
3690
3691 if client._session:
3692 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3693 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
3694 response = self.perform_request(req_type='DELETE',
3695 url=vm_list_rest_call,
3696 headers=headers)
3697 if response.status_code == 202:
3698 return True
3699
3700 return False
3701
3702 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3703 ip_profile=None, isshared='true'):
3704 """
3705 Method create network in vCloud director
3706
3707 Args:
3708 network_name - is network name to be created.
3709 net_type - can be 'bridge','data','ptp','mgmt'.
3710 ip_profile is a dict containing the IP parameters of the network
3711 isshared - is a boolean
3712 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3713 It optional attribute. by default if no parent network indicate the first available will be used.
3714
3715 Returns:
3716 The return network uuid or return None
3717 """
3718
3719 new_network_name = [network_name, '-', str(uuid.uuid4())]
3720 content = self.create_network_rest(network_name=''.join(new_network_name),
3721 ip_profile=ip_profile,
3722 net_type=net_type,
3723 parent_network_uuid=parent_network_uuid,
3724 isshared=isshared)
3725 if content is None:
3726 self.logger.debug("Failed create network {}.".format(network_name))
3727 return None
3728
3729 try:
3730 vm_list_xmlroot = XmlElementTree.fromstring(content)
3731 vcd_uuid = vm_list_xmlroot.get('id').split(":")
3732 if len(vcd_uuid) == 4:
3733 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
3734 return vcd_uuid[3]
3735 except:
3736 self.logger.debug("Failed create network {}".format(network_name))
3737 return None
3738
3739 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3740 ip_profile=None, isshared='true'):
3741 """
3742 Method create network in vCloud director
3743
3744 Args:
3745 network_name - is network name to be created.
3746 net_type - can be 'bridge','data','ptp','mgmt'.
3747 ip_profile is a dict containing the IP parameters of the network
3748 isshared - is a boolean
3749 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3750 It optional attribute. by default if no parent network indicate the first available will be used.
3751
3752 Returns:
3753 The return network uuid or return None
3754 """
3755 client_as_admin = self.connect_as_admin()
3756 if not client_as_admin:
3757 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3758 if network_name is None:
3759 return None
3760
3761 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3762 vm_list_rest_call = ''.join(url_list)
3763
3764 if client_as_admin._session:
3765 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3766 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3767
3768 response = self.perform_request(req_type='GET',
3769 url=vm_list_rest_call,
3770 headers=headers)
3771
3772 provider_network = None
3773 available_networks = None
3774 add_vdc_rest_url = None
3775
3776 if response.status_code != requests.codes.ok:
3777 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3778 response.status_code))
3779 return None
3780 else:
3781 try:
3782 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3783 for child in vm_list_xmlroot:
3784
3785 if child.tag.split("}")[1] == 'ProviderVdcReference':
3786 provider_network = child.attrib.get('href')
3787 # application/vnd.vmware.admin.providervdc+xml
3788 if child.tag.split("}")[1] == 'Link':
3789 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3790 and child.attrib.get('rel') == 'add':
3791 add_vdc_rest_url = child.attrib.get('href')
3792 except:
3793 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3794 self.logger.debug("Respond body {}".format(response.text))
3795 return None
3796
3797 # find pvdc provided available network
3798 response = self.perform_request(req_type='GET',
3799 url=provider_network,
3800 headers=headers)
3801
3802 if response.status_code != requests.codes.ok:
3803 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3804 response.status_code))
3805 return None
3806
3807 if parent_network_uuid is None:
3808 try:
3809 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3810 for child in vm_list_xmlroot.iter():
3811 if child.tag.split("}")[1] == 'AvailableNetworks':
3812 for networks in child.iter():
3813 # application/vnd.vmware.admin.network+xml
3814 if networks.attrib.get('href') is not None:
3815 available_networks = networks.attrib.get('href')
3816 break
3817 except:
3818 return None
3819
3820 try:
3821 #Configure IP profile of the network
3822 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
3823
3824 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
3825 subnet_rand = random.randint(0, 255)
3826 ip_base = "192.168.{}.".format(subnet_rand)
3827 ip_profile['subnet_address'] = ip_base + "0/24"
3828 else:
3829 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
3830
3831 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
3832 ip_profile['gateway_address']=ip_base + "1"
3833 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
3834 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
3835 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
3836 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
3837 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
3838 ip_profile['dhcp_start_address']=ip_base + "3"
3839 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
3840 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
3841 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
3842 ip_profile['dns_address']=ip_base + "2"
3843
3844 gateway_address=ip_profile['gateway_address']
3845 dhcp_count=int(ip_profile['dhcp_count'])
3846 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
3847
3848 if ip_profile['dhcp_enabled']==True:
3849 dhcp_enabled='true'
3850 else:
3851 dhcp_enabled='false'
3852 dhcp_start_address=ip_profile['dhcp_start_address']
3853
3854 #derive dhcp_end_address from dhcp_start_address & dhcp_count
3855 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
3856 end_ip_int += dhcp_count - 1
3857 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
3858
3859 ip_version=ip_profile['ip_version']
3860 dns_address=ip_profile['dns_address']
3861 except KeyError as exp:
3862 self.logger.debug("Create Network REST: Key error {}".format(exp))
3863 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
3864
3865 # either use client provided UUID or search for a first available
3866 # if both are not defined we return none
3867 if parent_network_uuid is not None:
3868 provider_network = None
3869 available_networks = None
3870 add_vdc_rest_url = None
3871
3872 url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
3873 add_vdc_rest_url = ''.join(url_list)
3874
3875 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
3876 available_networks = ''.join(url_list)
3877
3878 #Creating all networks as Direct Org VDC type networks.
3879 #Unused in case of Underlay (data/ptp) network interface.
3880 fence_mode="isolated"
3881 is_inherited='false'
3882 dns_list = dns_address.split(";")
3883 dns1 = dns_list[0]
3884 dns2_text = ""
3885 if len(dns_list) >= 2:
3886 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
3887 if net_type == "isolated":
3888 fence_mode="isolated"
3889 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3890 <Description>Openmano created</Description>
3891 <Configuration>
3892 <IpScopes>
3893 <IpScope>
3894 <IsInherited>{1:s}</IsInherited>
3895 <Gateway>{2:s}</Gateway>
3896 <Netmask>{3:s}</Netmask>
3897 <Dns1>{4:s}</Dns1>{5:s}
3898 <IsEnabled>{6:s}</IsEnabled>
3899 <IpRanges>
3900 <IpRange>
3901 <StartAddress>{7:s}</StartAddress>
3902 <EndAddress>{8:s}</EndAddress>
3903 </IpRange>
3904 </IpRanges>
3905 </IpScope>
3906 </IpScopes>
3907 <FenceMode>{9:s}</FenceMode>
3908 </Configuration>
3909 <IsShared>{10:s}</IsShared>
3910 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3911 subnet_address, dns1, dns2_text, dhcp_enabled,
3912 dhcp_start_address, dhcp_end_address,
3913 fence_mode, isshared)
3914 else:
3915 fence_mode = "bridged"
3916 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3917 <Description>Openmano created</Description>
3918 <Configuration>
3919 <IpScopes>
3920 <IpScope>
3921 <IsInherited>{1:s}</IsInherited>
3922 <Gateway>{2:s}</Gateway>
3923 <Netmask>{3:s}</Netmask>
3924 <Dns1>{4:s}</Dns1>{5:s}
3925 <IsEnabled>{6:s}</IsEnabled>
3926 <IpRanges>
3927 <IpRange>
3928 <StartAddress>{7:s}</StartAddress>
3929 <EndAddress>{8:s}</EndAddress>
3930 </IpRange>
3931 </IpRanges>
3932 </IpScope>
3933 </IpScopes>
3934 <ParentNetwork href="{9:s}"/>
3935 <FenceMode>{10:s}</FenceMode>
3936 </Configuration>
3937 <IsShared>{11:s}</IsShared>
3938 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3939 subnet_address, dns1, dns2_text, dhcp_enabled,
3940 dhcp_start_address, dhcp_end_address, available_networks,
3941 fence_mode, isshared)
3942
3943 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
3944 try:
3945 response = self.perform_request(req_type='POST',
3946 url=add_vdc_rest_url,
3947 headers=headers,
3948 data=data)
3949
3950 if response.status_code != 201:
3951 self.logger.debug("Create Network POST REST API call failed. Return status code {}, response.text: {}"
3952 .format(response.status_code, response.text))
3953 else:
3954 network_task = self.get_task_from_response(response.text)
3955 self.logger.debug("Create Network REST : Waiting for Network creation complete")
3956 time.sleep(5)
3957 result = self.client.get_task_monitor().wait_for_success(task=network_task)
3958 if result.get('status') == 'success':
3959 return response.text
3960 else:
3961 self.logger.debug("create_network_rest task failed. Network Create response : {}"
3962 .format(response.text))
3963 except Exception as exp:
3964 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
3965
3966 return None
3967
3968 def convert_cidr_to_netmask(self, cidr_ip=None):
3969 """
3970 Method sets convert CIDR netmask address to normal IP format
3971 Args:
3972 cidr_ip : CIDR IP address
3973 Returns:
3974 netmask : Converted netmask
3975 """
3976 if cidr_ip is not None:
3977 if '/' in cidr_ip:
3978 network, net_bits = cidr_ip.split('/')
3979 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
3980 else:
3981 netmask = cidr_ip
3982 return netmask
3983 return None
3984
3985 def get_provider_rest(self, vca=None):
3986 """
3987 Method gets provider vdc view from vcloud director
3988
3989 Args:
3990 network_name - is network name to be created.
3991 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3992 It optional attribute. by default if no parent network indicate the first available will be used.
3993
3994 Returns:
3995 The return xml content of respond or None
3996 """
3997
3998 url_list = [self.url, '/api/admin']
3999 if vca:
4000 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4001 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4002 response = self.perform_request(req_type='GET',
4003 url=''.join(url_list),
4004 headers=headers)
4005
4006 if response.status_code == requests.codes.ok:
4007 return response.text
4008 return None
4009
4010 def create_vdc(self, vdc_name=None):
4011
4012 vdc_dict = {}
4013
4014 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
4015 if xml_content is not None:
4016 try:
4017 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
4018 for child in task_resp_xmlroot:
4019 if child.tag.split("}")[1] == 'Owner':
4020 vdc_id = child.attrib.get('href').split("/")[-1]
4021 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
4022 return vdc_dict
4023 except:
4024 self.logger.debug("Respond body {}".format(xml_content))
4025
4026 return None
4027
4028 def create_vdc_from_tmpl_rest(self, vdc_name=None):
4029 """
4030 Method create vdc in vCloud director based on VDC template.
4031 it uses pre-defined template.
4032
4033 Args:
4034 vdc_name - name of a new vdc.
4035
4036 Returns:
4037 The return xml content of respond or None
4038 """
4039 # pre-requesite atleast one vdc template should be available in vCD
4040 self.logger.info("Creating new vdc {}".format(vdc_name))
4041 vca = self.connect_as_admin()
4042 if not vca:
4043 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4044 if vdc_name is None:
4045 return None
4046
4047 url_list = [self.url, '/api/vdcTemplates']
4048 vm_list_rest_call = ''.join(url_list)
4049
4050 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4051 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4052 response = self.perform_request(req_type='GET',
4053 url=vm_list_rest_call,
4054 headers=headers)
4055
4056 # container url to a template
4057 vdc_template_ref = None
4058 try:
4059 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4060 for child in vm_list_xmlroot:
4061 # application/vnd.vmware.admin.providervdc+xml
4062 # we need find a template from witch we instantiate VDC
4063 if child.tag.split("}")[1] == 'VdcTemplate':
4064 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
4065 vdc_template_ref = child.attrib.get('href')
4066 except:
4067 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4068 self.logger.debug("Respond body {}".format(response.text))
4069 return None
4070
4071 # if we didn't found required pre defined template we return None
4072 if vdc_template_ref is None:
4073 return None
4074
4075 try:
4076 # instantiate vdc
4077 url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
4078 vm_list_rest_call = ''.join(url_list)
4079 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4080 <Source href="{1:s}"></Source>
4081 <Description>opnemano</Description>
4082 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
4083
4084 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
4085
4086 response = self.perform_request(req_type='POST',
4087 url=vm_list_rest_call,
4088 headers=headers,
4089 data=data)
4090
4091 vdc_task = self.get_task_from_response(response.text)
4092 self.client.get_task_monitor().wait_for_success(task=vdc_task)
4093
4094 # if we all ok we respond with content otherwise by default None
4095 if response.status_code >= 200 and response.status_code < 300:
4096 return response.text
4097 return None
4098 except:
4099 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4100 self.logger.debug("Respond body {}".format(response.text))
4101
4102 return None
4103
4104 def create_vdc_rest(self, vdc_name=None):
4105 """
4106 Method create network in vCloud director
4107
4108 Args:
4109 vdc_name - vdc name to be created
4110 Returns:
4111 The return response
4112 """
4113
4114 self.logger.info("Creating new vdc {}".format(vdc_name))
4115
4116 vca = self.connect_as_admin()
4117 if not vca:
4118 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4119 if vdc_name is None:
4120 return None
4121
4122 url_list = [self.url, '/api/admin/org/', self.org_uuid]
4123 vm_list_rest_call = ''.join(url_list)
4124
4125 if vca._session:
4126 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4127 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4128 response = self.perform_request(req_type='GET',
4129 url=vm_list_rest_call,
4130 headers=headers)
4131
4132 provider_vdc_ref = None
4133 add_vdc_rest_url = None
4134 available_networks = None
4135
4136 if response.status_code != requests.codes.ok:
4137 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
4138 response.status_code))
4139 return None
4140 else:
4141 try:
4142 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4143 for child in vm_list_xmlroot:
4144 # application/vnd.vmware.admin.providervdc+xml
4145 if child.tag.split("}")[1] == 'Link':
4146 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
4147 and child.attrib.get('rel') == 'add':
4148 add_vdc_rest_url = child.attrib.get('href')
4149 except:
4150 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4151 self.logger.debug("Respond body {}".format(response.text))
4152 return None
4153
4154 response = self.get_provider_rest(vca=vca)
4155 try:
4156 vm_list_xmlroot = XmlElementTree.fromstring(response)
4157 for child in vm_list_xmlroot:
4158 if child.tag.split("}")[1] == 'ProviderVdcReferences':
4159 for sub_child in child:
4160 provider_vdc_ref = sub_child.attrib.get('href')
4161 except:
4162 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4163 self.logger.debug("Respond body {}".format(response))
4164 return None
4165
4166 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
4167 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
4168 <AllocationModel>ReservationPool</AllocationModel>
4169 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
4170 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
4171 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
4172 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
4173 <ProviderVdcReference
4174 name="Main Provider"
4175 href="{2:s}" />
4176 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
4177 escape(vdc_name),
4178 provider_vdc_ref)
4179
4180 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
4181
4182 response = self.perform_request(req_type='POST',
4183 url=add_vdc_rest_url,
4184 headers=headers,
4185 data=data)
4186
4187 # if we all ok we respond with content otherwise by default None
4188 if response.status_code == 201:
4189 return response.text
4190 return None
4191
4192 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
4193 """
4194 Method retrieve vapp detail from vCloud director
4195
4196 Args:
4197 vapp_uuid - is vapp identifier.
4198
4199 Returns:
4200 The return network uuid or return None
4201 """
4202
4203 parsed_respond = {}
4204 vca = None
4205
4206 if need_admin_access:
4207 vca = self.connect_as_admin()
4208 else:
4209 vca = self.client
4210
4211 if not vca:
4212 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4213 if vapp_uuid is None:
4214 return None
4215
4216 url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
4217 get_vapp_restcall = ''.join(url_list)
4218
4219 if vca._session:
4220 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4221 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4222 response = self.perform_request(req_type='GET',
4223 url=get_vapp_restcall,
4224 headers=headers)
4225
4226 if response.status_code == 403:
4227 if need_admin_access == False:
4228 response = self.retry_rest('GET', get_vapp_restcall)
4229
4230 if response.status_code != requests.codes.ok:
4231 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
4232 response.status_code))
4233 return parsed_respond
4234
4235 try:
4236 xmlroot_respond = XmlElementTree.fromstring(response.text)
4237 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
4238
4239 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4240 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4241 'vmw': 'http://www.vmware.com/schema/ovf',
4242 'vm': 'http://www.vmware.com/vcloud/v1.5',
4243 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4244 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
4245 "xmlns":"http://www.vmware.com/vcloud/v1.5"
4246 }
4247
4248 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
4249 if created_section is not None:
4250 parsed_respond['created'] = created_section.text
4251
4252 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
4253 if network_section is not None and 'networkName' in network_section.attrib:
4254 parsed_respond['networkname'] = network_section.attrib['networkName']
4255
4256 ipscopes_section = \
4257 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
4258 namespaces)
4259 if ipscopes_section is not None:
4260 for ipscope in ipscopes_section:
4261 for scope in ipscope:
4262 tag_key = scope.tag.split("}")[1]
4263 if tag_key == 'IpRanges':
4264 ip_ranges = scope.getchildren()
4265 for ipblock in ip_ranges:
4266 for block in ipblock:
4267 parsed_respond[block.tag.split("}")[1]] = block.text
4268 else:
4269 parsed_respond[tag_key] = scope.text
4270
4271 # parse children section for other attrib
4272 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4273 if children_section is not None:
4274 parsed_respond['name'] = children_section.attrib['name']
4275 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
4276 if "nestedHypervisorEnabled" in children_section.attrib else None
4277 parsed_respond['deployed'] = children_section.attrib['deployed']
4278 parsed_respond['status'] = children_section.attrib['status']
4279 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
4280 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
4281 nic_list = []
4282 for adapters in network_adapter:
4283 adapter_key = adapters.tag.split("}")[1]
4284 if adapter_key == 'PrimaryNetworkConnectionIndex':
4285 parsed_respond['primarynetwork'] = adapters.text
4286 if adapter_key == 'NetworkConnection':
4287 vnic = {}
4288 if 'network' in adapters.attrib:
4289 vnic['network'] = adapters.attrib['network']
4290 for adapter in adapters:
4291 setting_key = adapter.tag.split("}")[1]
4292 vnic[setting_key] = adapter.text
4293 nic_list.append(vnic)
4294
4295 for link in children_section:
4296 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4297 if link.attrib['rel'] == 'screen:acquireTicket':
4298 parsed_respond['acquireTicket'] = link.attrib
4299 if link.attrib['rel'] == 'screen:acquireMksTicket':
4300 parsed_respond['acquireMksTicket'] = link.attrib
4301
4302 parsed_respond['interfaces'] = nic_list
4303 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4304 if vCloud_extension_section is not None:
4305 vm_vcenter_info = {}
4306 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4307 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4308 if vmext is not None:
4309 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4310 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
4311
4312 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
4313 vm_virtual_hardware_info = {}
4314 if virtual_hardware_section is not None:
4315 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
4316 if item.find("rasd:Description",namespaces).text == "Hard disk":
4317 disk_size = item.find("rasd:HostResource" ,namespaces
4318 ).attrib["{"+namespaces['vm']+"}capacity"]
4319
4320 vm_virtual_hardware_info["disk_size"]= disk_size
4321 break
4322
4323 for link in virtual_hardware_section:
4324 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4325 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
4326 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
4327 break
4328
4329 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
4330 except Exception as exp :
4331 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4332 return parsed_respond
4333
4334 def acquire_console(self, vm_uuid=None):
4335
4336 if vm_uuid is None:
4337 return None
4338 if self.client._session:
4339 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4340 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4341 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
4342 console_dict = vm_dict['acquireTicket']
4343 console_rest_call = console_dict['href']
4344
4345 response = self.perform_request(req_type='POST',
4346 url=console_rest_call,
4347 headers=headers)
4348
4349 if response.status_code == 403:
4350 response = self.retry_rest('POST', console_rest_call)
4351
4352 if response.status_code == requests.codes.ok:
4353 return response.text
4354
4355 return None
4356
4357 def modify_vm_disk(self, vapp_uuid, flavor_disk):
4358 """
4359 Method retrieve vm disk details
4360
4361 Args:
4362 vapp_uuid - is vapp identifier.
4363 flavor_disk - disk size as specified in VNFD (flavor)
4364
4365 Returns:
4366 The return network uuid or return None
4367 """
4368 status = None
4369 try:
4370 #Flavor disk is in GB convert it into MB
4371 flavor_disk = int(flavor_disk) * 1024
4372 vm_details = self.get_vapp_details_rest(vapp_uuid)
4373 if vm_details:
4374 vm_name = vm_details["name"]
4375 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
4376
4377 if vm_details and "vm_virtual_hardware" in vm_details:
4378 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
4379 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4380
4381 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
4382
4383 if flavor_disk > vm_disk:
4384 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
4385 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
4386 vm_disk, flavor_disk ))
4387 else:
4388 status = True
4389 self.logger.info("No need to modify disk of VM {}".format(vm_name))
4390
4391 return status
4392 except Exception as exp:
4393 self.logger.info("Error occurred while modifing disk size {}".format(exp))
4394
4395
4396 def modify_vm_disk_rest(self, disk_href , disk_size):
4397 """
4398 Method retrieve modify vm disk size
4399
4400 Args:
4401 disk_href - vCD API URL to GET and PUT disk data
4402 disk_size - disk size as specified in VNFD (flavor)
4403
4404 Returns:
4405 The return network uuid or return None
4406 """
4407 if disk_href is None or disk_size is None:
4408 return None
4409
4410 if self.client._session:
4411 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4412 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4413 response = self.perform_request(req_type='GET',
4414 url=disk_href,
4415 headers=headers)
4416
4417 if response.status_code == 403:
4418 response = self.retry_rest('GET', disk_href)
4419
4420 if response.status_code != requests.codes.ok:
4421 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
4422 response.status_code))
4423 return None
4424 try:
4425 lxmlroot_respond = lxmlElementTree.fromstring(response.text)
4426 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
4427 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4428
4429 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4430 if item.find("rasd:Description",namespaces).text == "Hard disk":
4431 disk_item = item.find("rasd:HostResource" ,namespaces )
4432 if disk_item is not None:
4433 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
4434 break
4435
4436 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
4437 xml_declaration=True)
4438
4439 #Send PUT request to modify disk size
4440 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4441
4442 response = self.perform_request(req_type='PUT',
4443 url=disk_href,
4444 headers=headers,
4445 data=data)
4446 if response.status_code == 403:
4447 add_headers = {'Content-Type': headers['Content-Type']}
4448 response = self.retry_rest('PUT', disk_href, add_headers, data)
4449
4450 if response.status_code != 202:
4451 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
4452 response.status_code))
4453 else:
4454 modify_disk_task = self.get_task_from_response(response.text)
4455 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
4456 if result.get('status') == 'success':
4457 return True
4458 else:
4459 return False
4460 return None
4461
4462 except Exception as exp:
4463 self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
4464 return None
4465
4466 def add_serial_device(self, vapp_uuid):
4467 """
4468 Method to attach a serial device to a VM
4469
4470 Args:
4471 vapp_uuid - uuid of vApp/VM
4472
4473 Returns:
4474 """
4475 self.logger.info("Add serial devices into vApp {}".format(vapp_uuid))
4476 _, content = self.get_vcenter_content()
4477 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4478 if vm_moref_id:
4479 try:
4480 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4481 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4482 if host_obj and vm_obj:
4483 spec = vim.vm.ConfigSpec()
4484 spec.deviceChange = []
4485 serial_spec = vim.vm.device.VirtualDeviceSpec()
4486 serial_spec.operation = 'add'
4487 serial_port = vim.vm.device.VirtualSerialPort()
4488 serial_port.yieldOnPoll = True
4489 backing = serial_port.URIBackingInfo()
4490 backing.serviceURI = 'tcp://:65500'
4491 backing.direction = 'server'
4492 serial_port.backing = backing
4493 serial_spec.device = serial_port
4494 spec.deviceChange.append(serial_spec)
4495 vm_obj.ReconfigVM_Task(spec=spec)
4496
4497 self.logger.info("Adding serial device to VM {}".format(vm_obj))
4498 except vmodl.MethodFault as error:
4499 self.logger.error("Error occurred while adding PCI devices {} ", error)
4500
4501 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
4502 """
4503 Method to attach pci devices to VM
4504
4505 Args:
4506 vapp_uuid - uuid of vApp/VM
4507 pci_devices - pci devices infromation as specified in VNFD (flavor)
4508
4509 Returns:
4510 The status of add pci device task , vm object and
4511 vcenter_conect object
4512 """
4513 vm_obj = None
4514 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid))
4515 vcenter_conect, content = self.get_vcenter_content()
4516 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4517
4518 if vm_moref_id:
4519 try:
4520 no_of_pci_devices = len(pci_devices)
4521 if no_of_pci_devices > 0:
4522 #Get VM and its host
4523 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4524 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4525 if host_obj and vm_obj:
4526 #get PCI devies from host on which vapp is currently installed
4527 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
4528
4529 if avilable_pci_devices is None:
4530 #find other hosts with active pci devices
4531 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
4532 content,
4533 no_of_pci_devices
4534 )
4535
4536 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4537 #Migrate vm to the host where PCI devices are availble
4538 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
4539 task = self.relocate_vm(new_host_obj, vm_obj)
4540 if task is not None:
4541 result = self.wait_for_vcenter_task(task, vcenter_conect)
4542 self.logger.info("Migrate VM status: {}".format(result))
4543 host_obj = new_host_obj
4544 else:
4545 self.logger.info("Fail to migrate VM : {}".format(result))
4546 raise vimconn.vimconnNotFoundException(
4547 "Fail to migrate VM : {} to host {}".format(
4548 vmname_andid,
4549 new_host_obj)
4550 )
4551
4552 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4553 #Add PCI devices one by one
4554 for pci_device in avilable_pci_devices:
4555 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
4556 if task:
4557 status= self.wait_for_vcenter_task(task, vcenter_conect)
4558 if status:
4559 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4560 else:
4561 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4562 return True, vm_obj, vcenter_conect
4563 else:
4564 self.logger.error("Currently there is no host with"\
4565 " {} number of avaialble PCI devices required for VM {}".format(
4566 no_of_pci_devices,
4567 vmname_andid)
4568 )
4569 raise vimconn.vimconnNotFoundException(
4570 "Currently there is no host with {} "\
4571 "number of avaialble PCI devices required for VM {}".format(
4572 no_of_pci_devices,
4573 vmname_andid))
4574 else:
4575 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
4576
4577 except vmodl.MethodFault as error:
4578 self.logger.error("Error occurred while adding PCI devices {} ",error)
4579 return None, vm_obj, vcenter_conect
4580
4581 def get_vm_obj(self, content, mob_id):
4582 """
4583 Method to get the vsphere VM object associated with a given morf ID
4584 Args:
4585 vapp_uuid - uuid of vApp/VM
4586 content - vCenter content object
4587 mob_id - mob_id of VM
4588
4589 Returns:
4590 VM and host object
4591 """
4592 vm_obj = None
4593 host_obj = None
4594 try :
4595 container = content.viewManager.CreateContainerView(content.rootFolder,
4596 [vim.VirtualMachine], True
4597 )
4598 for vm in container.view:
4599 mobID = vm._GetMoId()
4600 if mobID == mob_id:
4601 vm_obj = vm
4602 host_obj = vm_obj.runtime.host
4603 break
4604 except Exception as exp:
4605 self.logger.error("Error occurred while finding VM object : {}".format(exp))
4606 return host_obj, vm_obj
4607
4608 def get_pci_devices(self, host, need_devices):
4609 """
4610 Method to get the details of pci devices on given host
4611 Args:
4612 host - vSphere host object
4613 need_devices - number of pci devices needed on host
4614
4615 Returns:
4616 array of pci devices
4617 """
4618 all_devices = []
4619 all_device_ids = []
4620 used_devices_ids = []
4621
4622 try:
4623 if host:
4624 pciPassthruInfo = host.config.pciPassthruInfo
4625 pciDevies = host.hardware.pciDevice
4626
4627 for pci_status in pciPassthruInfo:
4628 if pci_status.passthruActive:
4629 for device in pciDevies:
4630 if device.id == pci_status.id:
4631 all_device_ids.append(device.id)
4632 all_devices.append(device)
4633
4634 #check if devices are in use
4635 avalible_devices = all_devices
4636 for vm in host.vm:
4637 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
4638 vm_devices = vm.config.hardware.device
4639 for device in vm_devices:
4640 if type(device) is vim.vm.device.VirtualPCIPassthrough:
4641 if device.backing.id in all_device_ids:
4642 for use_device in avalible_devices:
4643 if use_device.id == device.backing.id:
4644 avalible_devices.remove(use_device)
4645 used_devices_ids.append(device.backing.id)
4646 self.logger.debug("Device {} from devices {}"\
4647 "is in use".format(device.backing.id,
4648 device)
4649 )
4650 if len(avalible_devices) < need_devices:
4651 self.logger.debug("Host {} don't have {} number of active devices".format(host,
4652 need_devices))
4653 self.logger.debug("found only {} devices {}".format(len(avalible_devices),
4654 avalible_devices))
4655 return None
4656 else:
4657 required_devices = avalible_devices[:need_devices]
4658 self.logger.info("Found {} PCI devices on host {} but required only {}".format(
4659 len(avalible_devices),
4660 host,
4661 need_devices))
4662 self.logger.info("Retruning {} devices as {}".format(need_devices,
4663 required_devices ))
4664 return required_devices
4665
4666 except Exception as exp:
4667 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
4668
4669 return None
4670
4671 def get_host_and_PCIdevices(self, content, need_devices):
4672 """
4673 Method to get the details of pci devices infromation on all hosts
4674
4675 Args:
4676 content - vSphere host object
4677 need_devices - number of pci devices needed on host
4678
4679 Returns:
4680 array of pci devices and host object
4681 """
4682 host_obj = None
4683 pci_device_objs = None
4684 try:
4685 if content:
4686 container = content.viewManager.CreateContainerView(content.rootFolder,
4687 [vim.HostSystem], True)
4688 for host in container.view:
4689 devices = self.get_pci_devices(host, need_devices)
4690 if devices:
4691 host_obj = host
4692 pci_device_objs = devices
4693 break
4694 except Exception as exp:
4695 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
4696
4697 return host_obj,pci_device_objs
4698
4699 def relocate_vm(self, dest_host, vm) :
4700 """
4701 Method to get the relocate VM to new host
4702
4703 Args:
4704 dest_host - vSphere host object
4705 vm - vSphere VM object
4706
4707 Returns:
4708 task object
4709 """
4710 task = None
4711 try:
4712 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
4713 task = vm.Relocate(relocate_spec)
4714 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
4715 except Exception as exp:
4716 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
4717 dest_host, vm, exp))
4718 return task
4719
4720 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
4721 """
4722 Waits and provides updates on a vSphere task
4723 """
4724 while task.info.state == vim.TaskInfo.State.running:
4725 time.sleep(2)
4726
4727 if task.info.state == vim.TaskInfo.State.success:
4728 if task.info.result is not None and not hideResult:
4729 self.logger.info('{} completed successfully, result: {}'.format(
4730 actionName,
4731 task.info.result))
4732 else:
4733 self.logger.info('Task {} completed successfully.'.format(actionName))
4734 else:
4735 self.logger.error('{} did not complete successfully: {} '.format(
4736 actionName,
4737 task.info.error)
4738 )
4739
4740 return task.info.result
4741
4742 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
4743 """
4744 Method to add pci device in given VM
4745
4746 Args:
4747 host_object - vSphere host object
4748 vm_object - vSphere VM object
4749 host_pci_dev - host_pci_dev must be one of the devices from the
4750 host_object.hardware.pciDevice list
4751 which is configured as a PCI passthrough device
4752
4753 Returns:
4754 task object
4755 """
4756 task = None
4757 if vm_object and host_object and host_pci_dev:
4758 try :
4759 #Add PCI device to VM
4760 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
4761 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
4762
4763 if host_pci_dev.id not in systemid_by_pciid:
4764 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
4765 return None
4766
4767 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
4768 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
4769 id=host_pci_dev.id,
4770 systemId=systemid_by_pciid[host_pci_dev.id],
4771 vendorId=host_pci_dev.vendorId,
4772 deviceName=host_pci_dev.deviceName)
4773
4774 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
4775
4776 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
4777 new_device_config.operation = "add"
4778 vmConfigSpec = vim.vm.ConfigSpec()
4779 vmConfigSpec.deviceChange = [new_device_config]
4780
4781 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
4782 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
4783 host_pci_dev, vm_object, host_object)
4784 )
4785 except Exception as exp:
4786 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
4787 host_pci_dev,
4788 vm_object,
4789 exp))
4790 return task
4791
4792 def get_vm_vcenter_info(self):
4793 """
4794 Method to get details of vCenter and vm
4795
4796 Args:
4797 vapp_uuid - uuid of vApp or VM
4798
4799 Returns:
4800 Moref Id of VM and deails of vCenter
4801 """
4802 vm_vcenter_info = {}
4803
4804 if self.vcenter_ip is not None:
4805 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
4806 else:
4807 raise vimconn.vimconnException(message="vCenter IP is not provided."\
4808 " Please provide vCenter IP while attaching datacenter to tenant in --config")
4809 if self.vcenter_port is not None:
4810 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
4811 else:
4812 raise vimconn.vimconnException(message="vCenter port is not provided."\
4813 " Please provide vCenter port while attaching datacenter to tenant in --config")
4814 if self.vcenter_user is not None:
4815 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
4816 else:
4817 raise vimconn.vimconnException(message="vCenter user is not provided."\
4818 " Please provide vCenter user while attaching datacenter to tenant in --config")
4819
4820 if self.vcenter_password is not None:
4821 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
4822 else:
4823 raise vimconn.vimconnException(message="vCenter user password is not provided."\
4824 " Please provide vCenter user password while attaching datacenter to tenant in --config")
4825
4826 return vm_vcenter_info
4827
4828
4829 def get_vm_pci_details(self, vmuuid):
4830 """
4831 Method to get VM PCI device details from vCenter
4832
4833 Args:
4834 vm_obj - vSphere VM object
4835
4836 Returns:
4837 dict of PCI devives attached to VM
4838
4839 """
4840 vm_pci_devices_info = {}
4841 try:
4842 vcenter_conect, content = self.get_vcenter_content()
4843 vm_moref_id = self.get_vm_moref_id(vmuuid)
4844 if vm_moref_id:
4845 #Get VM and its host
4846 if content:
4847 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4848 if host_obj and vm_obj:
4849 vm_pci_devices_info["host_name"]= host_obj.name
4850 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
4851 for device in vm_obj.config.hardware.device:
4852 if type(device) == vim.vm.device.VirtualPCIPassthrough:
4853 device_details={'devide_id':device.backing.id,
4854 'pciSlotNumber':device.slotInfo.pciSlotNumber,
4855 }
4856 vm_pci_devices_info[device.deviceInfo.label] = device_details
4857 else:
4858 self.logger.error("Can not connect to vCenter while getting "\
4859 "PCI devices infromationn")
4860 return vm_pci_devices_info
4861 except Exception as exp:
4862 self.logger.error("Error occurred while getting VM infromationn"\
4863 " for VM : {}".format(exp))
4864 raise vimconn.vimconnException(message=exp)
4865
4866
4867 def reserve_memory_for_all_vms(self, vapp, memory_mb):
4868 """
4869 Method to reserve memory for all VMs
4870 Args :
4871 vapp - VApp
4872 memory_mb - Memory in MB
4873 Returns:
4874 None
4875 """
4876
4877 self.logger.info("Reserve memory for all VMs")
4878 for vms in vapp.get_all_vms():
4879 vm_id = vms.get('id').split(':')[-1]
4880
4881 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
4882
4883 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4884 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4885 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
4886 response = self.perform_request(req_type='GET',
4887 url=url_rest_call,
4888 headers=headers)
4889
4890 if response.status_code == 403:
4891 response = self.retry_rest('GET', url_rest_call)
4892
4893 if response.status_code != 200:
4894 self.logger.error("REST call {} failed reason : {}"\
4895 "status code : {}".format(url_rest_call,
4896 response.text,
4897 response.status_code))
4898 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
4899 "memory")
4900
4901 bytexml = bytes(bytearray(response.text, encoding='utf-8'))
4902 contentelem = lxmlElementTree.XML(bytexml)
4903 namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.items() if prefix}
4904 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4905
4906 # Find the reservation element in the response
4907 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
4908 for memelem in memelem_list:
4909 memelem.text = str(memory_mb)
4910
4911 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
4912
4913 response = self.perform_request(req_type='PUT',
4914 url=url_rest_call,
4915 headers=headers,
4916 data=newdata)
4917
4918 if response.status_code == 403:
4919 add_headers = {'Content-Type': headers['Content-Type']}
4920 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4921
4922 if response.status_code != 202:
4923 self.logger.error("REST call {} failed reason : {}"\
4924 "status code : {} ".format(url_rest_call,
4925 response.text,
4926 response.status_code))
4927 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
4928 "virtual hardware memory section")
4929 else:
4930 mem_task = self.get_task_from_response(response.text)
4931 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
4932 if result.get('status') == 'success':
4933 self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
4934 .format(vm_id))
4935 else:
4936 self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
4937 .format(vm_id))
4938
4939 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
4940 """
4941 Configure VApp network config with org vdc network
4942 Args :
4943 vapp - VApp
4944 Returns:
4945 None
4946 """
4947
4948 self.logger.info("Connecting vapp {} to org vdc network {}".
4949 format(vapp_id, net_name))
4950
4951 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
4952
4953 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4954 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4955 response = self.perform_request(req_type='GET',
4956 url=url_rest_call,
4957 headers=headers)
4958
4959 if response.status_code == 403:
4960 response = self.retry_rest('GET', url_rest_call)
4961
4962 if response.status_code != 200:
4963 self.logger.error("REST call {} failed reason : {}"\
4964 "status code : {}".format(url_rest_call,
4965 response.text,
4966 response.status_code))
4967 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
4968 "network config section")
4969
4970 data = response.text
4971 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
4972 net_id = self.get_network_id_by_name(net_name)
4973 if not net_id:
4974 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
4975 "existing network")
4976
4977 bytexml = bytes(bytearray(data, encoding='utf-8'))
4978 newelem = lxmlElementTree.XML(bytexml)
4979 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
4980 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
4981 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
4982
4983 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
4984 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
4985 if parentnetworklist:
4986 for pn in parentnetworklist:
4987 if "href" not in pn.keys():
4988 id_val = pn.get("id")
4989 href_val = "{}/api/network/{}".format(self.url, id_val)
4990 pn.set("href", href_val)
4991
4992 newstr = """<NetworkConfig networkName="{}">
4993 <Configuration>
4994 <ParentNetwork href="{}/api/network/{}"/>
4995 <FenceMode>bridged</FenceMode>
4996 </Configuration>
4997 </NetworkConfig>
4998 """.format(net_name, self.url, net_id)
4999 newcfgelem = lxmlElementTree.fromstring(newstr)
5000 if nwcfglist:
5001 nwcfglist[0].addnext(newcfgelem)
5002
5003 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
5004
5005 response = self.perform_request(req_type='PUT',
5006 url=url_rest_call,
5007 headers=headers,
5008 data=newdata)
5009
5010 if response.status_code == 403:
5011 add_headers = {'Content-Type': headers['Content-Type']}
5012 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5013
5014 if response.status_code != 202:
5015 self.logger.error("REST call {} failed reason : {}"\
5016 "status code : {} ".format(url_rest_call,
5017 response.text,
5018 response.status_code))
5019 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
5020 "network config section")
5021 else:
5022 vapp_task = self.get_task_from_response(response.text)
5023 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
5024 if result.get('status') == 'success':
5025 self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
5026 "network {}".format(vapp_id, net_name))
5027 else:
5028 self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
5029 "connect to network {}".format(vapp_id, net_name))
5030
5031 def remove_primary_network_adapter_from_all_vms(self, vapp):
5032 """
5033 Method to remove network adapter type to vm
5034 Args :
5035 vapp - VApp
5036 Returns:
5037 None
5038 """
5039
5040 self.logger.info("Removing network adapter from all VMs")
5041 for vms in vapp.get_all_vms():
5042 vm_id = vms.get('id').split(':')[-1]
5043
5044 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5045
5046 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5047 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5048 response = self.perform_request(req_type='GET',
5049 url=url_rest_call,
5050 headers=headers)
5051
5052 if response.status_code == 403:
5053 response = self.retry_rest('GET', url_rest_call)
5054
5055 if response.status_code != 200:
5056 self.logger.error("REST call {} failed reason : {}"\
5057 "status code : {}".format(url_rest_call,
5058 response.text,
5059 response.status_code))
5060 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
5061 "network connection section")
5062
5063 data = response.text
5064 data = data.split('<Link rel="edit"')[0]
5065
5066 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5067
5068 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5069 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
5070 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5071 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
5072 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
5073 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
5074 xmlns:vmw="http://www.vmware.com/schema/ovf"
5075 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
5076 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
5077 xmlns:ns9="http://www.vmware.com/vcloud/versions"
5078 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
5079 <ovf:Info>Specifies the available VM network connections</ovf:Info>
5080 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
5081 <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
5082 </NetworkConnectionSection>""".format(url=url_rest_call)
5083 response = self.perform_request(req_type='PUT',
5084 url=url_rest_call,
5085 headers=headers,
5086 data=newdata)
5087
5088 if response.status_code == 403:
5089 add_headers = {'Content-Type': headers['Content-Type']}
5090 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5091
5092 if response.status_code != 202:
5093 self.logger.error("REST call {} failed reason : {}"\
5094 "status code : {} ".format(url_rest_call,
5095 response.text,
5096 response.status_code))
5097 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
5098 "network connection section")
5099 else:
5100 nic_task = self.get_task_from_response(response.text)
5101 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5102 if result.get('status') == 'success':
5103 self.logger.info("remove_primary_network_adapter(): VM {} conneced to "\
5104 "default NIC type".format(vm_id))
5105 else:
5106 self.logger.error("remove_primary_network_adapter(): VM {} failed to "\
5107 "connect NIC type".format(vm_id))
5108
5109 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
5110 """
5111 Method to add network adapter type to vm
5112 Args :
5113 network_name - name of network
5114 primary_nic_index - int value for primary nic index
5115 nicIndex - int value for nic index
5116 nic_type - specify model name to which add to vm
5117 Returns:
5118 None
5119 """
5120
5121 self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
5122 format(network_name, nicIndex, nic_type))
5123 try:
5124 ip_address = None
5125 floating_ip = False
5126 mac_address = None
5127 if 'floating_ip' in net: floating_ip = net['floating_ip']
5128
5129 # Stub for ip_address feature
5130 if 'ip_address' in net: ip_address = net['ip_address']
5131
5132 if 'mac_address' in net: mac_address = net['mac_address']
5133
5134 if floating_ip:
5135 allocation_mode = "POOL"
5136 elif ip_address:
5137 allocation_mode = "MANUAL"
5138 else:
5139 allocation_mode = "DHCP"
5140
5141 if not nic_type:
5142 for vms in vapp.get_all_vms():
5143 vm_id = vms.get('id').split(':')[-1]
5144
5145 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5146
5147 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5148 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5149 response = self.perform_request(req_type='GET',
5150 url=url_rest_call,
5151 headers=headers)
5152
5153 if response.status_code == 403:
5154 response = self.retry_rest('GET', url_rest_call)
5155
5156 if response.status_code != 200:
5157 self.logger.error("REST call {} failed reason : {}"\
5158 "status code : {}".format(url_rest_call,
5159 response.text,
5160 response.status_code))
5161 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5162 "network connection section")
5163
5164 data = response.text
5165 data = data.split('<Link rel="edit"')[0]
5166 if '<PrimaryNetworkConnectionIndex>' not in data:
5167 self.logger.debug("add_network_adapter PrimaryNIC not in data")
5168 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5169 <NetworkConnection network="{}">
5170 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5171 <IsConnected>true</IsConnected>
5172 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5173 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5174 allocation_mode)
5175 # Stub for ip_address feature
5176 if ip_address:
5177 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5178 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5179
5180 if mac_address:
5181 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5182 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5183
5184 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5185 else:
5186 self.logger.debug("add_network_adapter PrimaryNIC in data")
5187 new_item = """<NetworkConnection network="{}">
5188 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5189 <IsConnected>true</IsConnected>
5190 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5191 </NetworkConnection>""".format(network_name, nicIndex,
5192 allocation_mode)
5193 # Stub for ip_address feature
5194 if ip_address:
5195 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5196 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5197
5198 if mac_address:
5199 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5200 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5201
5202 data = data + new_item + '</NetworkConnectionSection>'
5203
5204 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5205
5206 response = self.perform_request(req_type='PUT',
5207 url=url_rest_call,
5208 headers=headers,
5209 data=data)
5210
5211 if response.status_code == 403:
5212 add_headers = {'Content-Type': headers['Content-Type']}
5213 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5214
5215 if response.status_code != 202:
5216 self.logger.error("REST call {} failed reason : {}"\
5217 "status code : {} ".format(url_rest_call,
5218 response.text,
5219 response.status_code))
5220 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5221 "network connection section")
5222 else:
5223 nic_task = self.get_task_from_response(response.text)
5224 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5225 if result.get('status') == 'success':
5226 self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
5227 "default NIC type".format(vm_id))
5228 else:
5229 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
5230 "connect NIC type".format(vm_id))
5231 else:
5232 for vms in vapp.get_all_vms():
5233 vm_id = vms.get('id').split(':')[-1]
5234
5235 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5236
5237 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5238 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5239 response = self.perform_request(req_type='GET',
5240 url=url_rest_call,
5241 headers=headers)
5242
5243 if response.status_code == 403:
5244 response = self.retry_rest('GET', url_rest_call)
5245
5246 if response.status_code != 200:
5247 self.logger.error("REST call {} failed reason : {}"\
5248 "status code : {}".format(url_rest_call,
5249 response.text,
5250 response.status_code))
5251 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5252 "network connection section")
5253 data = response.text
5254 data = data.split('<Link rel="edit"')[0]
5255 vcd_netadapter_type = nic_type
5256 if nic_type in ['SR-IOV', 'VF']:
5257 vcd_netadapter_type = "SRIOVETHERNETCARD"
5258
5259 if '<PrimaryNetworkConnectionIndex>' not in data:
5260 self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
5261 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5262 <NetworkConnection network="{}">
5263 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5264 <IsConnected>true</IsConnected>
5265 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5266 <NetworkAdapterType>{}</NetworkAdapterType>
5267 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5268 allocation_mode, vcd_netadapter_type)
5269 # Stub for ip_address feature
5270 if ip_address:
5271 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5272 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5273
5274 if mac_address:
5275 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5276 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5277
5278 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5279 else:
5280 self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
5281 new_item = """<NetworkConnection network="{}">
5282 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5283 <IsConnected>true</IsConnected>
5284 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5285 <NetworkAdapterType>{}</NetworkAdapterType>
5286 </NetworkConnection>""".format(network_name, nicIndex,
5287 allocation_mode, vcd_netadapter_type)
5288 # Stub for ip_address feature
5289 if ip_address:
5290 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5291 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5292
5293 if mac_address:
5294 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5295 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5296
5297 data = data + new_item + '</NetworkConnectionSection>'
5298
5299 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5300
5301 response = self.perform_request(req_type='PUT',
5302 url=url_rest_call,
5303 headers=headers,
5304 data=data)
5305
5306 if response.status_code == 403:
5307 add_headers = {'Content-Type': headers['Content-Type']}
5308 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5309
5310 if response.status_code != 202:
5311 self.logger.error("REST call {} failed reason : {}"\
5312 "status code : {}".format(url_rest_call,
5313 response.text,
5314 response.status_code))
5315 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5316 "network connection section")
5317 else:
5318 nic_task = self.get_task_from_response(response.text)
5319 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5320 if result.get('status') == 'success':
5321 self.logger.info("add_network_adapter_to_vms(): VM {} "\
5322 "conneced to NIC type {}".format(vm_id, nic_type))
5323 else:
5324 self.logger.error("add_network_adapter_to_vms(): VM {} "\
5325 "failed to connect NIC type {}".format(vm_id, nic_type))
5326 except Exception as exp:
5327 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
5328 "while adding Network adapter")
5329 raise vimconn.vimconnException(message=exp)
5330
5331
5332 def set_numa_affinity(self, vmuuid, paired_threads_id):
5333 """
5334 Method to assign numa affinity in vm configuration parammeters
5335 Args :
5336 vmuuid - vm uuid
5337 paired_threads_id - one or more virtual processor
5338 numbers
5339 Returns:
5340 return if True
5341 """
5342 try:
5343 vcenter_conect, content = self.get_vcenter_content()
5344 vm_moref_id = self.get_vm_moref_id(vmuuid)
5345
5346 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
5347 if vm_obj:
5348 config_spec = vim.vm.ConfigSpec()
5349 config_spec.extraConfig = []
5350 opt = vim.option.OptionValue()
5351 opt.key = 'numa.nodeAffinity'
5352 opt.value = str(paired_threads_id)
5353 config_spec.extraConfig.append(opt)
5354 task = vm_obj.ReconfigVM_Task(config_spec)
5355 if task:
5356 result = self.wait_for_vcenter_task(task, vcenter_conect)
5357 extra_config = vm_obj.config.extraConfig
5358 flag = False
5359 for opts in extra_config:
5360 if 'numa.nodeAffinity' in opts.key:
5361 flag = True
5362 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
5363 "value {} for vm {}".format(opt.value, vm_obj))
5364 if flag:
5365 return
5366 else:
5367 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
5368 except Exception as exp:
5369 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
5370 "for VM {} : {}".format(vm_obj, vm_moref_id))
5371 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
5372 "affinity".format(exp))
5373
5374
5375 def cloud_init(self, vapp, cloud_config):
5376 """
5377 Method to inject ssh-key
5378 vapp - vapp object
5379 cloud_config a dictionary with:
5380 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
5381 'users': (optional) list of users to be inserted, each item is a dict with:
5382 'name': (mandatory) user name,
5383 'key-pairs': (optional) list of strings with the public key to be inserted to the user
5384 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
5385 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
5386 'config-files': (optional). List of files to be transferred. Each item is a dict with:
5387 'dest': (mandatory) string with the destination absolute path
5388 'encoding': (optional, by default text). Can be one of:
5389 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
5390 'content' (mandatory): string with the content of the file
5391 'permissions': (optional) string with file permissions, typically octal notation '0644'
5392 'owner': (optional) file owner, string with the format 'owner:group'
5393 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
5394 """
5395 try:
5396 if not isinstance(cloud_config, dict):
5397 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
5398 else:
5399 key_pairs = []
5400 userdata = []
5401 if "key-pairs" in cloud_config:
5402 key_pairs = cloud_config["key-pairs"]
5403
5404 if "users" in cloud_config:
5405 userdata = cloud_config["users"]
5406
5407 self.logger.debug("cloud_init : Guest os customization started..")
5408 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
5409 customize_script = customize_script.replace("&","&amp;")
5410 self.guest_customization(vapp, customize_script)
5411
5412 except Exception as exp:
5413 self.logger.error("cloud_init : exception occurred while injecting "\
5414 "ssh-key")
5415 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
5416 "ssh-key".format(exp))
5417
5418 def format_script(self, key_pairs=[], users_list=[]):
5419 bash_script = """#!/bin/sh
5420 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5421 if [ "$1" = "precustomization" ];then
5422 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5423 """
5424
5425 keys = "\n".join(key_pairs)
5426 if keys:
5427 keys_data = """
5428 if [ ! -d /root/.ssh ];then
5429 mkdir /root/.ssh
5430 chown root:root /root/.ssh
5431 chmod 700 /root/.ssh
5432 touch /root/.ssh/authorized_keys
5433 chown root:root /root/.ssh/authorized_keys
5434 chmod 600 /root/.ssh/authorized_keys
5435 # make centos with selinux happy
5436 which restorecon && restorecon -Rv /root/.ssh
5437 else
5438 touch /root/.ssh/authorized_keys
5439 chown root:root /root/.ssh/authorized_keys
5440 chmod 600 /root/.ssh/authorized_keys
5441 fi
5442 echo '{key}' >> /root/.ssh/authorized_keys
5443 """.format(key=keys)
5444
5445 bash_script+= keys_data
5446
5447 for user in users_list:
5448 if 'name' in user: user_name = user['name']
5449 if 'key-pairs' in user:
5450 user_keys = "\n".join(user['key-pairs'])
5451 else:
5452 user_keys = None
5453
5454 add_user_name = """
5455 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
5456 """.format(user_name=user_name)
5457
5458 bash_script+= add_user_name
5459
5460 if user_keys:
5461 user_keys_data = """
5462 mkdir /home/{user_name}/.ssh
5463 chown {user_name}:{user_name} /home/{user_name}/.ssh
5464 chmod 700 /home/{user_name}/.ssh
5465 touch /home/{user_name}/.ssh/authorized_keys
5466 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
5467 chmod 600 /home/{user_name}/.ssh/authorized_keys
5468 # make centos with selinux happy
5469 which restorecon && restorecon -Rv /home/{user_name}/.ssh
5470 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
5471 """.format(user_name=user_name,user_key=user_keys)
5472
5473 bash_script+= user_keys_data
5474
5475 return bash_script+"\n\tfi"
5476
5477 def guest_customization(self, vapp, customize_script):
5478 """
5479 Method to customize guest os
5480 vapp - Vapp object
5481 customize_script - Customize script to be run at first boot of VM.
5482 """
5483 for vm in vapp.get_all_vms():
5484 vm_id = vm.get('id').split(':')[-1]
5485 vm_name = vm.get('name')
5486 vm_name = vm_name.replace('_','-')
5487
5488 vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
5489 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5490 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5491
5492 headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
5493
5494 data = """<GuestCustomizationSection
5495 xmlns="http://www.vmware.com/vcloud/v1.5"
5496 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5497 ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
5498 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
5499 <Enabled>true</Enabled>
5500 <ChangeSid>false</ChangeSid>
5501 <VirtualMachineId>{}</VirtualMachineId>
5502 <JoinDomainEnabled>false</JoinDomainEnabled>
5503 <UseOrgSettings>false</UseOrgSettings>
5504 <AdminPasswordEnabled>false</AdminPasswordEnabled>
5505 <AdminPasswordAuto>true</AdminPasswordAuto>
5506 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
5507 <AdminAutoLogonCount>0</AdminAutoLogonCount>
5508 <ResetPasswordRequired>false</ResetPasswordRequired>
5509 <CustomizationScript>{}</CustomizationScript>
5510 <ComputerName>{}</ComputerName>
5511 <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
5512 </GuestCustomizationSection>
5513 """.format(vm_customization_url,
5514 vm_id,
5515 customize_script,
5516 vm_name,
5517 vm_customization_url)
5518
5519 response = self.perform_request(req_type='PUT',
5520 url=vm_customization_url,
5521 headers=headers,
5522 data=data)
5523 if response.status_code == 202:
5524 guest_task = self.get_task_from_response(response.text)
5525 self.client.get_task_monitor().wait_for_success(task=guest_task)
5526 self.logger.info("guest_customization : customized guest os task "\
5527 "completed for VM {}".format(vm_name))
5528 else:
5529 self.logger.error("guest_customization : task for customized guest os"\
5530 "failed for VM {}".format(vm_name))
5531 raise vimconn.vimconnException("guest_customization : failed to perform"\
5532 "guest os customization on VM {}".format(vm_name))
5533
5534 def add_new_disk(self, vapp_uuid, disk_size):
5535 """
5536 Method to create an empty vm disk
5537
5538 Args:
5539 vapp_uuid - is vapp identifier.
5540 disk_size - size of disk to be created in GB
5541
5542 Returns:
5543 None
5544 """
5545 status = False
5546 vm_details = None
5547 try:
5548 #Disk size in GB, convert it into MB
5549 if disk_size is not None:
5550 disk_size_mb = int(disk_size) * 1024
5551 vm_details = self.get_vapp_details_rest(vapp_uuid)
5552
5553 if vm_details and "vm_virtual_hardware" in vm_details:
5554 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5555 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5556 status = self.add_new_disk_rest(disk_href, disk_size_mb)
5557
5558 except Exception as exp:
5559 msg = "Error occurred while creating new disk {}.".format(exp)
5560 self.rollback_newvm(vapp_uuid, msg)
5561
5562 if status:
5563 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5564 else:
5565 #If failed to add disk, delete VM
5566 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
5567 self.rollback_newvm(vapp_uuid, msg)
5568
5569
5570 def add_new_disk_rest(self, disk_href, disk_size_mb):
5571 """
5572 Retrives vApp Disks section & add new empty disk
5573
5574 Args:
5575 disk_href: Disk section href to addd disk
5576 disk_size_mb: Disk size in MB
5577
5578 Returns: Status of add new disk task
5579 """
5580 status = False
5581 if self.client._session:
5582 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5583 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5584 response = self.perform_request(req_type='GET',
5585 url=disk_href,
5586 headers=headers)
5587
5588 if response.status_code == 403:
5589 response = self.retry_rest('GET', disk_href)
5590
5591 if response.status_code != requests.codes.ok:
5592 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
5593 .format(disk_href, response.status_code))
5594 return status
5595 try:
5596 #Find but type & max of instance IDs assigned to disks
5597 lxmlroot_respond = lxmlElementTree.fromstring(response.text)
5598 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
5599 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
5600 instance_id = 0
5601 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
5602 if item.find("rasd:Description",namespaces).text == "Hard disk":
5603 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
5604 if inst_id > instance_id:
5605 instance_id = inst_id
5606 disk_item = item.find("rasd:HostResource" ,namespaces)
5607 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
5608 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
5609
5610 instance_id = instance_id + 1
5611 new_item = """<Item>
5612 <rasd:Description>Hard disk</rasd:Description>
5613 <rasd:ElementName>New disk</rasd:ElementName>
5614 <rasd:HostResource
5615 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
5616 vcloud:capacity="{}"
5617 vcloud:busSubType="{}"
5618 vcloud:busType="{}"></rasd:HostResource>
5619 <rasd:InstanceID>{}</rasd:InstanceID>
5620 <rasd:ResourceType>17</rasd:ResourceType>
5621 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
5622
5623 new_data = response.text
5624 #Add new item at the bottom
5625 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
5626
5627 # Send PUT request to modify virtual hardware section with new disk
5628 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
5629
5630 response = self.perform_request(req_type='PUT',
5631 url=disk_href,
5632 data=new_data,
5633 headers=headers)
5634
5635 if response.status_code == 403:
5636 add_headers = {'Content-Type': headers['Content-Type']}
5637 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
5638
5639 if response.status_code != 202:
5640 self.logger.error("PUT REST API call {} failed. Return status code {}. response.text:{}"
5641 .format(disk_href, response.status_code, response.text))
5642 else:
5643 add_disk_task = self.get_task_from_response(response.text)
5644 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
5645 if result.get('status') == 'success':
5646 status = True
5647 else:
5648 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
5649
5650 except Exception as exp:
5651 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
5652
5653 return status
5654
5655
5656 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
5657 """
5658 Method to add existing disk to vm
5659 Args :
5660 catalogs - List of VDC catalogs
5661 image_id - Catalog ID
5662 template_name - Name of template in catalog
5663 vapp_uuid - UUID of vApp
5664 Returns:
5665 None
5666 """
5667 disk_info = None
5668 vcenter_conect, content = self.get_vcenter_content()
5669 #find moref-id of vm in image
5670 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
5671 image_id=image_id,
5672 )
5673
5674 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
5675 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
5676 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
5677 if catalog_vm_moref_id:
5678 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
5679 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
5680 if catalog_vm_obj:
5681 #find existing disk
5682 disk_info = self.find_disk(catalog_vm_obj)
5683 else:
5684 exp_msg = "No VM with image id {} found".format(image_id)
5685 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5686 else:
5687 exp_msg = "No Image found with image ID {} ".format(image_id)
5688 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5689
5690 if disk_info:
5691 self.logger.info("Existing disk_info : {}".format(disk_info))
5692 #get VM
5693 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5694 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
5695 if vm_obj:
5696 status = self.add_disk(vcenter_conect=vcenter_conect,
5697 vm=vm_obj,
5698 disk_info=disk_info,
5699 size=size,
5700 vapp_uuid=vapp_uuid
5701 )
5702 if status:
5703 self.logger.info("Disk from image id {} added to {}".format(image_id,
5704 vm_obj.config.name)
5705 )
5706 else:
5707 msg = "No disk found with image id {} to add in VM {}".format(
5708 image_id,
5709 vm_obj.config.name)
5710 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
5711
5712
5713 def find_disk(self, vm_obj):
5714 """
5715 Method to find details of existing disk in VM
5716 Args :
5717 vm_obj - vCenter object of VM
5718 image_id - Catalog ID
5719 Returns:
5720 disk_info : dict of disk details
5721 """
5722 disk_info = {}
5723 if vm_obj:
5724 try:
5725 devices = vm_obj.config.hardware.device
5726 for device in devices:
5727 if type(device) is vim.vm.device.VirtualDisk:
5728 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
5729 disk_info["full_path"] = device.backing.fileName
5730 disk_info["datastore"] = device.backing.datastore
5731 disk_info["capacityKB"] = device.capacityInKB
5732 break
5733 except Exception as exp:
5734 self.logger.error("find_disk() : exception occurred while "\
5735 "getting existing disk details :{}".format(exp))
5736 return disk_info
5737
5738
5739 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
5740 """
5741 Method to add existing disk in VM
5742 Args :
5743 vcenter_conect - vCenter content object
5744 vm - vCenter vm object
5745 disk_info : dict of disk details
5746 Returns:
5747 status : status of add disk task
5748 """
5749 datastore = disk_info["datastore"] if "datastore" in disk_info else None
5750 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
5751 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
5752 if size is not None:
5753 #Convert size from GB to KB
5754 sizeKB = int(size) * 1024 * 1024
5755 #compare size of existing disk and user given size.Assign whicherver is greater
5756 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
5757 sizeKB, capacityKB))
5758 if sizeKB > capacityKB:
5759 capacityKB = sizeKB
5760
5761 if datastore and fullpath and capacityKB:
5762 try:
5763 spec = vim.vm.ConfigSpec()
5764 # get all disks on a VM, set unit_number to the next available
5765 unit_number = 0
5766 for dev in vm.config.hardware.device:
5767 if hasattr(dev.backing, 'fileName'):
5768 unit_number = int(dev.unitNumber) + 1
5769 # unit_number 7 reserved for scsi controller
5770 if unit_number == 7:
5771 unit_number += 1
5772 if isinstance(dev, vim.vm.device.VirtualDisk):
5773 #vim.vm.device.VirtualSCSIController
5774 controller_key = dev.controllerKey
5775
5776 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
5777 unit_number, controller_key))
5778 # add disk here
5779 dev_changes = []
5780 disk_spec = vim.vm.device.VirtualDeviceSpec()
5781 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5782 disk_spec.device = vim.vm.device.VirtualDisk()
5783 disk_spec.device.backing = \
5784 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
5785 disk_spec.device.backing.thinProvisioned = True
5786 disk_spec.device.backing.diskMode = 'persistent'
5787 disk_spec.device.backing.datastore = datastore
5788 disk_spec.device.backing.fileName = fullpath
5789
5790 disk_spec.device.unitNumber = unit_number
5791 disk_spec.device.capacityInKB = capacityKB
5792 disk_spec.device.controllerKey = controller_key
5793 dev_changes.append(disk_spec)
5794 spec.deviceChange = dev_changes
5795 task = vm.ReconfigVM_Task(spec=spec)
5796 status = self.wait_for_vcenter_task(task, vcenter_conect)
5797 return status
5798 except Exception as exp:
5799 exp_msg = "add_disk() : exception {} occurred while adding disk "\
5800 "{} to vm {}".format(exp,
5801 fullpath,
5802 vm.config.name)
5803 self.rollback_newvm(vapp_uuid, exp_msg)
5804 else:
5805 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
5806 self.rollback_newvm(vapp_uuid, msg)
5807
5808
5809 def get_vcenter_content(self):
5810 """
5811 Get the vsphere content object
5812 """
5813 try:
5814 vm_vcenter_info = self.get_vm_vcenter_info()
5815 except Exception as exp:
5816 self.logger.error("Error occurred while getting vCenter infromationn"\
5817 " for VM : {}".format(exp))
5818 raise vimconn.vimconnException(message=exp)
5819
5820 context = None
5821 if hasattr(ssl, '_create_unverified_context'):
5822 context = ssl._create_unverified_context()
5823
5824 vcenter_conect = SmartConnect(
5825 host=vm_vcenter_info["vm_vcenter_ip"],
5826 user=vm_vcenter_info["vm_vcenter_user"],
5827 pwd=vm_vcenter_info["vm_vcenter_password"],
5828 port=int(vm_vcenter_info["vm_vcenter_port"]),
5829 sslContext=context
5830 )
5831 atexit.register(Disconnect, vcenter_conect)
5832 content = vcenter_conect.RetrieveContent()
5833 return vcenter_conect, content
5834
5835
5836 def get_vm_moref_id(self, vapp_uuid):
5837 """
5838 Get the moref_id of given VM
5839 """
5840 try:
5841 if vapp_uuid:
5842 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
5843 if vm_details and "vm_vcenter_info" in vm_details:
5844 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
5845 return vm_moref_id
5846
5847 except Exception as exp:
5848 self.logger.error("Error occurred while getting VM moref ID "\
5849 " for VM : {}".format(exp))
5850 return None
5851
5852
5853 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
5854 """
5855 Method to get vApp template details
5856 Args :
5857 catalogs - list of VDC catalogs
5858 image_id - Catalog ID to find
5859 template_name : template name in catalog
5860 Returns:
5861 parsed_respond : dict of vApp tempalte details
5862 """
5863 parsed_response = {}
5864
5865 vca = self.connect_as_admin()
5866 if not vca:
5867 raise vimconn.vimconnConnectionException("Failed to connect vCD")
5868
5869 try:
5870 org, vdc = self.get_vdc_details()
5871 catalog = self.get_catalog_obj(image_id, catalogs)
5872 if catalog:
5873 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
5874 catalog_items = [items.attrib]
5875
5876 if len(catalog_items) == 1:
5877 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5878 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5879
5880 response = self.perform_request(req_type='GET',
5881 url=catalog_items[0].get('href'),
5882 headers=headers)
5883 catalogItem = XmlElementTree.fromstring(response.text)
5884 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
5885 vapp_tempalte_href = entity.get("href")
5886 #get vapp details and parse moref id
5887
5888 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
5889 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
5890 'vmw': 'http://www.vmware.com/schema/ovf',
5891 'vm': 'http://www.vmware.com/vcloud/v1.5',
5892 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5893 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
5894 'xmlns':"http://www.vmware.com/vcloud/v1.5"
5895 }
5896
5897 if vca._session:
5898 response = self.perform_request(req_type='GET',
5899 url=vapp_tempalte_href,
5900 headers=headers)
5901
5902 if response.status_code != requests.codes.ok:
5903 self.logger.debug("REST API call {} failed. Return status code {}".format(
5904 vapp_tempalte_href, response.status_code))
5905
5906 else:
5907 xmlroot_respond = XmlElementTree.fromstring(response.text)
5908 children_section = xmlroot_respond.find('vm:Children/', namespaces)
5909 if children_section is not None:
5910 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
5911 if vCloud_extension_section is not None:
5912 vm_vcenter_info = {}
5913 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
5914 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
5915 if vmext is not None:
5916 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
5917 parsed_response["vm_vcenter_info"]= vm_vcenter_info
5918
5919 except Exception as exp :
5920 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
5921
5922 return parsed_response
5923
5924
5925 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
5926 """
5927 Method to delete vApp
5928 Args :
5929 vapp_uuid - vApp UUID
5930 msg - Error message to be logged
5931 exp_type : Exception type
5932 Returns:
5933 None
5934 """
5935 if vapp_uuid:
5936 status = self.delete_vminstance(vapp_uuid)
5937 else:
5938 msg = "No vApp ID"
5939 self.logger.error(msg)
5940 if exp_type == "Genric":
5941 raise vimconn.vimconnException(msg)
5942 elif exp_type == "NotFound":
5943 raise vimconn.vimconnNotFoundException(message=msg)
5944
5945 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
5946 """
5947 Method to attach SRIOV adapters to VM
5948
5949 Args:
5950 vapp_uuid - uuid of vApp/VM
5951 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
5952 vmname_andid - vmname
5953
5954 Returns:
5955 The status of add SRIOV adapter task , vm object and
5956 vcenter_conect object
5957 """
5958 vm_obj = None
5959 vcenter_conect, content = self.get_vcenter_content()
5960 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5961
5962 if vm_moref_id:
5963 try:
5964 no_of_sriov_devices = len(sriov_nets)
5965 if no_of_sriov_devices > 0:
5966 #Get VM and its host
5967 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5968 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
5969 if host_obj and vm_obj:
5970 #get SRIOV devies from host on which vapp is currently installed
5971 avilable_sriov_devices = self.get_sriov_devices(host_obj,
5972 no_of_sriov_devices,
5973 )
5974
5975 if len(avilable_sriov_devices) == 0:
5976 #find other hosts with active pci devices
5977 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
5978 content,
5979 no_of_sriov_devices,
5980 )
5981
5982 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
5983 #Migrate vm to the host where SRIOV devices are available
5984 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
5985 new_host_obj))
5986 task = self.relocate_vm(new_host_obj, vm_obj)
5987 if task is not None:
5988 result = self.wait_for_vcenter_task(task, vcenter_conect)
5989 self.logger.info("Migrate VM status: {}".format(result))
5990 host_obj = new_host_obj
5991 else:
5992 self.logger.info("Fail to migrate VM : {}".format(result))
5993 raise vimconn.vimconnNotFoundException(
5994 "Fail to migrate VM : {} to host {}".format(
5995 vmname_andid,
5996 new_host_obj)
5997 )
5998
5999 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
6000 #Add SRIOV devices one by one
6001 for sriov_net in sriov_nets:
6002 network_name = sriov_net.get('net_id')
6003 dvs_portgr_name = self.create_dvPort_group(network_name)
6004 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
6005 #add vlan ID ,Modify portgroup for vlan ID
6006 self.configure_vlanID(content, vcenter_conect, network_name)
6007
6008 task = self.add_sriov_to_vm(content,
6009 vm_obj,
6010 host_obj,
6011 network_name,
6012 avilable_sriov_devices[0]
6013 )
6014 if task:
6015 status= self.wait_for_vcenter_task(task, vcenter_conect)
6016 if status:
6017 self.logger.info("Added SRIOV {} to VM {}".format(
6018 no_of_sriov_devices,
6019 str(vm_obj)))
6020 else:
6021 self.logger.error("Fail to add SRIOV {} to VM {}".format(
6022 no_of_sriov_devices,
6023 str(vm_obj)))
6024 raise vimconn.vimconnUnexpectedResponse(
6025 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
6026 )
6027 return True, vm_obj, vcenter_conect
6028 else:
6029 self.logger.error("Currently there is no host with"\
6030 " {} number of avaialble SRIOV "\
6031 "VFs required for VM {}".format(
6032 no_of_sriov_devices,
6033 vmname_andid)
6034 )
6035 raise vimconn.vimconnNotFoundException(
6036 "Currently there is no host with {} "\
6037 "number of avaialble SRIOV devices required for VM {}".format(
6038 no_of_sriov_devices,
6039 vmname_andid))
6040 else:
6041 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
6042
6043 except vmodl.MethodFault as error:
6044 self.logger.error("Error occurred while adding SRIOV {} ",error)
6045 return None, vm_obj, vcenter_conect
6046
6047
6048 def get_sriov_devices(self,host, no_of_vfs):
6049 """
6050 Method to get the details of SRIOV devices on given host
6051 Args:
6052 host - vSphere host object
6053 no_of_vfs - number of VFs needed on host
6054
6055 Returns:
6056 array of SRIOV devices
6057 """
6058 sriovInfo=[]
6059 if host:
6060 for device in host.config.pciPassthruInfo:
6061 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
6062 if device.numVirtualFunction >= no_of_vfs:
6063 sriovInfo.append(device)
6064 break
6065 return sriovInfo
6066
6067
6068 def get_host_and_sriov_devices(self, content, no_of_vfs):
6069 """
6070 Method to get the details of SRIOV devices infromation on all hosts
6071
6072 Args:
6073 content - vSphere host object
6074 no_of_vfs - number of pci VFs needed on host
6075
6076 Returns:
6077 array of SRIOV devices and host object
6078 """
6079 host_obj = None
6080 sriov_device_objs = None
6081 try:
6082 if content:
6083 container = content.viewManager.CreateContainerView(content.rootFolder,
6084 [vim.HostSystem], True)
6085 for host in container.view:
6086 devices = self.get_sriov_devices(host, no_of_vfs)
6087 if devices:
6088 host_obj = host
6089 sriov_device_objs = devices
6090 break
6091 except Exception as exp:
6092 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
6093
6094 return host_obj,sriov_device_objs
6095
6096
6097 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
6098 """
6099 Method to add SRIOV adapter to vm
6100
6101 Args:
6102 host_obj - vSphere host object
6103 vm_obj - vSphere vm object
6104 content - vCenter content object
6105 network_name - name of distributed virtaul portgroup
6106 sriov_device - SRIOV device info
6107
6108 Returns:
6109 task object
6110 """
6111 devices = []
6112 vnic_label = "sriov nic"
6113 try:
6114 dvs_portgr = self.get_dvport_group(network_name)
6115 network_name = dvs_portgr.name
6116 nic = vim.vm.device.VirtualDeviceSpec()
6117 # VM device
6118 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
6119 nic.device = vim.vm.device.VirtualSriovEthernetCard()
6120 nic.device.addressType = 'assigned'
6121 #nic.device.key = 13016
6122 nic.device.deviceInfo = vim.Description()
6123 nic.device.deviceInfo.label = vnic_label
6124 nic.device.deviceInfo.summary = network_name
6125 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
6126
6127 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
6128 nic.device.backing.deviceName = network_name
6129 nic.device.backing.useAutoDetect = False
6130 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
6131 nic.device.connectable.startConnected = True
6132 nic.device.connectable.allowGuestControl = True
6133
6134 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
6135 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
6136 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
6137
6138 devices.append(nic)
6139 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
6140 task = vm_obj.ReconfigVM_Task(vmconf)
6141 return task
6142 except Exception as exp:
6143 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
6144 return None
6145
6146
6147 def create_dvPort_group(self, network_name):
6148 """
6149 Method to create disributed virtual portgroup
6150
6151 Args:
6152 network_name - name of network/portgroup
6153
6154 Returns:
6155 portgroup key
6156 """
6157 try:
6158 new_network_name = [network_name, '-', str(uuid.uuid4())]
6159 network_name=''.join(new_network_name)
6160 vcenter_conect, content = self.get_vcenter_content()
6161
6162 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
6163 if dv_switch:
6164 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6165 dv_pg_spec.name = network_name
6166
6167 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
6168 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6169 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
6170 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
6171 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
6172 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
6173
6174 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
6175 self.wait_for_vcenter_task(task, vcenter_conect)
6176
6177 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
6178 if dvPort_group:
6179 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
6180 return dvPort_group.key
6181 else:
6182 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
6183
6184 except Exception as exp:
6185 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
6186 " : {}".format(network_name, exp))
6187 return None
6188
6189 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
6190 """
6191 Method to reconfigure disributed virtual portgroup
6192
6193 Args:
6194 dvPort_group_name - name of disributed virtual portgroup
6195 content - vCenter content object
6196 config_info - disributed virtual portgroup configuration
6197
6198 Returns:
6199 task object
6200 """
6201 try:
6202 dvPort_group = self.get_dvport_group(dvPort_group_name)
6203 if dvPort_group:
6204 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6205 dv_pg_spec.configVersion = dvPort_group.config.configVersion
6206 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6207 if "vlanID" in config_info:
6208 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
6209 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
6210
6211 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
6212 return task
6213 else:
6214 return None
6215 except Exception as exp:
6216 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
6217 " : {}".format(dvPort_group_name, exp))
6218 return None
6219
6220
6221 def destroy_dvport_group(self , dvPort_group_name):
6222 """
6223 Method to destroy disributed virtual portgroup
6224
6225 Args:
6226 network_name - name of network/portgroup
6227
6228 Returns:
6229 True if portgroup successfully got deleted else false
6230 """
6231 vcenter_conect, content = self.get_vcenter_content()
6232 try:
6233 status = None
6234 dvPort_group = self.get_dvport_group(dvPort_group_name)
6235 if dvPort_group:
6236 task = dvPort_group.Destroy_Task()
6237 status = self.wait_for_vcenter_task(task, vcenter_conect)
6238 return status
6239 except vmodl.MethodFault as exp:
6240 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
6241 exp, dvPort_group_name))
6242 return None
6243
6244
6245 def get_dvport_group(self, dvPort_group_name):
6246 """
6247 Method to get disributed virtual portgroup
6248
6249 Args:
6250 network_name - name of network/portgroup
6251
6252 Returns:
6253 portgroup object
6254 """
6255 vcenter_conect, content = self.get_vcenter_content()
6256 dvPort_group = None
6257 try:
6258 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
6259 for item in container.view:
6260 if item.key == dvPort_group_name:
6261 dvPort_group = item
6262 break
6263 return dvPort_group
6264 except vmodl.MethodFault as exp:
6265 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6266 exp, dvPort_group_name))
6267 return None
6268
6269 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
6270 """
6271 Method to get disributed virtual portgroup vlanID
6272
6273 Args:
6274 network_name - name of network/portgroup
6275
6276 Returns:
6277 vlan ID
6278 """
6279 vlanId = None
6280 try:
6281 dvPort_group = self.get_dvport_group(dvPort_group_name)
6282 if dvPort_group:
6283 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
6284 except vmodl.MethodFault as exp:
6285 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6286 exp, dvPort_group_name))
6287 return vlanId
6288
6289
6290 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
6291 """
6292 Method to configure vlanID in disributed virtual portgroup vlanID
6293
6294 Args:
6295 network_name - name of network/portgroup
6296
6297 Returns:
6298 None
6299 """
6300 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
6301 if vlanID == 0:
6302 #configure vlanID
6303 vlanID = self.genrate_vlanID(dvPort_group_name)
6304 config = {"vlanID":vlanID}
6305 task = self.reconfig_portgroup(content, dvPort_group_name,
6306 config_info=config)
6307 if task:
6308 status= self.wait_for_vcenter_task(task, vcenter_conect)
6309 if status:
6310 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
6311 dvPort_group_name,vlanID))
6312 else:
6313 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
6314 dvPort_group_name, vlanID))
6315
6316
6317 def genrate_vlanID(self, network_name):
6318 """
6319 Method to get unused vlanID
6320 Args:
6321 network_name - name of network/portgroup
6322 Returns:
6323 vlanID
6324 """
6325 vlan_id = None
6326 used_ids = []
6327 if self.config.get('vlanID_range') == None:
6328 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
6329 "at config value before creating sriov network with vlan tag")
6330 if "used_vlanIDs" not in self.persistent_info:
6331 self.persistent_info["used_vlanIDs"] = {}
6332 else:
6333 used_ids = list(self.persistent_info["used_vlanIDs"].values())
6334
6335 for vlanID_range in self.config.get('vlanID_range'):
6336 start_vlanid, end_vlanid = vlanID_range.split("-")
6337 if start_vlanid > end_vlanid:
6338 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
6339 vlanID_range))
6340
6341 for id in range(int(start_vlanid), int(end_vlanid) + 1):
6342 if id not in used_ids:
6343 vlan_id = id
6344 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
6345 return vlan_id
6346 if vlan_id is None:
6347 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
6348
6349
6350 def get_obj(self, content, vimtype, name):
6351 """
6352 Get the vsphere object associated with a given text name
6353 """
6354 obj = None
6355 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
6356 for item in container.view:
6357 if item.name == name:
6358 obj = item
6359 break
6360 return obj
6361
6362
6363 def insert_media_to_vm(self, vapp, image_id):
6364 """
6365 Method to insert media CD-ROM (ISO image) from catalog to vm.
6366 vapp - vapp object to get vm id
6367 Image_id - image id for cdrom to be inerted to vm
6368 """
6369 # create connection object
6370 vca = self.connect()
6371 try:
6372 # fetching catalog details
6373 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
6374 if vca._session:
6375 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6376 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6377 response = self.perform_request(req_type='GET',
6378 url=rest_url,
6379 headers=headers)
6380
6381 if response.status_code != 200:
6382 self.logger.error("REST call {} failed reason : {}"\
6383 "status code : {}".format(url_rest_call,
6384 response.text,
6385 response.status_code))
6386 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
6387 "catalog details")
6388 # searching iso name and id
6389 iso_name, media_id = self.get_media_details(vca, response.text)
6390
6391 if iso_name and media_id:
6392 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6393 <ns6:MediaInsertOrEjectParams
6394 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
6395 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6396 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
6397 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6398 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
6399 xmlns:ns7="http://www.vmware.com/schema/ovf"
6400 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
6401 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
6402 <ns6:Media
6403 type="application/vnd.vmware.vcloud.media+xml"
6404 name="{}"
6405 id="urn:vcloud:media:{}"
6406 href="https://{}/api/media/{}"/>
6407 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
6408 self.url,media_id)
6409
6410 for vms in vapp.get_all_vms():
6411 vm_id = vms.get('id').split(':')[-1]
6412
6413 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
6414 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
6415
6416 response = self.perform_request(req_type='POST',
6417 url=rest_url,
6418 data=data,
6419 headers=headers)
6420
6421 if response.status_code != 202:
6422 error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
6423 "Status code {}".format(response.text, response.status_code)
6424 self.logger.error(error_msg)
6425 raise vimconn.vimconnException(error_msg)
6426 else:
6427 task = self.get_task_from_response(response.text)
6428 result = self.client.get_task_monitor().wait_for_success(task=task)
6429 if result.get('status') == 'success':
6430 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
6431 " image to vm {}".format(vm_id))
6432
6433 except Exception as exp:
6434 self.logger.error("insert_media_to_vm() : exception occurred "\
6435 "while inserting media CD-ROM")
6436 raise vimconn.vimconnException(message=exp)
6437
6438
6439 def get_media_details(self, vca, content):
6440 """
6441 Method to get catalog item details
6442 vca - connection object
6443 content - Catalog details
6444 Return - Media name, media id
6445 """
6446 cataloghref_list = []
6447 try:
6448 if content:
6449 vm_list_xmlroot = XmlElementTree.fromstring(content)
6450 for child in vm_list_xmlroot.iter():
6451 if 'CatalogItem' in child.tag:
6452 cataloghref_list.append(child.attrib.get('href'))
6453 if cataloghref_list is not None:
6454 for href in cataloghref_list:
6455 if href:
6456 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6457 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6458 response = self.perform_request(req_type='GET',
6459 url=href,
6460 headers=headers)
6461 if response.status_code != 200:
6462 self.logger.error("REST call {} failed reason : {}"\
6463 "status code : {}".format(href,
6464 response.text,
6465 response.status_code))
6466 raise vimconn.vimconnException("get_media_details : Failed to get "\
6467 "catalogitem details")
6468 list_xmlroot = XmlElementTree.fromstring(response.text)
6469 for child in list_xmlroot.iter():
6470 if 'Entity' in child.tag:
6471 if 'media' in child.attrib.get('href'):
6472 name = child.attrib.get('name')
6473 media_id = child.attrib.get('href').split('/').pop()
6474 return name,media_id
6475 else:
6476 self.logger.debug("Media name and id not found")
6477 return False,False
6478 except Exception as exp:
6479 self.logger.error("get_media_details : exception occurred "\
6480 "getting media details")
6481 raise vimconn.vimconnException(message=exp)
6482
6483
6484 def retry_rest(self, method, url, add_headers=None, data=None):
6485 """ Method to get Token & retry respective REST request
6486 Args:
6487 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
6488 url - request url to be used
6489 add_headers - Additional headers (optional)
6490 data - Request payload data to be passed in request
6491 Returns:
6492 response - Response of request
6493 """
6494 response = None
6495
6496 #Get token
6497 self.get_token()
6498
6499 if self.client._session:
6500 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6501 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6502
6503 if add_headers:
6504 headers.update(add_headers)
6505
6506 if method == 'GET':
6507 response = self.perform_request(req_type='GET',
6508 url=url,
6509 headers=headers)
6510 elif method == 'PUT':
6511 response = self.perform_request(req_type='PUT',
6512 url=url,
6513 headers=headers,
6514 data=data)
6515 elif method == 'POST':
6516 response = self.perform_request(req_type='POST',
6517 url=url,
6518 headers=headers,
6519 data=data)
6520 elif method == 'DELETE':
6521 response = self.perform_request(req_type='DELETE',
6522 url=url,
6523 headers=headers)
6524 return response
6525
6526
6527 def get_token(self):
6528 """ Generate a new token if expired
6529
6530 Returns:
6531 The return client object that letter can be used to connect to vCloud director as admin for VDC
6532 """
6533 self.client = self.connect()
6534
6535 def get_vdc_details(self):
6536 """ Get VDC details using pyVcloud Lib
6537
6538 Returns org and vdc object
6539 """
6540 vdc = None
6541 try:
6542 org = Org(self.client, resource=self.client.get_org())
6543 vdc = org.get_vdc(self.tenant_name)
6544 except Exception as e:
6545 # pyvcloud not giving a specific exception, Refresh nevertheless
6546 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
6547
6548 #Retry once, if failed by refreshing token
6549 if vdc is None:
6550 self.get_token()
6551 org = Org(self.client, resource=self.client.get_org())
6552 vdc = org.get_vdc(self.tenant_name)
6553
6554 return org, vdc
6555
6556
6557 def perform_request(self, req_type, url, headers=None, data=None):
6558 """Perform the POST/PUT/GET/DELETE request."""
6559
6560 #Log REST request details
6561 self.log_request(req_type, url=url, headers=headers, data=data)
6562 # perform request and return its result
6563 if req_type == 'GET':
6564 response = requests.get(url=url,
6565 headers=headers,
6566 verify=False)
6567 elif req_type == 'PUT':
6568 response = requests.put(url=url,
6569 headers=headers,
6570 data=data,
6571 verify=False)
6572 elif req_type == 'POST':
6573 response = requests.post(url=url,
6574 headers=headers,
6575 data=data,
6576 verify=False)
6577 elif req_type == 'DELETE':
6578 response = requests.delete(url=url,
6579 headers=headers,
6580 verify=False)
6581 #Log the REST response
6582 self.log_response(response)
6583
6584 return response
6585
6586
6587 def log_request(self, req_type, url=None, headers=None, data=None):
6588 """Logs REST request details"""
6589
6590 if req_type is not None:
6591 self.logger.debug("Request type: {}".format(req_type))
6592
6593 if url is not None:
6594 self.logger.debug("Request url: {}".format(url))
6595
6596 if headers is not None:
6597 for header in headers:
6598 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
6599
6600 if data is not None:
6601 self.logger.debug("Request data: {}".format(data))
6602
6603
6604 def log_response(self, response):
6605 """Logs REST response details"""
6606
6607 self.logger.debug("Response status code: {} ".format(response.status_code))
6608
6609
6610 def get_task_from_response(self, content):
6611 """
6612 content - API response.text(response.text)
6613 return task object
6614 """
6615 xmlroot = XmlElementTree.fromstring(content)
6616 if xmlroot.tag.split('}')[1] == "Task":
6617 return xmlroot
6618 else:
6619 for ele in xmlroot:
6620 if ele.tag.split("}")[1] == "Tasks":
6621 task = ele[0]
6622 break
6623 return task
6624
6625
6626 def power_on_vapp(self,vapp_id, vapp_name):
6627 """
6628 vapp_id - vApp uuid
6629 vapp_name - vAapp name
6630 return - Task object
6631 """
6632 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6633 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6634
6635 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
6636 vapp_id)
6637 response = self.perform_request(req_type='POST',
6638 url=poweron_href,
6639 headers=headers)
6640
6641 if response.status_code != 202:
6642 self.logger.error("REST call {} failed reason : {}"\
6643 "status code : {} ".format(poweron_href,
6644 response.text,
6645 response.status_code))
6646 raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
6647 "vApp {}".format(vapp_name))
6648 else:
6649 poweron_task = self.get_task_from_response(response.text)
6650 return poweron_task
6651
6652