(bug 880) fixing vmware VIM plugin python3 migration issues
[osm/RO.git] / RO-VIM-vmware / osm_rovim_vmware / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implements an abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 from osm_ro import vimconn
31 import os
32 import shutil
33 import subprocess
34 import tempfile
35 import traceback
36 import itertools
37 import requests
38 import ssl
39 import atexit
40
41 from pyVmomi import vim, vmodl
42 from pyVim.connect import SmartConnect, Disconnect
43
44 from xml.etree import ElementTree as XmlElementTree
45 from lxml import etree as lxmlElementTree
46
47 import yaml
48 from pyvcloud.vcd.client import BasicLoginCredentials, Client, VcdTaskException
49 from pyvcloud.vcd.vdc import VDC
50 from pyvcloud.vcd.org import Org
51 import re
52 from pyvcloud.vcd.vapp import VApp
53 from xml.sax.saxutils import escape
54 import logging
55 import json
56 import time
57 import uuid
58 # import httplib
59 #For python3
60 #import http.client # TODO py3 check
61 import hashlib
62 import socket
63 import struct
64 import netaddr
65 import random
66
67 # global variable for vcd connector type
68 STANDALONE = 'standalone'
69
70 # key for flavor dicts
71 FLAVOR_RAM_KEY = 'ram'
72 FLAVOR_VCPUS_KEY = 'vcpus'
73 FLAVOR_DISK_KEY = 'disk'
74 DEFAULT_IP_PROFILE = {'dhcp_count':50,
75 'dhcp_enabled':True,
76 'ip_version':"IPv4"
77 }
78 # global variable for wait time
79 INTERVAL_TIME = 5
80 MAX_WAIT_TIME = 1800
81
82 API_VERSION = '27.0'
83
84 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
85 __date__ = "$09-Mar-2018 11:09:29$"
86 __version__ = '0.2'
87
88 # -1: "Could not be created",
89 # 0: "Unresolved",
90 # 1: "Resolved",
91 # 2: "Deployed",
92 # 3: "Suspended",
93 # 4: "Powered on",
94 # 5: "Waiting for user input",
95 # 6: "Unknown state",
96 # 7: "Unrecognized state",
97 # 8: "Powered off",
98 # 9: "Inconsistent state",
99 # 10: "Children do not all have the same status",
100 # 11: "Upload initiated, OVF descriptor pending",
101 # 12: "Upload initiated, copying contents",
102 # 13: "Upload initiated , disk contents pending",
103 # 14: "Upload has been quarantined",
104 # 15: "Upload quarantine period has expired"
105
106 # mapping vCD status to MANO
107 vcdStatusCode2manoFormat = {4: 'ACTIVE',
108 7: 'PAUSED',
109 3: 'SUSPENDED',
110 8: 'INACTIVE',
111 12: 'BUILD',
112 -1: 'ERROR',
113 14: 'DELETED'}
114
115 #
116 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
117 'ERROR': 'ERROR', 'DELETED': 'DELETED'
118 }
119
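# Illustrative note (added for clarity, not part of the original logic): the two maps above
# translate vCloud director status codes/strings into the MANO status vocabulary, e.g.
#
#     vcdStatusCode2manoFormat.get(4)        # -> 'ACTIVE'  (vCD code 4 = "Powered on")
#     netStatus2manoFormat.get('INACTIVE')   # -> 'INACTIVE'
#
# Codes that are not in the map (e.g. 6 "Unknown state") have no MANO equivalent and must be
# handled by the caller.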
120 class vimconnector(vimconn.vimconnector):
121 # dict used to store flavor in memory
122 flavorlist = {}
123
124 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
125 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
126 """
127 Constructor creates a vmware connector to vCloud director.
128
129 By default the constructor doesn't validate the connection state, so the client can create the object with None arguments.
130 If the client specifies username, password, host and VDC name, the connector initializes the other missing attributes.
131
132 a) It initializes the organization UUID
133 b) It initializes tenant_id/VDC ID (this information is derived from the tenant name)
134
135 Args:
136 uuid - organization uuid.
137 name - organization name; it must be present in vCloud director.
138 tenant_id - VDC uuid; it must be present in vCloud director
139 tenant_name - VDC name.
140 url - hostname or ip address of vCloud director
141 url_admin - same as above.
142 user - user that is an administrator for the organization. Caller must make sure that
143 the username has the right privileges.
144
145 password - password for the user.
146
147 The VMware connector also requires PVDC administrative privileges and a separate account.
148 These variables must be passed via the config argument, a dict containing the keys
149
150 dict['admin_username']
151 dict['admin_password']
152 config - Provide NSX and vCenter information
153
154 Returns:
155 Nothing.
156 """
157
158 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
159 url_admin, user, passwd, log_level, config)
160
161 self.logger = logging.getLogger('openmano.vim.vmware')
162 self.logger.setLevel(10)
163 self.persistent_info = persistent_info
164
165 self.name = name
166 self.id = uuid
167 self.url = url
168 self.url_admin = url_admin
169 self.tenant_id = tenant_id
170 self.tenant_name = tenant_name
171 self.user = user
172 self.passwd = passwd
173 self.config = config
174 self.admin_password = None
175 self.admin_user = None
176 self.org_name = ""
177 self.nsx_manager = None
178 self.nsx_user = None
179 self.nsx_password = None
180 self.availability_zone = None
181
182 # Disable warnings from self-signed certificates.
183 requests.packages.urllib3.disable_warnings()
184
185 if tenant_name is not None:
186 orgnameandtenant = tenant_name.split(":")
187 if len(orgnameandtenant) == 2:
188 self.tenant_name = orgnameandtenant[1]
189 self.org_name = orgnameandtenant[0]
190 else:
191 self.tenant_name = tenant_name
192 if "orgname" in config:
193 self.org_name = config['orgname']
194
195 if log_level:
196 self.logger.setLevel(getattr(logging, log_level))
197
198 try:
199 self.admin_user = config['admin_username']
200 self.admin_password = config['admin_password']
201 except KeyError:
202 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
203
204 try:
205 self.nsx_manager = config['nsx_manager']
206 self.nsx_user = config['nsx_user']
207 self.nsx_password = config['nsx_password']
208 except KeyError:
209 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
210
211 self.vcenter_ip = config.get("vcenter_ip", None)
212 self.vcenter_port = config.get("vcenter_port", None)
213 self.vcenter_user = config.get("vcenter_user", None)
214 self.vcenter_password = config.get("vcenter_password", None)
215
216 #Set availability zone for Affinity rules
217 self.availability_zone = self.set_availability_zones()
218
219 # ############# Stub code for SRIOV #################
220 # try:
221 # self.dvs_name = config['dv_switch_name']
222 # except KeyError:
223 # raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in Config")
224 #
225 # self.vlanID_range = config.get("vlanID_range", None)
226
227 self.org_uuid = None
228 self.client = None
229
230 if not url:
231 raise vimconn.vimconnException('url param can not be NoneType')
232
233 if not self.url_admin: # try to use normal url
234 self.url_admin = self.url
235
236 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
237 self.tenant_id, self.tenant_name))
238 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
239 logging.debug("vcd admin username {} vcd admin password {}".format(self.admin_user, self.admin_password))
240
241 # initialize organization
242 if self.user is not None and self.passwd is not None and self.url:
243 self.init_organization()
244
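# Illustrative sketch (hypothetical values) of the constructor arguments and the config dict this
# connector expects; the admin_* and nsx_* keys are mandatory (a vimconnException is raised when
# they are missing), while the vcenter_* keys and 'orgname' are optional:
#
#     config = {'admin_username': 'pvdc-admin', 'admin_password': '******',
#               'nsx_manager': 'https://nsx.example.com', 'nsx_user': 'nsx-admin',
#               'nsx_password': '******', 'vcenter_ip': '10.0.0.10', 'vcenter_port': 443,
#               'vcenter_user': 'administrator@vsphere.local', 'vcenter_password': '******'}
#     vim = vimconnector(name='my-org', tenant_name='my-org:my-vdc',
#                        url='https://vcd.example.com', user='vdc-user', passwd='******',
#                        config=config)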
245 def __getitem__(self, index):
246 if index == 'name':
247 return self.name
248 if index == 'tenant_id':
249 return self.tenant_id
250 if index == 'tenant_name':
251 return self.tenant_name
252 elif index == 'id':
253 return self.id
254 elif index == 'org_name':
255 return self.org_name
256 elif index == 'org_uuid':
257 return self.org_uuid
258 elif index == 'user':
259 return self.user
260 elif index == 'passwd':
261 return self.passwd
262 elif index == 'url':
263 return self.url
264 elif index == 'url_admin':
265 return self.url_admin
266 elif index == "config":
267 return self.config
268 else:
269 raise KeyError("Invalid key '{}'".format(index))
270
271 def __setitem__(self, index, value):
272 if index == 'name':
273 self.name = value
274 if index == 'tenant_id':
275 self.tenant_id = value
276 if index == 'tenant_name':
277 self.tenant_name = value
278 elif index == 'id':
279 self.id = value
280 elif index == 'org_name':
281 self.org_name = value
282 elif index == 'org_uuid':
283 self.org_uuid = value
284 elif index == 'user':
285 self.user = value
286 elif index == 'passwd':
287 self.passwd = value
288 elif index == 'url':
289 self.url = value
290 elif index == 'url_admin':
291 self.url_admin = value
292 else:
293 raise KeyError("Invalid key '{}'".format(index))
294
295 def connect_as_admin(self):
296 """ Method connects as the pvdc admin user to vCloud director.
297 There are certain actions that can be done only by the provider vdc admin user,
298 e.g. organization creation / provider network creation.
299
300 Returns:
301 The client object that can later be used to connect to vcloud director as admin for the provider vdc
302 """
303 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
304
305 try:
306 host = self.url
307 org = 'System'
308 client_as_admin = Client(host, verify_ssl_certs=False)
309 client_as_admin.set_highest_supported_version()
310 client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
311 except Exception as e:
312 raise vimconn.vimconnException(
313 "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
314
315 return client_as_admin
316
317 def connect(self):
318 """ Method connects as a normal user to vCloud director.
319
320 Returns:
321 The client object that can later be used to connect to vCloud director as admin for the VDC
322 """
323 try:
324 self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
325 self.user,
326 self.org_name))
327 host = self.url
328 client = Client(host, verify_ssl_certs=False)
329 client.set_highest_supported_version()
330 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
331 except Exception:
332 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
333 "{} as user: {}".format(self.org_name, self.user))
334
335 return client
336
337 def init_organization(self):
338 """ Method initializes the organization UUID and the VDC parameters.
339
340 At a bare minimum the client must provide an organization name that is present in vCloud director and a VDC.
341
342 The VDC UUID (tenant_id) will be initialized at run time if the client didn't pass it to the constructor.
343 The Org UUID will be initialized at run time if the data center is present in vCloud director.
344
345 Returns:
346 None. As a side effect it sets self.org_uuid and the tenant_id/tenant_name pair.
347 """
348 client = self.connect()
349 if not client:
350 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
351
352 self.client = client
353 try:
354 if self.org_uuid is None:
355 org_list = client.get_org_list()
356 for org in org_list.Org:
357 # we set org UUID at the init phase but we can do it only when we have valid credential.
358 if org.get('name') == self.org_name:
359 self.org_uuid = org.get('href').split('/')[-1]
360 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
361 break
362 else:
363 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
364
365 # if all went well, request the org details
366 org_details_dict = self.get_org(org_uuid=self.org_uuid)
367
368 # there are two cases when we initialize the VDC ID or VDC name at run time
369 # case one: tenant_name provided but no tenant id
370 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
371 vdcs_dict = org_details_dict['vdcs']
372 for vdc in vdcs_dict:
373 if vdcs_dict[vdc] == self.tenant_name:
374 self.tenant_id = vdc
375 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
376 self.org_name))
377 break
378 else:
379 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
380 # case two we have tenant_id but we don't have tenant name so we find and set it.
381 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
382 vdcs_dict = org_details_dict['vdcs']
383 for vdc in vdcs_dict:
384 if vdc == self.tenant_id:
385 self.tenant_name = vdcs_dict[vdc]
386 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
387 self.org_name))
388 break
389 else:
390 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
391 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
392 except Exception:
393 self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
394 self.logger.debug(traceback.format_exc())
395 self.org_uuid = None
396
397 def new_tenant(self, tenant_name=None, tenant_description=None):
398 """ Method adds a new tenant to VIM with this name.
399 This action requires access to create VDC action in vCloud director.
400
401 Args:
402 tenant_name is tenant_name to be created.
403 tenant_description not used for this call
404
405 Return:
406 returns the tenant identifier in UUID format.
407 If the action fails the method raises vimconn.vimconnException
408 """
409 vdc_task = self.create_vdc(vdc_name=tenant_name)
410 if vdc_task is not None:
411 vdc_uuid, value = vdc_task.popitem()
412 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
413 return vdc_uuid
414 else:
415 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
416
417 def delete_tenant(self, tenant_id=None):
418 """ Delete a tenant from VIM
419 Args:
420 tenant_id is tenant_id to be deleted.
421
422 Return:
423 returns the tenant identifier in UUID format.
424 If the action fails the method raises an exception
425 """
426 vca = self.connect_as_admin()
427 if not vca:
428 raise vimconn.vimconnConnectionException("Failed to connect vCD")
429
430 if tenant_id is not None:
431 if vca._session:
432 #Get OrgVDC
433 url_list = [self.url, '/api/vdc/', tenant_id]
434 orgvdc_herf = ''.join(url_list)
435
436 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
437 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
438 response = self.perform_request(req_type='GET',
439 url=orgvdc_herf,
440 headers=headers)
441
442 if response.status_code != requests.codes.ok:
443 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
444 "Return status code {}".format(orgvdc_herf,
445 response.status_code))
446 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
447
448 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
449 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
450 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
451 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
452 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
453
454 response = self.perform_request(req_type='DELETE',
455 url=vdc_remove_href,
456 headers=headers)
457
458 if response.status_code == 202:
459 time.sleep(5)
460 return tenant_id
461 else:
462 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
463 "Return status code {}".format(vdc_remove_href,
464 response.status_code))
465 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
466 else:
467 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
468 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
469
470
471 def get_tenant_list(self, filter_dict={}):
472 """Obtain tenants of VIM
473 filter_dict can contain the following keys:
474 name: filter by tenant name
475 id: filter by tenant uuid/id
476 <other VIM specific>
477 Returns the tenant list of dictionaries:
478 [{'name':'<name>, 'id':'<id>, ...}, ...]
479
480 """
481 org_dict = self.get_org(self.org_uuid)
482 vdcs_dict = org_dict['vdcs']
483
484 vdclist = []
485 try:
486 for k in vdcs_dict:
487 entry = {'name': vdcs_dict[k], 'id': k}
488 # if caller didn't specify dictionary we return all tenants.
489 if filter_dict is not None and filter_dict:
490 filtered_entry = entry.copy()
491 filtered_dict = set(entry.keys()) - set(filter_dict)
492 for unwanted_key in filtered_dict: del entry[unwanted_key]
493 if filter_dict == entry:
494 vdclist.append(filtered_entry)
495 else:
496 vdclist.append(entry)
497 except Exception as e:
498 self.logger.debug("Error in get_tenant_list()")
499 self.logger.debug(traceback.format_exc())
500 raise vimconn.vimconnException("Incorrect state. {}".format(e))
501
502 return vdclist
503
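# Illustrative example (hypothetical uuid, assuming 'vim' is an instance of this connector):
# get_tenant_list() matches only on the keys present in filter_dict, so
#     vim.get_tenant_list({'name': 'my-vdc'})   # -> [{'name': 'my-vdc', 'id': '1fb...'}]
# while an empty or None filter returns every VDC of the organization.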
504 def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
505 """Adds a tenant network to VIM
506 Params:
507 'net_name': name of the network
508 'net_type': one of:
509 'bridge': overlay isolated network
510 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
511 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
512 'ip_profile': is a dict containing the IP parameters of the network
513 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
514 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
515 'gateway_address': (Optional) ip_schema, that is X.X.X.X
516 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
517 'dhcp_enabled': True or False
518 'dhcp_start_address': ip_schema, first IP to grant
519 'dhcp_count': number of IPs to grant.
520 'shared': if this network can be seen/use by other tenants/organization
521 'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
522 Returns a tuple with the network identifier and created_items, or raises an exception on error
523 created_items can be None or a dictionary where this method can include key-values that will be passed to
524 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
525 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
526 as not present.
527 """
528
529 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
530 .format(net_name, net_type, ip_profile, shared))
531
532 created_items = {}
533 isshared = 'false'
534 if shared:
535 isshared = 'true'
536
537 # ############# Stub code for SRIOV #################
538 # if net_type == "data" or net_type == "ptp":
539 # if self.config.get('dv_switch_name') == None:
540 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
541 # network_uuid = self.create_dvPort_group(net_name)
542
543 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
544 ip_profile=ip_profile, isshared=isshared)
545 if network_uuid is not None:
546 return network_uuid, created_items
547 else:
548 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
549
550 def get_vcd_network_list(self):
551 """ Method retrieves the list of networks available in the org VDC for the logged in tenant
552
553 Returns:
554 The list of network dictionaries (name, id, shared, tenant_id, admin_state_up, status, type)
555 """
556
557 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
558
559 if not self.tenant_name:
560 raise vimconn.vimconnConnectionException("Tenant name is empty.")
561
562 org, vdc = self.get_vdc_details()
563 if vdc is None:
564 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
565
566 vdc_uuid = vdc.get('id').split(":")[3]
567 if self.client._session:
568 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
569 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
570 response = self.perform_request(req_type='GET',
571 url=vdc.get('href'),
572 headers=headers)
573 if response.status_code != 200:
574 self.logger.error("Failed to get vdc content")
575 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
576 else:
577 content = XmlElementTree.fromstring(response.content)
578
579 network_list = []
580 try:
581 for item in content:
582 if item.tag.split('}')[-1] == 'AvailableNetworks':
583 for net in item:
584 response = self.perform_request(req_type='GET',
585 url=net.get('href'),
586 headers=headers)
587
588 if response.status_code != 200:
589 self.logger.error("Failed to get network content")
590 raise vimconn.vimconnNotFoundException("Failed to get network content")
591 else:
592 net_details = XmlElementTree.fromstring(response.content)
593
594 filter_dict = {}
595 net_uuid = net_details.get('id').split(":")
596 if len(net_uuid) != 4:
597 continue
598 else:
599 net_uuid = net_uuid[3]
600 # create dict entry
601 self.logger.debug("get_vcd_network_list(): Adding network {} "
602 "to a list vcd id {} network {}".format(net_uuid,
603 vdc_uuid,
604 net_details.get('name')))
605 filter_dict["name"] = net_details.get('name')
606 filter_dict["id"] = net_uuid
607 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
608 shared = True
609 else:
610 shared = False
611 filter_dict["shared"] = shared
612 filter_dict["tenant_id"] = vdc_uuid
613 if int(net_details.get('status')) == 1:
614 filter_dict["admin_state_up"] = True
615 else:
616 filter_dict["admin_state_up"] = False
617 filter_dict["status"] = "ACTIVE"
618 filter_dict["type"] = "bridge"
619 network_list.append(filter_dict)
620 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
621 except Exception:
622 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
623 pass
624
625 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
626 return network_list
627
628 def get_network_list(self, filter_dict={}):
629 """Obtain tenant networks of VIM
630 Filter_dict can be:
631 name: network name OR/AND
632 id: network uuid OR/AND
633 shared: boolean OR/AND
634 tenant_id: tenant OR/AND
635 admin_state_up: boolean
636 status: 'ACTIVE'
637
638 [{key : value , key : value}]
639
640 Returns the network list of dictionaries:
641 [{<the fields at Filter_dict plus some VIM specific>}, ...]
642 List can be empty
643 """
644
645 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
646
647 if not self.tenant_name:
648 raise vimconn.vimconnConnectionException("Tenant name is empty.")
649
650 org, vdc = self.get_vdc_details()
651 if vdc is None:
652 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
653
654 try:
655 vdcid = vdc.get('id').split(":")[3]
656
657 if self.client._session:
658 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
659 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
660 response = self.perform_request(req_type='GET',
661 url=vdc.get('href'),
662 headers=headers)
663 if response.status_code != 200:
664 self.logger.error("Failed to get vdc content")
665 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
666 else:
667 content = XmlElementTree.fromstring(response.content)
668
669 network_list = []
670 for item in content:
671 if item.tag.split('}')[-1] == 'AvailableNetworks':
672 for net in item:
673 response = self.perform_request(req_type='GET',
674 url=net.get('href'),
675 headers=headers)
676
677 if response.status_code != 200:
678 self.logger.error("Failed to get network content")
679 raise vimconn.vimconnNotFoundException("Failed to get network content")
680 else:
681 net_details = XmlElementTree.fromstring(response.content)
682
683 filter_entry = {}
684 net_uuid = net_details.get('id').split(":")
685 if len(net_uuid) != 4:
686 continue
687 else:
688 net_uuid = net_uuid[3]
689 # create dict entry
690 self.logger.debug("get_network_list(): Adding net {}"
691 " to a list vcd id {} network {}".format(net_uuid,
692 vdcid,
693 net_details.get('name')))
694 filter_entry["name"] = net_details.get('name')
695 filter_entry["id"] = net_uuid
696 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
697 shared = True
698 else:
699 shared = False
700 filter_entry["shared"] = shared
701 filter_entry["tenant_id"] = vdcid
702 if int(net_details.get('status')) == 1:
703 filter_entry["admin_state_up"] = True
704 else:
705 filter_entry["admin_state_up"] = False
706 filter_entry["status"] = "ACTIVE"
707 filter_entry["type"] = "bridge"
708 filtered_entry = filter_entry.copy()
709
710 if filter_dict is not None and filter_dict:
711 # we remove all the key : value we don't care and match only
712 # respected field
713 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
714 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
715 if filter_dict == filter_entry:
716 network_list.append(filtered_entry)
717 else:
718 network_list.append(filtered_entry)
719 except Exception as e:
720 self.logger.debug("Error in get_network_list",exc_info=True)
721 if isinstance(e, vimconn.vimconnException):
722 raise
723 else:
724 raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
725
726 self.logger.debug("Returning {}".format(network_list))
727 return network_list
728
729 def get_network(self, net_id):
730 """Method obtains network details of net_id VIM network
731 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
732
733 try:
734 org, vdc = self.get_vdc_details()
735 vdc_id = vdc.get('id').split(":")[3]
736 if self.client._session:
737 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
738 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
739 response = self.perform_request(req_type='GET',
740 url=vdc.get('href'),
741 headers=headers)
742 if response.status_code != 200:
743 self.logger.error("Failed to get vdc content")
744 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
745 else:
746 content = XmlElementTree.fromstring(response.content)
747
748 filter_dict = {}
749
750 for item in content:
751 if item.tag.split('}')[-1] == 'AvailableNetworks':
752 for net in item:
753 response = self.perform_request(req_type='GET',
754 url=net.get('href'),
755 headers=headers)
756
757 if response.status_code != 200:
758 self.logger.error("Failed to get network content")
759 raise vimconn.vimconnNotFoundException("Failed to get network content")
760 else:
761 net_details = XmlElementTree.fromstring(response.content)
762
763 vdc_network_id = net_details.get('id').split(":")
764 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
765 filter_dict["name"] = net_details.get('name')
766 filter_dict["id"] = vdc_network_id[3]
767 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
768 shared = True
769 else:
770 shared = False
771 filter_dict["shared"] = shared
772 filter_dict["tenant_id"] = vdc_id
773 if int(net_details.get('status')) == 1:
774 filter_dict["admin_state_up"] = True
775 else:
776 filter_dict["admin_state_up"] = False
777 filter_dict["status"] = "ACTIVE"
778 filter_dict["type"] = "bridge"
779 self.logger.debug("Returning {}".format(filter_dict))
780 return filter_dict
781 else:
782 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
783 except Exception as e:
784 self.logger.debug("Error in get_network")
785 self.logger.debug(traceback.format_exc())
786 if isinstance(e, vimconn.vimconnException):
787 raise
788 else:
789 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
790
791 return filter_dict
792
793 def delete_network(self, net_id, created_items=None):
794 """
795 Removes a tenant network from VIM and its associated elements
796 :param net_id: VIM identifier of the network, provided by method new_network
797 :param created_items: dictionary with extra items to be deleted. provided by method new_network
798 Returns the network identifier or raises an exception upon error or when network is not found
799 """
800
801 # ############# Stub code for SRIOV #################
802 # dvport_group = self.get_dvport_group(net_id)
803 # if dvport_group:
804 # #delete portgroup
805 # status = self.destroy_dvport_group(net_id)
806 # if status:
807 # # Remove vlanID from persistent info
808 # if net_id in self.persistent_info["used_vlanIDs"]:
809 # del self.persistent_info["used_vlanIDs"][net_id]
810 #
811 # return net_id
812
813 vcd_network = self.get_vcd_network(network_uuid=net_id)
814 if vcd_network is not None and vcd_network:
815 if self.delete_network_action(network_uuid=net_id):
816 return net_id
817 else:
818 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
819
820 def refresh_nets_status(self, net_list):
821 """Get the status of the networks
822 Params: the list of network identifiers
823 Returns a dictionary with:
824 net_id: #VIM id of this network
825 status: #Mandatory. Text with one of:
826 # DELETED (not found at vim)
827 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
828 # OTHER (Vim reported other status not understood)
829 # ERROR (VIM indicates an ERROR status)
830 # ACTIVE, INACTIVE, DOWN (admin down),
831 # BUILD (on building process)
832 #
833 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
834 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
835
836 """
837
838 dict_entry = {}
839 try:
840 for net in net_list:
841 errormsg = ''
842 vcd_network = self.get_vcd_network(network_uuid=net)
843 if vcd_network is not None and vcd_network:
844 if vcd_network['status'] == '1':
845 status = 'ACTIVE'
846 else:
847 status = 'DOWN'
848 else:
849 status = 'DELETED'
850 errormsg = 'Network not found.'
851
852 dict_entry[net] = {'status': status, 'error_msg': errormsg,
853 'vim_info': yaml.safe_dump(vcd_network)}
854 except Exception:
855 self.logger.debug("Error in refresh_nets_status")
856 self.logger.debug(traceback.format_exc())
857
858 return dict_entry
859
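# Illustrative shape of the dict returned by refresh_nets_status() (hypothetical net ids):
#
#     {'5c3f...': {'status': 'ACTIVE', 'error_msg': '', 'vim_info': '<yaml dump of the vCD net>'},
#      'a114...': {'status': 'DELETED', 'error_msg': 'Network not found.', 'vim_info': 'null\n...'}}
#
# Possible status values produced above are ACTIVE, DOWN and DELETED.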
860 def get_flavor(self, flavor_id):
861 """Obtain flavor details from the VIM
862 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
863 """
864 if flavor_id not in vimconnector.flavorlist:
865 raise vimconn.vimconnNotFoundException("Flavor not found.")
866 return vimconnector.flavorlist[flavor_id]
867
868 def new_flavor(self, flavor_data):
869 """Adds a tenant flavor to VIM
870 flavor_data contains a dictionary with information, keys:
871 name: flavor name
872 ram: memory (cloud type) in MBytes
873 vcpus: cpus (cloud type)
874 extended: EPA parameters
875 - numas: #items requested in same NUMA
876 memory: number of 1G huge pages memory
877 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
878 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
879 - name: interface name
880 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
881 bandwidth: X Gbps; requested guarantee bandwidth
882 vpci: requested virtual PCI address
883 disk: disk size
884 is_public:
885 #TODO to concrete
886 Returns the flavor identifier"""
887
888 # generate a new uuid put to internal dict and return it.
889 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
890 new_flavor=flavor_data
891 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
892 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
893 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
894
895 if not isinstance(ram, int):
896 raise vimconn.vimconnException("Non-integer value for ram")
897 elif not isinstance(cpu, int):
898 raise vimconn.vimconnException("Non-integer value for cpu")
899 elif not isinstance(disk, int):
900 raise vimconn.vimconnException("Non-integer value for disk")
901
902 extended_flv = flavor_data.get("extended")
903 if extended_flv:
904 numas=extended_flv.get("numas")
905 if numas:
906 for numa in numas:
907 #overwrite ram and vcpus
908 if 'memory' in numa:
909 ram = numa['memory']*1024
910 if 'paired-threads' in numa:
911 cpu = numa['paired-threads']*2
912 elif 'cores' in numa:
913 cpu = numa['cores']
914 elif 'threads' in numa:
915 cpu = numa['threads']
916
917 new_flavor[FLAVOR_RAM_KEY] = ram
918 new_flavor[FLAVOR_VCPUS_KEY] = cpu
919 new_flavor[FLAVOR_DISK_KEY] = disk
920 # generate a new uuid put to internal dict and return it.
921 flavor_id = uuid.uuid4()
922 vimconnector.flavorlist[str(flavor_id)] = new_flavor
923 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
924
925 return str(flavor_id)
926
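# Illustrative flavor_data (hypothetical values, assuming 'vim' is an instance of this connector):
# with an 'extended' section the NUMA figures override the flat ram/vcpus values, so the flavor
# below ends up with ram=4096 MB and vcpus=4 (2 paired-threads x 2 hyperthreads):
#
#     flavor_id = vim.new_flavor({'name': 'numa-flavor', 'ram': 2048, 'vcpus': 2, 'disk': 10,
#                                 'extended': {'numas': [{'memory': 4, 'paired-threads': 2}]}})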
927 def delete_flavor(self, flavor_id):
928 """Deletes a tenant flavor from VIM identify by its id
929
930 Returns the used id or raise an exception
931 """
932 if flavor_id not in vimconnector.flavorlist:
933 raise vimconn.vimconnNotFoundException("Flavor not found.")
934
935 vimconnector.flavorlist.pop(flavor_id, None)
936 return flavor_id
937
938 def new_image(self, image_dict):
939 """
940 Adds a tenant image to VIM
941 Returns:
942 200, image-id if the image is created
943 <0, message if there is an error
944 """
945
946 return self.get_image_id_from_path(image_dict['location'])
947
948 def delete_image(self, image_id):
949 """
950 Deletes a tenant image from VIM
951 Args:
952 image_id is ID of Image to be deleted
953 Return:
954 returns the image identifier in UUID format or raises an exception on error
955 """
956 conn = self.connect_as_admin()
957 if not conn:
958 raise vimconn.vimconnConnectionException("Failed to connect vCD")
959 # Get Catalog details
960 url_list = [self.url, '/api/catalog/', image_id]
961 catalog_herf = ''.join(url_list)
962
963 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
964 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
965
966 response = self.perform_request(req_type='GET',
967 url=catalog_herf,
968 headers=headers)
969
970 if response.status_code != requests.codes.ok:
971 self.logger.debug("delete_image():GET REST API call {} failed. "\
972 "Return status code {}".format(catalog_herf,
973 response.status_code))
974 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
975
976 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
977 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
978 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
979
980 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
981 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
982 for catalogItem in catalogItems:
983 catalogItem_href = catalogItem.attrib['href']
984
985 response = self.perform_request(req_type='GET',
986 url=catalogItem_href,
987 headers=headers)
988
989 if response.status_code != requests.codes.ok:
990 self.logger.debug("delete_image():GET REST API call {} failed. "\
991 "Return status code {}".format(catalog_herf,
992 response.status_code))
993 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
994 catalogItem,
995 image_id))
996
997 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
998 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
999 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
1000 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
1001
1002 #Remove catalogItem
1003 response = self.perform_request(req_type='DELETE',
1004 url=catalogitem_remove_href,
1005 headers=headers)
1006 if response.status_code == requests.codes.no_content:
1007 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1008 else:
1009 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
1010
1011 #Remove catalog
1012 url_list = [self.url, '/api/admin/catalog/', image_id]
1013 catalog_remove_herf = ''.join(url_list)
1014 response = self.perform_request(req_type='DELETE',
1015 url=catalog_remove_herf,
1016 headers=headers)
1017
1018 if response.status_code == requests.codes.no_content:
1019 self.logger.debug("Deleted Catalog {}".format(image_id))
1020 return image_id
1021 else:
1022 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
1023
1024
1025 def catalog_exists(self, catalog_name, catalogs):
1026 """
1027
1028 :param catalog_name:
1029 :param catalogs:
1030 :return:
1031 """
1032 for catalog in catalogs:
1033 if catalog['name'] == catalog_name:
1034 return catalog['id']
1035
1036 def create_vimcatalog(self, vca=None, catalog_name=None):
1037 """ Create new catalog entry in vCloud director.
1038
1039 Args
1040 vca: vCloud director.
1041 catalog_name: catalog that the client wishes to create. Note that no validation is done on the name.
1042 The client must make sure to provide a valid string representation.
1043
1044 Returns catalog id if catalog created else None.
1045
1046 """
1047 try:
1048 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1049 if lxml_catalog_element:
1050 id_attr_value = lxml_catalog_element.get('id') # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
1051 return id_attr_value.split(':')[-1]
1052 catalogs = vca.list_catalogs()
1053 except Exception as ex:
1054 self.logger.error(
1055 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
1056 raise
1057 return self.catalog_exists(catalog_name, catalogs)
1058
1059 # noinspection PyIncorrectDocstring
1060 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
1061 description='', progress=False, chunk_bytes=128 * 1024):
1062 """
1063 Uploads an OVF file to a vCloud catalog
1064
1065 :param chunk_bytes:
1066 :param progress:
1067 :param description:
1068 :param image_name:
1069 :param vca:
1070 :param catalog_name: (str): The name of the catalog to upload the media.
1071 :param media_file_name: (str): The name of the local media file to upload.
1072 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1073 """
1074 os.path.isfile(media_file_name)
1075 statinfo = os.stat(media_file_name)
1076
1077 # find a catalog entry where we upload OVF.
1078 # create the vApp Template and check the status; if vCD is able to read the OVF it will respond with the
1079 # appropriate status change.
1080 # if vCD can parse the OVF we upload the VMDK file
1081 try:
1082 for catalog in vca.list_catalogs():
1083 if catalog_name != catalog['name']:
1084 continue
1085 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
1086 data = """
1087 <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
1088 """.format(catalog_name, description)
1089
1090 if self.client:
1091 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1092 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1093 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1094
1095 response = self.perform_request(req_type='POST',
1096 url=catalog_href,
1097 headers=headers,
1098 data=data)
1099
1100 if response.status_code == requests.codes.created:
1101 catalogItem = XmlElementTree.fromstring(response.content)
1102 entity = [child for child in catalogItem if
1103 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1104 href = entity.get('href')
1105 template = href
1106
1107 response = self.perform_request(req_type='GET',
1108 url=href,
1109 headers=headers)
1110
1111 if response.status_code == requests.codes.ok:
1112 headers['Content-Type'] = 'text/xml'
1113 result = re.search(r'rel="upload:default"\shref="(.*?\/descriptor.ovf)"', response.text)
1114 if result:
1115 transfer_href = result.group(1)
1116
1117 response = self.perform_request(req_type='PUT',
1118 url=transfer_href,
1119 headers=headers,
1120 data=open(media_file_name, 'rb'))
1121 if response.status_code != requests.codes.ok:
1122 self.logger.debug(
1123 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1124 media_file_name))
1125 return False
1126
1127 # TODO fix this with an async block
1128 time.sleep(5)
1129
1130 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1131
1132 # uploading VMDK file
1133 # check status of OVF upload and upload remaining files.
1134 response = self.perform_request(req_type='GET',
1135 url=template,
1136 headers=headers)
1137
1138 if response.status_code == requests.codes.ok:
1139 result = re.search(r'rel="upload:default"\s*href="(.*?vmdk)"', response.text)
1140 if result:
1141 link_href = result.group(1)
1142 # we skip ovf since it already uploaded.
1143 if 'ovf' in link_href:
1144 continue
1145 # The OVF file and the VMDK must be in the same directory
1146 head, tail = os.path.split(media_file_name)
1147 file_vmdk = head + '/' + link_href.split("/")[-1]
1148 if not os.path.isfile(file_vmdk):
1149 return False
1150 statinfo = os.stat(file_vmdk)
1151 if statinfo.st_size == 0:
1152 return False
1153 hrefvmdk = link_href
1154
1155 if progress:
1156 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1157 FileTransferSpeed()]
1158 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1159
1160 bytes_transferred = 0
1161 f = open(file_vmdk, 'rb')
1162 while bytes_transferred < statinfo.st_size:
1163 my_bytes = f.read(chunk_bytes)
1164 if len(my_bytes) <= chunk_bytes:
1165 headers['Content-Range'] = 'bytes {}-{}/{}'.format(
1166 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1167 headers['Content-Length'] = str(len(my_bytes))
1168 response = requests.put(url=hrefvmdk,
1169 headers=headers,
1170 data=my_bytes,
1171 verify=False)
1172 if response.status_code == requests.codes.ok:
1173 bytes_transferred += len(my_bytes)
1174 if progress:
1175 progress_bar.update(bytes_transferred)
1176 else:
1177 self.logger.debug(
1178 'file upload failed with error: [{}] {}'.format(response.status_code,
1179 response.content))
1180
1181 f.close()
1182 return False
1183 f.close()
1184 if progress:
1185 progress_bar.finish()
1186 time.sleep(10)
1187 return True
1188 else:
1189 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1190 format(catalog_name, media_file_name))
1191 return False
1192 except Exception as exp:
1193 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1194 .format(catalog_name,media_file_name, exp))
1195 raise vimconn.vimconnException(
1196 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1197 .format(catalog_name,media_file_name, exp))
1198
1199 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1200 return False
1201
1202 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1203 """Upload media file"""
1204 # TODO add named parameters for readability
1205
1206 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1207 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1208
1209 def validate_uuid4(self, uuid_string=None):
1210 """ Method validates the correct format of a UUID string.
1211
1212 Return: True if the string represents a valid uuid
1213 """
1214 try:
1215 val = uuid.UUID(uuid_string, version=4)
1216 except ValueError:
1217 return False
1218 return True
1219
1220 def get_catalogid(self, catalog_name=None, catalogs=None):
1221 """ Method check catalog and return catalog ID in UUID format.
1222
1223 Args
1224 catalog_name: catalog name as string
1225 catalogs: list of catalogs.
1226
1227 Return: catalogs uuid
1228 """
1229
1230 for catalog in catalogs:
1231 if catalog['name'] == catalog_name:
1232 catalog_id = catalog['id']
1233 return catalog_id
1234 return None
1235
1236 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1237 """ Method check catalog and return catalog name lookup done by catalog UUID.
1238
1239 Args
1240 catalog_name: catalog name as string
1241 catalogs: list of catalogs.
1242
1243 Return: catalogs name or None
1244 """
1245
1246 if not self.validate_uuid4(uuid_string=catalog_uuid):
1247 return None
1248
1249 for catalog in catalogs:
1250 catalog_id = catalog.get('id')
1251 if catalog_id == catalog_uuid:
1252 return catalog.get('name')
1253 return None
1254
1255 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1256 """ Method check catalog and return catalog name lookup done by catalog UUID.
1257
1258 Args
1259 catalog_name: catalog name as string
1260 catalogs: list of catalogs.
1261
1262 Return: catalogs name or None
1263 """
1264
1265 if not self.validate_uuid4(uuid_string=catalog_uuid):
1266 return None
1267
1268 for catalog in catalogs:
1269 catalog_id = catalog.get('id')
1270 if catalog_id == catalog_uuid:
1271 return catalog
1272 return None
1273
1274 def get_image_id_from_path(self, path=None, progress=False):
1275 """ Method uploads an OVF image to vCloud director.
1276
1277 Each OVF image is represented as a single catalog entry in vcloud director.
1278 The method checks for an existing catalog entry. The check is done by file name without the file extension.
1279
1280 If the given catalog name is already present the method will respond with the existing catalog uuid, otherwise
1281 it will create a new catalog entry and upload the OVF file to the newly created catalog.
1282
1283 If the method can't create a catalog entry or upload a file it will raise an exception.
1284
1285 The method accepts a boolean flag progress that will output a progress bar. It is useful
1286 for the standalone upload use case, e.g. to test a large file upload.
1287
1288 Args
1289 path: valid path to the OVF file.
1290 progress: boolean; if True a progress bar is shown.
1291
1292 Return: if the image is uploaded correctly the method returns the image catalog UUID.
1293 """
1294
1295 if not path:
1296 raise vimconn.vimconnException("Image path can't be None.")
1297
1298 if not os.path.isfile(path):
1299 raise vimconn.vimconnException("Can't read file. File not found.")
1300
1301 if not os.access(path, os.R_OK):
1302 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1303
1304 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1305
1306 dirpath, filename = os.path.split(path)
1307 flname, file_extension = os.path.splitext(path)
1308 if file_extension != '.ovf':
1309 self.logger.debug("Wrong file extension {}; connector supports only the OVF container.".format(file_extension))
1310 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1311
1312 catalog_name = os.path.splitext(filename)[0]
1313 catalog_md5_name = hashlib.md5(path.encode('utf-8')).hexdigest()
1314 self.logger.debug("File name {} Catalog Name {} file path {} "
1315 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1316
1317 try:
1318 org,vdc = self.get_vdc_details()
1319 catalogs = org.list_catalogs()
1320 except Exception as exp:
1321 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1322 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1323
1324 if len(catalogs) == 0:
1325 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1326 if self.create_vimcatalog(org, catalog_md5_name) is None:
1327 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1328
1329 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1330 media_name=filename, medial_file_name=path, progress=progress)
1331 if not result:
1332 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1333 return self.get_catalogid(catalog_name, catalogs)
1334 else:
1335 for catalog in catalogs:
1336 # search for existing catalog if we find same name we return ID
1337 # TODO optimize this
1338 if catalog['name'] == catalog_md5_name:
1339 self.logger.debug("Found existing catalog entry for {} "
1340 "catalog id {}".format(catalog_name,
1341 self.get_catalogid(catalog_md5_name, catalogs)))
1342 return self.get_catalogid(catalog_md5_name, catalogs)
1343
1344 # if we didn't find existing catalog we create a new one and upload image.
1345 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1346 if self.create_vimcatalog(org, catalog_md5_name) is None:
1347 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1348
1349 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1350 media_name=filename, medial_file_name=path, progress=progress)
1351 if not result:
1352 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1353
1354 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1355
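# Illustrative usage (hypothetical path, assuming 'vim' is an instance of this connector): each OVF
# becomes one catalog whose name is the md5 of the local path, so repeated calls with the same file
# find the existing catalog and return the same uuid:
#
#     image_id = vim.get_image_id_from_path('/tmp/images/cirros.ovf', progress=True)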
1356 def get_image_list(self, filter_dict={}):
1357 '''Obtain tenant images from VIM
1358 Filter_dict can be:
1359 name: image name
1360 id: image uuid
1361 checksum: image checksum
1362 location: image path
1363 Returns the image list of dictionaries:
1364 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1365 List can be empty
1366 '''
1367
1368 try:
1369 org, vdc = self.get_vdc_details()
1370 image_list = []
1371 catalogs = org.list_catalogs()
1372 if len(catalogs) == 0:
1373 return image_list
1374 else:
1375 for catalog in catalogs:
1376 catalog_uuid = catalog.get('id')
1377 name = catalog.get('name')
1378 filtered_dict = {}
1379 if filter_dict.get("name") and filter_dict["name"] != name:
1380 continue
1381 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1382 continue
1383 filtered_dict ["name"] = name
1384 filtered_dict ["id"] = catalog_uuid
1385 image_list.append(filtered_dict)
1386
1387 self.logger.debug("List of already created catalog items: {}".format(image_list))
1388 return image_list
1389 except Exception as exp:
1390 raise vimconn.vimconnException("Exception occurred while retrieving catalog items {}".format(exp))
1391
1392 def get_vappid(self, vdc=None, vapp_name=None):
1393 """ Method takes vdc object and vApp name and returns vapp uuid or None
1394
1395 Args:
1396 vdc: The VDC object.
1397 vapp_name: application vApp name identifier
1398
1399 Returns:
1400 The vApp UUID if found, otherwise None
1401 """
1402 if vdc is None or vapp_name is None:
1403 return None
1404 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1405 try:
1406 refs = [ref for ref in vdc.ResourceEntities.ResourceEntity \
1407 if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1408 if len(refs) == 1:
1409 return refs[0].href.split("vapp")[1][1:]
1410 except Exception as e:
1411 self.logger.exception(e)
1412 return False
1413 return None
1414
1415 def check_vapp(self, vdc=None, vapp_uuid=None):
1416 """ Method returns True or False depending on whether the vApp is deployed in vCloud director
1417
1418 Args:
1419 vca: Connector to VCA
1420 vdc: The VDC object.
1421 vappid: vappid is application identifier
1422
1423 Returns:
1424 The return True if vApp deployed
1425 :param vdc:
1426 :param vapp_uuid:
1427 """
1428 try:
1429 refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1430 if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1431 for ref in refs:
1432 vappid = ref.href.split("vapp")[1][1:]
1433 # find vapp with respected vapp uuid
1434 if vappid == vapp_uuid:
1435 return True
1436 except Exception as e:
1437 self.logger.exception(e)
1438 return False
1439 return False
1440
1441 def get_namebyvappid(self, vapp_uuid=None):
1442 """Method returns vApp name from vCD and lookup done by vapp_id.
1443
1444 Args:
1445 vapp_uuid: vappid is application identifier
1446
1447 Returns:
1448 The vApp name if found, otherwise None
1449 """
1450 try:
1451 if self.client and vapp_uuid:
1452 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1453 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1454 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1455
1456 response = self.perform_request(req_type='GET',
1457 url=vapp_call,
1458 headers=headers)
1459 #Retry login if session expired & retry sending request
1460 if response.status_code == 403:
1461 response = self.retry_rest('GET', vapp_call)
1462
1463 tree = XmlElementTree.fromstring(response.content)
1464 return tree.attrib['name']
1465 except Exception as e:
1466 self.logger.exception(e)
1467 return None
1468 return None
1469
1470 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1471 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1472 """Adds a VM instance to VIM
1473 Params:
1474 'start': (boolean) indicates if VM must start or created in pause mode.
1475 'image_id','flavor_id': image and flavor VIM id to use for the VM
1476 'net_list': list of interfaces, each one is a dictionary with:
1477 'name': (optional) name for the interface.
1478 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1479 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1480 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1481 'mac_address': (optional) mac address to assign to this interface
1482 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1483 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1484 'type': (mandatory) can be one of:
1485 'virtual', in this case always connected to a network of type 'net_type=bridge'
1486 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
1487 can be created unconnected
1488 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1489 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1490 are allocated on the same physical NIC
1491 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1492 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1493 or True, it must apply the default VIM behaviour
1494 After execution the method will add the key:
1495 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1496 interface. 'net_list' is modified
1497 'cloud_config': (optional) dictionary with:
1498 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1499 'users': (optional) list of users to be inserted, each item is a dict with:
1500 'name': (mandatory) user name,
1501 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1502 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1503 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1504 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1505 'dest': (mandatory) string with the destination absolute path
1506 'encoding': (optional, by default text). Can be one of:
1507 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1508 'content' (mandatory): string with the content of the file
1509 'permissions': (optional) string with file permissions, typically octal notation '0644'
1510 'owner': (optional) file owner, string with the format 'owner:group'
1511 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1512 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1513 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1514 'size': (mandatory) string with the size of the disk in GB
1515 availability_zone_index: Index of availability_zone_list to use for this VM. None if no availability zone is required
1516 availability_zone_list: list of availability zones given by the user in the VNFD descriptor. Ignored if
1517 availability_zone_index is None
1518 Returns a tuple with the instance identifier and created_items or raises an exception on error
1519 created_items can be None or a dictionary where this method can include key-values that will be passed to
1520 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1521 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1522 as not present.
1523 """
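# Illustrative example of the expected inputs; the UUID and key material below are
# placeholders only, not values from a real deployment:
#   net_list = [{'name': 'eth0', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f',
#                'type': 'virtual', 'use': 'mgmt'}]
#   cloud_config = {'key-pairs': ['ssh-rsa AAAA... user@example']}
#   disk_list = [{'size': '20'}]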
1524 self.logger.info("Creating new instance for entry {}".format(name))
1525 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
1526 "availability_zone_index {} availability_zone_list {}"\
1527 .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
1528 availability_zone_index, availability_zone_list))
1529
1530 # new vm name = vmname + '-' + uuid
1531 new_vm_name = [name, '-', str(uuid.uuid4())]
1532 vmname_andid = ''.join(new_vm_name)
1533
1534 for net in net_list:
1535 if net['type'] == "PCI-PASSTHROUGH":
1536 raise vimconn.vimconnNotSupportedException(
1537 "Current vCD version does not support type : {}".format(net['type']))
1538
1539 if len(net_list) > 10:
1540 raise vimconn.vimconnNotSupportedException(
1541 "The VM hardware versions 7 and above support up to 10 NICs only")
1542
1543 # before creating the vApp, we check for presence of the VDC, the catalog entry and the flavor
1545 org, vdc = self.get_vdc_details()
1546 if vdc is None:
1547 raise vimconn.vimconnNotFoundException(
1548 "new_vminstance(): Failed to create vApp {}: (failed to retrieve VDC information)".format(name))
1549 catalogs = org.list_catalogs()
1550 if catalogs is None:
1551 #Retry once, if failed by refreshing token
1552 self.get_token()
1553 org = Org(self.client, resource=self.client.get_org())
1554 catalogs = org.list_catalogs()
1555 if catalogs is None:
1556 raise vimconn.vimconnNotFoundException(
1557 "new_vminstance(): Failed to create vApp {}: (failed to retrieve catalogs list)".format(name))
1558
1559 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1560 if catalog_hash_name:
1561 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1562 else:
1563 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to create vApp {}: "
1564 "(failed to retrieve catalog information {})".format(name, image_id))
1565
1566 # Set vCPU and Memory based on flavor.
1567 vm_cpus = None
1568 vm_memory = None
1569 vm_disk = None
1570 numas = None
1571
1572 if flavor_id is not None:
1573 if flavor_id not in vimconnector.flavorlist:
1574 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to create vApp {}: "
1575 "failed to retrieve flavor information "
1576 "for flavor id {}".format(name, flavor_id))
1577 else:
1578 try:
1579 flavor = vimconnector.flavorlist[flavor_id]
1580 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1581 vm_memory = flavor[FLAVOR_RAM_KEY]
1582 vm_disk = flavor[FLAVOR_DISK_KEY]
1583 extended = flavor.get("extended", None)
1584 if extended:
1585 numas=extended.get("numas", None)
1586
1587 except Exception as exp:
1588 raise vimconn.vimconnException("Corrupted flavor {}. Exception: {}".format(flavor_id, exp))
1589
1590 # the image upload creates the template name as the catalog name followed by ' Template'
1591 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1592 power_on = 'false'
1593 if start:
1594 power_on = 'true'
1595
1596 # client must provide at least one entry in net_list; if not, we report an error
1597 # If the net type is mgmt, configure it as the primary net and use its NIC index as the primary NIC
1598 # If there is no mgmt net, the first net in net_list is considered the primary net.
1599 primary_net = None
1600 primary_netname = None
1601 primary_net_href = None
1602 network_mode = 'bridged'
1603 if net_list is not None and len(net_list) > 0:
1604 for net in net_list:
1605 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1606 primary_net = net
1607 if primary_net is None:
1608 primary_net = net_list[0]
1609
1610 try:
1611 primary_net_id = primary_net['net_id']
1612 url_list = [self.url, '/api/network/', primary_net_id]
1613 primary_net_href = ''.join(url_list)
1614 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1615 if 'name' in network_dict:
1616 primary_netname = network_dict['name']
1617
1618 except KeyError:
1619 raise vimconn.vimconnException("Corrupted primary net (missing 'net_id'): {}".format(primary_net))
1620 else:
1621 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create vApp {}: network list is empty.".format(name))
1622
1623 # use: 'data', 'bridge', 'mgmt'
1624 # create vApp. Set vcpu and ram based on flavor id.
1625 try:
1626 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1627 if not vdc_obj:
1628 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
1629
1630 for retry in (1,2):
1631 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1632 catalog_items = [items.attrib]
1633
1634 if len(catalog_items) == 1:
1635 if self.client:
1636 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1637 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1638
1639 response = self.perform_request(req_type='GET',
1640 url=catalog_items[0].get('href'),
1641 headers=headers)
1642 catalogItem = XmlElementTree.fromstring(response.content)
1643 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1644 vapp_tempalte_href = entity.get("href")
1645
1646 response = self.perform_request(req_type='GET',
1647 url=vapp_tempalte_href,
1648 headers=headers)
1649 if response.status_code != requests.codes.ok:
1650 self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
1651 response.status_code))
1652 else:
1653 result = response.text.replace("\n", " ")
1654
1655 vapp_template_tree = XmlElementTree.fromstring(response.content)
1656 children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
1657 vm_element = [child for child in children_element if 'Vm' in child.tag][0]
1658 vm_name = vm_element.get('name')
1659 vm_id = vm_element.get('id')
1660 vm_href = vm_element.get('href')
1661
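# Extract the template's default vCPU count, memory size (MB) and cores-per-socket from the
# OVF virtual hardware section; these values seed the instantiation payload built below.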
1662 cpus = re.search(r'<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>', result).group(1)
1663 memory_mb = re.search(r'<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>', result).group(1)
1664 cores = re.search(r'<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
1665
1666 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
1667 vdc_id = vdc.get('id').split(':')[-1]
1668 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
1669 vdc_id)
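# InstantiateVAppTemplateParams payload: the vApp is created undeployed and powered off,
# bridged to the primary org VDC network, with a single DHCP-configured NIC and the
# CPU/memory/cores-per-socket values extracted from the template above.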
1670 data = """<?xml version="1.0" encoding="UTF-8"?>
1671 <InstantiateVAppTemplateParams
1672 xmlns="http://www.vmware.com/vcloud/v1.5"
1673 name="{}"
1674 deploy="false"
1675 powerOn="false"
1676 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
1677 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1678 <Description>Vapp instantiation</Description>
1679 <InstantiationParams>
1680 <NetworkConfigSection>
1681 <ovf:Info>Configuration parameters for logical networks</ovf:Info>
1682 <NetworkConfig networkName="{}">
1683 <Configuration>
1684 <ParentNetwork href="{}" />
1685 <FenceMode>bridged</FenceMode>
1686 </Configuration>
1687 </NetworkConfig>
1688 </NetworkConfigSection>
1689 <LeaseSettingsSection
1690 type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
1691 <ovf:Info>Lease Settings</ovf:Info>
1692 <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
1693 <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
1694 </LeaseSettingsSection>
1695 </InstantiationParams>
1696 <Source href="{}"/>
1697 <SourcedItem>
1698 <Source href="{}" id="{}" name="{}"
1699 type="application/vnd.vmware.vcloud.vm+xml"/>
1700 <VmGeneralParams>
1701 <NeedsCustomization>false</NeedsCustomization>
1702 </VmGeneralParams>
1703 <InstantiationParams>
1704 <NetworkConnectionSection>
1705 <ovf:Info>Specifies the available VM network connections</ovf:Info>
1706 <NetworkConnection network="{}">
1707 <NetworkConnectionIndex>0</NetworkConnectionIndex>
1708 <IsConnected>true</IsConnected>
1709 <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
1710 </NetworkConnection>
1711 </NetworkConnectionSection><ovf:VirtualHardwareSection>
1712 <ovf:Info>Virtual hardware requirements</ovf:Info>
1713 <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
1714 xmlns:vmw="http://www.vmware.com/schema/ovf">
1715 <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
1716 <rasd:Description>Number of Virtual CPUs</rasd:Description>
1717 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
1718 <rasd:InstanceID>4</rasd:InstanceID>
1719 <rasd:Reservation>0</rasd:Reservation>
1720 <rasd:ResourceType>3</rasd:ResourceType>
1721 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
1722 <rasd:Weight>0</rasd:Weight>
1723 <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
1724 </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
1725 <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
1726 <rasd:Description>Memory Size</rasd:Description>
1727 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
1728 <rasd:InstanceID>5</rasd:InstanceID>
1729 <rasd:Reservation>0</rasd:Reservation>
1730 <rasd:ResourceType>4</rasd:ResourceType>
1731 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
1732 <rasd:Weight>0</rasd:Weight>
1733 </ovf:Item>
1734 </ovf:VirtualHardwareSection>
1735 </InstantiationParams>
1736 </SourcedItem>
1737 <AllEULAsAccepted>false</AllEULAsAccepted>
1738 </InstantiateVAppTemplateParams>""".format(vmname_andid,
1739 primary_netname,
1740 primary_net_href,
1741 vapp_tempalte_href,
1742 vm_href,
1743 vm_id,
1744 vm_name,
1745 primary_netname,
1746 cpu=cpus,
1747 core=cores,
1748 memory=memory_mb)
1749
1750 response = self.perform_request(req_type='POST',
1751 url=instantiate_vapp_href,
1752 headers=headers,
1753 data=data)
1754
1755 if response.status_code != 201:
1756 self.logger.error("REST call {} failed, reason: {}, "\
1757 "status code: {}".format(instantiate_vapp_href,
1758 response.content,
1759 response.status_code))
1760 raise vimconn.vimconnException("new_vminstance(): Failed to create "\
1761 "vApp {}".format(vmname_andid))
1762 else:
1763 vapptask = self.get_task_from_response(response.content)
1764
1765 if vapptask is None and retry==1:
1766 self.get_token() # Retry getting token
1767 continue
1768 else:
1769 break
1770
1771 if vapptask is None or vapptask is False:
1772 raise vimconn.vimconnUnexpectedResponse(
1773 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1774
1775 # wait for task to complete
1776 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
1777
1778 if result.get('status') == 'success':
1779 self.logger.debug("new_vminstance(): Successfully created vApp {}".format(vmname_andid))
1780 else:
1781 raise vimconn.vimconnUnexpectedResponse(
1782 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1783
1784 except Exception as exp:
1785 raise vimconn.vimconnUnexpectedResponse(
1786 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1787
1788 # we should now have the vApp in undeployed state.
1789 try:
1790 vdc_obj = VDC(self.client, href=vdc.get('href'))
1791 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1792 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1793 vapp = VApp(self.client, resource=vapp_resource)
1794
1795 except Exception as exp:
1796 raise vimconn.vimconnUnexpectedResponse(
1797 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1798 .format(vmname_andid, exp))
1799
1800 if vapp_uuid is None:
1801 raise vimconn.vimconnUnexpectedResponse(
1802 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1803 vmname_andid))
1804
1805 #Add PCI passthrough/SR-IOV configurations
1806 vm_obj = None
1807 pci_devices_info = []
1808 reserve_memory = False
1809
1810 for net in net_list:
1811 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1812 pci_devices_info.append(net)
1813 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id' in net:
1814 reserve_memory = True
1815
1816 #Add PCI
1817 if len(pci_devices_info) > 0:
1818 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1819 vmname_andid ))
1820 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1821 pci_devices_info,
1822 vmname_andid)
1823 if PCI_devices_status:
1824 self.logger.info("Added PCI devices {} to VM {}".format(
1825 pci_devices_info,
1826 vmname_andid)
1827 )
1828 reserve_memory = True
1829 else:
1830 self.logger.info("Failed to add PCI devices {} to VM {}".format(
1831 pci_devices_info,
1832 vmname_andid)
1833 )
1834
1835 # Modify vm disk
1836 if vm_disk:
1837 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1838 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1839 if result :
1840 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1841
1842 #Add new or existing disks to vApp
1843 if disk_list:
1844 added_existing_disk = False
1845 for disk in disk_list:
1846 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1847 image_id = disk['image_id']
1848 # Adding CD-ROM to VM
1849 # will revisit this code once the specification is ready to support this feature
1850 self.insert_media_to_vm(vapp, image_id)
1851 elif "image_id" in disk and disk["image_id"] is not None:
1852 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1853 disk["image_id"] , vapp_uuid))
1854 self.add_existing_disk(catalogs=catalogs,
1855 image_id=disk["image_id"],
1856 size = disk["size"],
1857 template_name=templateName,
1858 vapp_uuid=vapp_uuid
1859 )
1860 added_existing_disk = True
1861 else:
1862 #Wait till added existing disk gets reflected into vCD database/API
1863 if added_existing_disk:
1864 time.sleep(5)
1865 added_existing_disk = False
1866 self.add_new_disk(vapp_uuid, disk['size'])
1867
1868 if numas:
1869 # Assigning numa affinity setting
1870 for numa in numas:
1871 if 'paired-threads-id' in numa:
1872 paired_threads_id = numa['paired-threads-id']
1873 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1874
1875 # add NICs & connect to networks in netlist
1876 try:
1877 vdc_obj = VDC(self.client, href=vdc.get('href'))
1878 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1879 vapp = VApp(self.client, resource=vapp_resource)
1880 vapp_id = vapp_resource.get('id').split(':')[-1]
1881
1882 self.logger.info("Removing primary NIC: ")
1883 # First remove all NICs so that NIC properties can be adjusted as needed
1884 self.remove_primary_network_adapter_from_all_vms(vapp)
1885
1886 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1887 primary_nic_index = 0
1888 nicIndex = 0
1889 for net in net_list:
1890 # openmano uses network id in UUID format.
1891 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
1892 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1893 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1894
1895 if 'net_id' not in net:
1896 continue
1897
1898 #Using net_id as the vim_id, i.e. the vim interface id, as we do not have a separate vim interface id
1899 #The same value will be returned in refresh_vms_status() as vim_interface_id
1900 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1901
1902 interface_net_id = net['net_id']
1903 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1904 interface_network_mode = net['use']
1905
1906 if interface_network_mode == 'mgmt':
1907 primary_nic_index = nicIndex
1908
1909 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1910 - DHCP (The IP address is obtained from a DHCP service.)
1911 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1912 - NONE (No IP addressing mode specified.)"""
1913
1914 if primary_netname is not None:
1915 self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
1916 nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
1917 if len(nets) == 1:
1918 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
1919
1920 if interface_net_name != primary_netname:
1921 # connect network to VM - with all DHCP by default
1922 self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
1923 self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
1924
1925 type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
1926 nic_type = 'VMXNET3'
1927 if 'type' in net and net['type'] not in type_list:
1928 # fetching nic type from vnf
1929 if 'model' in net:
1930 if net['model'] is not None:
1931 if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
1932 nic_type = 'VMXNET3'
1933 else:
1934 nic_type = net['model']
1935
1936 self.logger.info("new_vminstance(): adding network adapter "\
1937 "to a network {}".format(nets[0].get('name')))
1938 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1939 primary_nic_index,
1940 nicIndex,
1941 net,
1942 nic_type=nic_type)
1943 else:
1944 self.logger.info("new_vminstance(): adding network adapter "\
1945 "to a network {}".format(nets[0].get('name')))
1946 if net['type'] in ['SR-IOV', 'VF']:
1947 nic_type = net['type']
1948 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1949 primary_nic_index,
1950 nicIndex,
1951 net,
1952 nic_type=nic_type)
1953 nicIndex += 1
1954
1955 # cloud-init for ssh-key injection
1956 if cloud_config:
1957 # Create a catalog which will be carrying the config drive ISO
1958 # This catalog is deleted during vApp deletion. The catalog name carries
1959 # vApp UUID and that's how it gets identified during its deletion.
1960 config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
1961 self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
1962 config_drive_catalog_name))
1963 config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
1964 if config_drive_catalog_id is None:
1965 error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
1966 "ISO".format(config_drive_catalog_name)
1967 raise Exception(error_msg)
1968
1969 # Create config-drive ISO
1970 _, userdata = self._create_user_data(cloud_config)
1971 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
1972 iso_path = self.create_config_drive_iso(userdata)
1973 self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
1974
1975 self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
1976 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
1977 # Attach the config-drive ISO to the VM
1978 self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
1979 # The ISO remains in INVALID_STATE right after the PUT request (even though it is a blocking call)
1980 time.sleep(5)
1981 self.insert_media_to_vm(vapp, config_drive_catalog_id)
1982 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
1983
1984 # If the VM has PCI devices or SR-IOV interfaces, reserve memory for the VM
1985 if reserve_memory:
1986 self.reserve_memory_for_all_vms(vapp, memory_mb)
1987
1988 self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
1989
1990 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
1991 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
1992 if result.get('status') == 'success':
1993 self.logger.info("new_vminstance(): Successfully powered on "\
1994 "vApp {}".format(vmname_andid))
1995 else:
1996 self.logger.error("new_vminstance(): failed to power on vApp "\
1997 "{}".format(vmname_andid))
1998
1999 except Exception as exp:
2000 try:
2001 self.delete_vminstance(vapp_uuid)
2002 except Exception as exp2:
2003 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2004 # it might be the case that a specific mandatory entry in the dict is empty, or some other pyVcloud exception
2005 self.logger.error("new_vminstance(): Failed to create new vm instance {} with exception {}"
2006 .format(name, exp))
2007 raise vimconn.vimconnException("new_vminstance(): Failed to create new vm instance {} with exception {}"
2008 .format(name, exp))
2009
2010 # check if the vApp is deployed and, if that is the case, return the vApp UUID; otherwise an exception is raised below
2011 wait_time = 0
2012 vapp_uuid = None
2013 while wait_time <= MAX_WAIT_TIME:
2014 try:
2015 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2016 vapp = VApp(self.client, resource=vapp_resource)
2017 except Exception as exp:
2018 raise vimconn.vimconnUnexpectedResponse(
2019 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
2020 .format(vmname_andid, exp))
2021
2022 #if vapp and vapp.me.deployed:
2023 if vapp and vapp_resource.get('deployed') == 'true':
2024 vapp_uuid = vapp_resource.get('id').split(':')[-1]
2025 break
2026 else:
2027 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
2028 time.sleep(INTERVAL_TIME)
2029
2030 wait_time +=INTERVAL_TIME
2031
2032 #SET Affinity Rule for VM
2033 #Pre-requisites: User has created Host Groups in vCenter with the respective Hosts to be used
2034 #While creating VIM account user has to pass the Host Group names in availability_zone list
2035 #"availability_zone" is a part of VIM "config" parameters
2036 #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2037 #Host groups are referred as availability zones
2038 #With following procedure, deployed VM will be added into a VM group.
2039 #Then A VM to Host Affinity rule will be created using the VM group & Host group.
2040 if availability_zone_list:
2041 self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
2042 #Admin access required for creating Affinity rules
2043 client = self.connect_as_admin()
2044 if not client:
2045 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
2046 else:
2047 self.client = client
2048 if self.client:
2049 headers = {'Accept':'application/*+xml;version=27.0',
2050 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2051 #Step1: Get provider vdc details from organization
2052 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2053 if pvdc_href is not None:
2054 #Step2: Found required pvdc, now get resource pool information
2055 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2056 if respool_href is None:
2057 #Raise error if respool_href not found
2058 msg = "new_vminstance(): Error in finding resource pool details in pvdc {}"\
2059 .format(pvdc_href)
2060 self.log_message(msg)
2061
2062 #Step3: Verify requested availability zone(hostGroup) is present in vCD
2063 # get availability Zone
2064 vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
2065 # check if provided av zone(hostGroup) is present in vCD VIM
2066 status = self.check_availibility_zone(vm_az, respool_href, headers)
2067 if status is False:
2068 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
2069 "resource pool {} status: {}".format(vm_az,respool_href,status)
2070 self.log_message(msg)
2071 else:
2072 self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
2073
2074 #Step4: Find VM group references to create vm group
2075 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2076 if vmgrp_href is None:
2077 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2078 self.log_message(msg)
2079
2080 #Step5: Create a VmGroup with name az_VmGroup
2081 vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
2082 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2083 if status is not True:
2084 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
2085 self.log_message(msg)
2086
2087 #VM Group url to add vms to vm group
2088 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
2089
2090 #Step6: Add VM to VM Group
2091 #Find VM uuid from vapp_uuid
2092 vm_details = self.get_vapp_details_rest(vapp_uuid)
2093 vm_uuid = vm_details['vmuuid']
2094
2095 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2096 if status is not True:
2097 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
2098 self.log_message(msg)
2099
2100 #Step7: Create VM to Host affinity rule
2101 addrule_href = self.get_add_rule_reference (respool_href, headers)
2102 if addrule_href is None:
2103 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
2104 .format(respool_href)
2105 self.log_message(msg)
2106
2107 status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
2108 if status is False:
2109 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
2110 .format(name, vm_az)
2111 self.log_message(msg)
2112 else:
2113 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
2114 .format(name, vm_az))
2115 #Reset token to a normal user to perform other operations
2116 self.get_token()
2117
2118 if vapp_uuid is not None:
2119 return vapp_uuid, None
2120 else:
2121 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create new vm instance {}".format(name))
2122
2123 def create_config_drive_iso(self, user_data):
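"""Build an OpenStack-style config-drive ISO carrying the given cloud-init user data.

The ISO holds openstack/latest/meta_data.json and openstack/latest/user_data and is
labelled 'config-2' so cloud-init can detect it as a config drive.

Returns the path of the generated ISO inside a temporary directory; the caller is
responsible for removing that directory once the ISO has been uploaded.
"""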
2124 tmpdir = tempfile.mkdtemp()
2125 iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
2126 latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
2127 os.makedirs(latest_dir)
2128 with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
2129 open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
2130 userdata_file_obj.write(user_data)
2131 meta_file_obj.write(json.dumps({"availability_zone": "nova",
2132 "launch_index": 0,
2133 "name": "ConfigDrive",
2134 "uuid": str(uuid.uuid4())}
2135 )
2136 )
2137 genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
2138 iso_path=iso_path, source_dir_path=tmpdir)
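# genisoimage flags: -J (Joliet) and -r (Rock Ridge) filesystem extensions, -V sets the
# volume label to 'config-2' (the label config drives are identified by), -o output path.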
2139 self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
2140 try:
2141 FNULL = open(os.devnull, 'w')
2142 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2143 except subprocess.CalledProcessError as e:
2144 shutil.rmtree(tmpdir, ignore_errors=True)
2145 error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
2146 self.logger.error(error_msg)
2147 raise Exception(error_msg)
2148 return iso_path
2149
2150 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
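"""Upload a local ISO file as a Media item of the given vCD catalog.

Flow: POST a Media descriptor to the catalog's action/upload link, GET the returned media
entity to find the file upload href, PUT the ISO bytes to that href and finally wait for
the upload task to succeed. Raises an Exception on any failure.
"""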
2151 if not os.path.isfile(iso_file_path):
2152 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
2153 self.logger.error(error_msg)
2154 raise Exception(error_msg)
2155 iso_file_stat = os.stat(iso_file_path)
2156 xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
2157 <Media
2158 xmlns="http://www.vmware.com/vcloud/v1.5"
2159 name="{iso_name}"
2160 size="{iso_size}"
2161 imageType="iso">
2162 <Description>ISO image for config-drive</Description>
2163 </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
2164 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2165 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2166 headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
2167 catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
2168 response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
2169
2170 if response.status_code != 201:
2171 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
2172 self.logger.error(error_msg)
2173 raise Exception(error_msg)
2174
2175 catalogItem = XmlElementTree.fromstring(response.content)
2176 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
2177 entity_href = entity.get('href')
2178
2179 response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
2180 if response.status_code != 200:
2181 raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
2182
2183 match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
2184 if match:
2185 media_upload_href = match.group(1)
2186 else:
2187 raise Exception('Could not parse the upload URL for the media file from the last response')
2188 upload_iso_task = self.get_task_from_response(response.content)
2189 headers['Content-Type'] = 'application/octet-stream'
2190 response = self.perform_request(req_type='PUT',
2191 url=media_upload_href,
2192 headers=headers,
2193 data=open(iso_file_path, 'rb'))
2194
2195 if response.status_code != 200:
2196 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2197 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2198 if result.get('status') != 'success':
2199 raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
2200
2201 def get_vcd_availibility_zones(self,respool_href, headers):
2202 """ Method to find the presence of an availability zone in the VIM resource pool
2203
2204 Args:
2205 respool_href - resource pool href
2206 headers - header information
2207
2208 Returns:
2209 vcd_az - list of availability zones present in vCD
2210 """
2211 vcd_az = []
2212 url=respool_href
2213 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2214
2215 if resp.status_code != requests.codes.ok:
2216 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2217 else:
2218 #Get the href to hostGroups and find provided hostGroup is present in it
2219 resp_xml = XmlElementTree.fromstring(resp.content)
2220 for child in resp_xml:
2221 if 'VMWProviderVdcResourcePool' in child.tag:
2222 for schild in child:
2223 if 'Link' in schild.tag:
2224 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2225 hostGroup = schild.attrib.get('href')
2226 hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
2227 if hg_resp.status_code != requests.codes.ok:
2228 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
2229 else:
2230 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2231 for hostGroup in hg_resp_xml:
2232 if 'HostGroup' in hostGroup.tag:
2233 #append host group name to the list
2234 vcd_az.append(hostGroup.attrib.get("name"))
2235 return vcd_az
2236
2237
2238 def set_availability_zones(self):
2239 """
2240 Set vim availability zone
2241 """
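# The VIM config may define "availability_zone" either as a single host-group name or as a
# list, e.g. "availability_zone": ["HG_170", "HG_174", "HG_175"]; normalize it to a list.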
2242
2243 vim_availability_zones = None
2244 availability_zone = None
2245 if 'availability_zone' in self.config:
2246 vim_availability_zones = self.config.get('availability_zone')
2247 if isinstance(vim_availability_zones, str):
2248 availability_zone = [vim_availability_zones]
2249 elif isinstance(vim_availability_zones, list):
2250 availability_zone = vim_availability_zones
2251 else:
2252 return availability_zone
2253
2254 return availability_zone
2255
2256
2257 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2258 """
2259 Return the availability zone to be used by the created VM.
2260 returns: The VIM availability zone to be used or None
2261 """
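# Selection logic: when every zone name from the VNFD matches a VIM-configured zone the
# requested name is returned directly; otherwise the VIM zones are selected by index.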
2262 if availability_zone_index is None:
2263 if not self.config.get('availability_zone'):
2264 return None
2265 elif isinstance(self.config.get('availability_zone'), str):
2266 return self.config['availability_zone']
2267 else:
2268 return self.config['availability_zone'][0]
2269
2270 vim_availability_zones = self.availability_zone
2271
2272 # check if the VIM offers enough availability zones to cover those described in the VNFD
2273 if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
2274 # check if all the names of NFV AV match VIM AV names
2275 match_by_index = False
2276 for av in availability_zone_list:
2277 if av not in vim_availability_zones:
2278 match_by_index = True
2279 break
2280 if match_by_index:
2281 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
2282 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
2283 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
2284 self.logger.debug("VIM Availability zones will be used by index")
2285 return vim_availability_zones[availability_zone_index]
2286 else:
2287 return availability_zone_list[availability_zone_index]
2288 else:
2289 raise vimconn.vimconnConflictException("Not enough availability zones at VIM for this deployment")
2290
2291
2292 def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
2293 """ Method to create VM to Host Affinity rule in vCD
2294
2295 Args:
2296 addrule_href - href to make a POST request
2297 vmgrpname - name of the VM group created
2298 hostgrpname - name of the host group created earlier
2299 polarity - Affinity or Anti-affinity (default: Affinity)
2300 headers - headers to make REST call
2301
2302 Returns:
2303 True- if rule is created
2304 False- Failed to create rule due to some error
2305
2306 """
2307 task_status = False
2308 rule_name = polarity + "_" + vmgrpname
2309 payload = """<?xml version="1.0" encoding="UTF-8"?>
2310 <vmext:VMWVmHostAffinityRule
2311 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2312 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2313 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2314 <vcloud:Name>{}</vcloud:Name>
2315 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2316 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2317 <vcloud:Polarity>{}</vcloud:Polarity>
2318 <vmext:HostGroupName>{}</vmext:HostGroupName>
2319 <vmext:VmGroupName>{}</vmext:VmGroupName>
2320 </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
2321
2322 resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
2323
2324 if resp.status_code != requests.codes.accepted:
2325 self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
2326 task_status = False
2327 return task_status
2328 else:
2329 affinity_task = self.get_task_from_response(resp.content)
2330 self.logger.debug ("affinity_task: {}".format(affinity_task))
2331 if affinity_task is None or affinity_task is False:
2332 raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
2333 # wait for task to complete
2334 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2335 if result.get('status') == 'success':
2336 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
2337 return True
2338 else:
2339 raise vimconn.vimconnUnexpectedResponse(
2340 "failed to create affinity rule {}".format(rule_name))
2341
2342
2343 def get_add_rule_reference (self, respool_href, headers):
2344 """ This method finds the href used to add a VM-to-host affinity rule in vCD
2345
2346 Args:
2347 respool_href- href to resource pool
2348 headers- header information to make REST call
2349
2350 Returns:
2351 None - if no valid href to add rule found or
2352 addrule_href - href to add vm to host affinity rule of resource pool
2353 """
2354 addrule_href = None
2355 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2356
2357 if resp.status_code != requests.codes.ok:
2358 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2359 else:
2360
2361 resp_xml = XmlElementTree.fromstring(resp.content)
2362 for child in resp_xml:
2363 if 'VMWProviderVdcResourcePool' in child.tag:
2364 for schild in child:
2365 if 'Link' in schild.tag:
2366 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
2367 schild.attrib.get('rel') == "add":
2368 addrule_href = schild.attrib.get('href')
2369 break
2370
2371 return addrule_href
2372
2373
2374 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2375 """ Method to add deployed VM to newly created VM Group.
2376 This is required to create VM to Host affinity in vCD
2377
2378 Args:
2379 vm_uuid- newly created vm uuid
2380 vmGroupNameURL- URL to VM Group name
2381 vmGroup_name- Name of VM group created
2382 headers- Headers for REST request
2383
2384 Returns:
2385 True- if VM added to VM group successfully
2386 False- if any error is encountered
2387 """
2388
2389 addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
2390
2391 if addvm_resp.status_code != requests.codes.ok:
2392 self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
2393 .format(vmGroupNameURL, addvm_resp.status_code))
2394 return False
2395 else:
2396 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2397 for child in resp_xml:
2398 if child.tag.split('}')[1] == 'Link':
2399 if child.attrib.get("rel") == "addVms":
2400 addvmtogrpURL = child.attrib.get("href")
2401
2402 #Get vm details
2403 url_list = [self.url, '/api/vApp/vm-',vm_uuid]
2404 vmdetailsURL = ''.join(url_list)
2405
2406 resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
2407
2408 if resp.status_code != requests.codes.ok:
2409 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
2410 return False
2411
2412 #Parse VM details
2413 resp_xml = XmlElementTree.fromstring(resp.content)
2414 if resp_xml.tag.split('}')[1] == "Vm":
2415 vm_id = resp_xml.attrib.get("id")
2416 vm_name = resp_xml.attrib.get("name")
2417 vm_href = resp_xml.attrib.get("href")
2418 #print vm_id, vm_name, vm_href
2419 #Add VM into VMgroup
2420 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2421 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2422 xmlns="http://www.vmware.com/vcloud/versions" \
2423 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2424 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2425 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2426 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2427 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2428 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2429 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2430 <ns2:VmReference href="{}" id="{}" name="{}" \
2431 type="application/vnd.vmware.vcloud.vm+xml" />\
2432 </ns2:Vms>""".format(vm_href, vm_id, vm_name)
2433
2434 addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
2435
2436 if addvmtogrp_resp.status_code != requests.codes.accepted:
2437 self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
2438 return False
2439 else:
2440 self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
2441 return True
2442
2443
2444 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2445 """Method to create a VM group in vCD
2446
2447 Args:
2448 vmgroup_name : Name of VM group to be created
2449 vmgroup_href : href for vmgroup
2450 headers- Headers for REST request
2451 """
2452 #POST to add URL with required data
2453 vmgroup_status = False
2454 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2455 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2456 <vmCount>1</vmCount>\
2457 </VMWVmGroup>""".format(vmgroup_name)
2458 resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
2459
2460 if resp.status_code != requests.codes.accepted:
2461 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
2462 return vmgroup_status
2463 else:
2464 vmgroup_task = self.get_task_from_response(resp.content)
2465 if vmgroup_task is None or vmgroup_task is False:
2466 raise vimconn.vimconnUnexpectedResponse(
2467 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2468
2469 # wait for task to complete
2470 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2471
2472 if result.get('status') == 'success':
2473 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
2474 #time.sleep(10)
2475 vmgroup_status = True
2476 return vmgroup_status
2477 else:
2478 raise vimconn.vimconnUnexpectedResponse(\
2479 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2480
2481
2482 def find_vmgroup_reference(self, url, headers):
2483 """ Method to find the reference needed to create a new VMGroup, which is required to add the created VM
2484 Args:
2485 url- resource pool href
2486 headers- header information
2487
2488 Returns:
2489 vmgrp_href - href used to create a VM group, or None if not found
2490 """
2491 #Perform GET on resource pool to find 'add' link to create VMGroup
2492 #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2493 vmgrp_href = None
2494 resp = self.perform_request(req_type='GET',url=url, headers=headers)
2495
2496 if resp.status_code != requests.codes.ok:
2497 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2498 else:
2499 #Get the href to add vmGroup to vCD
2500 resp_xml = XmlElementTree.fromstring(resp.content)
2501 for child in resp_xml:
2502 if 'VMWProviderVdcResourcePool' in child.tag:
2503 for schild in child:
2504 if 'Link' in schild.tag:
2505 #Find href with type VMGroup and rel with add
2506 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
2507 and schild.attrib.get('rel') == "add":
2508 vmgrp_href = schild.attrib.get('href')
2509 return vmgrp_href
2510
2511
2512 def check_availibility_zone(self, az, respool_href, headers):
2513 """ Method to verify whether the requested availability zone is present in the provided
2514 resource pool
2515
2516 Args:
2517 az - name of the host group (availability zone)
2518 respool_href - Resource Pool href
2519 headers - Headers to make REST call
2520 Returns:
2521 az_found - True if the availability zone is found, else False
2522 """
2523 az_found = False
2524 headers['Accept']='application/*+xml;version=27.0'
2525 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2526
2527 if resp.status_code != requests.codes.ok:
2528 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2529 else:
2530 #Get the href to hostGroups and find provided hostGroup is present in it
2531 resp_xml = XmlElementTree.fromstring(resp.content)
2532
2533 for child in resp_xml:
2534 if 'VMWProviderVdcResourcePool' in child.tag:
2535 for schild in child:
2536 if 'Link' in schild.tag:
2537 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2538 hostGroup_href = schild.attrib.get('href')
2539 hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
2540 if hg_resp.status_code != requests.codes.ok:
2541 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
2542 else:
2543 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2544 for hostGroup in hg_resp_xml:
2545 if 'HostGroup' in hostGroup.tag:
2546 if hostGroup.attrib.get("name") == az:
2547 az_found = True
2548 break
2549 return az_found
2550
2551
2552 def get_pvdc_for_org(self, org_vdc, headers):
2553 """ This method gets provider vdc references from organisation
2554
2555 Args:
2556 org_vdc - name of the organisation VDC to find pvdc
2557 headers - headers to make REST call
2558
2559 Returns:
2560 None - if no pvdc href found else
2561 pvdc_href - href to pvdc
2562 """
2563
2564 #Get provider VDC references from vCD
2565 pvdc_href = None
2566 #url = '<vcd url>/api/admin/extension/providerVdcReferences'
2567 url_list = [self.url, '/api/admin/extension/providerVdcReferences']
2568 url = ''.join(url_list)
2569
2570 response = self.perform_request(req_type='GET',url=url, headers=headers)
2571 if response.status_code != requests.codes.ok:
2572 self.logger.debug ("REST API call {} failed. Return status code {}"\
2573 .format(url, response.status_code))
2574 else:
2575 xmlroot_response = XmlElementTree.fromstring(response.content)
2576 for child in xmlroot_response:
2577 if 'ProviderVdcReference' in child.tag:
2578 pvdc_href = child.attrib.get('href')
2579 #Get vdcReferences to find org
2580 pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2581 if pvdc_resp.status_code != requests.codes.ok:
2582 raise vimconn.vimconnException("REST API call {} failed. "\
2583 "Return status code {}"\
2584 .format(url, pvdc_resp.status_code))
2585
2586 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
2587 for child in pvdc_resp_xml:
2588 if 'Link' in child.tag:
2589 if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
2590 vdc_href = child.attrib.get('href')
2591
2592 #Check if provided org is present in vdc
2593 vdc_resp = self.perform_request(req_type='GET',
2594 url=vdc_href,
2595 headers=headers)
2596 if vdc_resp.status_code != requests.codes.ok:
2597 raise vimconn.vimconnException("REST API call {} failed. "\
2598 "Return status code {}"\
2599 .format(url, vdc_resp.status_code))
2600 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
2601 for child in vdc_resp_xml:
2602 if 'VdcReference' in child.tag:
2603 if child.attrib.get('name') == org_vdc:
2604 return pvdc_href
2605
2606
2607 def get_resource_pool_details(self, pvdc_href, headers):
2608 """ Method to get resource pool information.
2609 Host groups are a property of the resource pool.
2610 To get host groups, we need to GET the details of the resource pool.
2611
2612 Args:
2613 pvdc_href: href to pvdc details
2614 headers: headers
2615
2616 Returns:
2617 respool_href - Returns href link reference to resource pool
2618 """
2619 respool_href = None
2620 resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2621
2622 if resp.status_code != requests.codes.ok:
2623 self.logger.debug ("REST API call {} failed. Return status code {}"\
2624 .format(pvdc_href, resp.status_code))
2625 else:
2626 respool_resp_xml = XmlElementTree.fromstring(resp.content)
2627 for child in respool_resp_xml:
2628 if 'Link' in child.tag:
2629 if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
2630 respool_href = child.attrib.get("href")
2631 break
2632 return respool_href
2633
2634
2635 def log_message(self, msg):
2636 """
2637 Method to log error messages related to Affinity rule creation
2638 in new_vminstance & raise Exception
2639 Args :
2640 msg - Error message to be logged
2641
2642 """
2643 #get token to connect vCD as a normal user
2644 self.get_token()
2645 self.logger.debug(msg)
2646 raise vimconn.vimconnException(msg)
2647
2648
2649 ##
2650 ##
2651 ## based on current discussion
2652 ##
2653 ##
2654 ## server:
2655 # created: '2016-09-08T11:51:58'
2656 # description: simple-instance.linux1.1
2657 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
2658 # hostId: e836c036-74e7-11e6-b249-0800273e724c
2659 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
2660 # status: ACTIVE
2661 # error_msg:
2662 # interfaces: …
2663 #
2664 def get_vminstance(self, vim_vm_uuid=None):
2665 """Returns the VM instance information from VIM"""
2666
2667 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
2668
2669 org, vdc = self.get_vdc_details()
2670 if vdc is None:
2671 raise vimconn.vimconnConnectionException(
2672 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2673
2674 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
2675 if not vm_info_dict:
2676 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2677 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2678
2679 status_key = vm_info_dict['status']
2680 error = ''
2681 try:
2682 vm_dict = {'created': vm_info_dict['created'],
2683 'description': vm_info_dict['name'],
2684 'status': vcdStatusCode2manoFormat[int(status_key)],
2685 'hostId': vm_info_dict['vmuuid'],
2686 'error_msg': error,
2687 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2688
2689 if 'interfaces' in vm_info_dict:
2690 vm_dict['interfaces'] = vm_info_dict['interfaces']
2691 else:
2692 vm_dict['interfaces'] = []
2693 except KeyError:
2694 vm_dict = {'created': '',
2695 'description': '',
2696 'status': vcdStatusCode2manoFormat[int(-1)],
2697 'hostId': vm_info_dict['vmuuid'],
2698 'error_msg': "Inconsistency state",
2699 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2700
2701 return vm_dict
2702
2703 def delete_vminstance(self, vm__vim_uuid, created_items=None):
2704 """Method to power off and remove a VM instance from the vCloud Director network.
2705
2706 Args:
2707 vm__vim_uuid: VM UUID
2708
2709 Returns:
2710 Returns the instance identifier
2711 """
2712
2713 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
2714
2715 org, vdc = self.get_vdc_details()
2716 vdc_obj = VDC(self.client, href=vdc.get('href'))
2717 if vdc_obj is None:
2718 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
2719 self.tenant_name))
2720 raise vimconn.vimconnException(
2721 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2722
2723 try:
2724 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2725 if vapp_name is None:
2726 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2727 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2728 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2729 vapp_resource = vdc_obj.get_vapp(vapp_name)
2730 vapp = VApp(self.client, resource=vapp_resource)
2731
2732 # Delete vApp and wait for status change if task executed and vApp is None.
2733
2734 if vapp:
2735 if vapp_resource.get('deployed') == 'true':
2736 self.logger.info("Powering off vApp {}".format(vapp_name))
2737 #Power off vApp
2738 powered_off = False
2739 wait_time = 0
2740 while wait_time <= MAX_WAIT_TIME:
2741 power_off_task = vapp.power_off()
2742 result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
2743
2744 if result.get('status') == 'success':
2745 powered_off = True
2746 break
2747 else:
2748 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
2749 time.sleep(INTERVAL_TIME)
2750
2751 wait_time +=INTERVAL_TIME
2752 if not powered_off:
2753 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
2754 else:
2755 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
2756
2757 #Undeploy vApp
2758 self.logger.info("Undeploy vApp {}".format(vapp_name))
2759 wait_time = 0
2760 undeployed = False
2761 while wait_time <= MAX_WAIT_TIME:
2762 vapp = VApp(self.client, resource=vapp_resource)
2763 if not vapp:
2764 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2765 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2766 undeploy_task = vapp.undeploy()
2767
2768 result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
2769 if result.get('status') == 'success':
2770 undeployed = True
2771 break
2772 else:
2773 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
2774 time.sleep(INTERVAL_TIME)
2775
2776 wait_time +=INTERVAL_TIME
2777
2778 if not undeployed:
2779 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
2780
2781 # delete vapp
2782 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
2783
2784 if vapp is not None:
2785 wait_time = 0
2786 result = False
2787
2788 while wait_time <= MAX_WAIT_TIME:
2789 vapp = VApp(self.client, resource=vapp_resource)
2790 if not vapp:
2791 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2792 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2793
2794 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
2795
2796 result = self.client.get_task_monitor().wait_for_success(task=delete_task)
2797 if result.get('status') == 'success':
2798 break
2799 else:
2800 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
2801 time.sleep(INTERVAL_TIME)
2802
2803 wait_time +=INTERVAL_TIME
2804
2805 if result is None:
2806 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
2807 else:
2808 self.logger.info("Deleted vm instance {} successfully".format(vm__vim_uuid))
2809 config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
2810 catalog_list = self.get_image_list()
2811 try:
2812 config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
2813 if catalog_['name'] == config_drive_catalog_name][0]
2814 except IndexError:
2815 pass
2816 if config_drive_catalog_id:
2817 self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
2818 'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
2819 self.delete_image(config_drive_catalog_id)
2820 return vm__vim_uuid
2821 except Exception:
2822 self.logger.debug(traceback.format_exc())
2823 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
2824
2825
2826 def refresh_vms_status(self, vm_list):
2827 """Get the status of the virtual machines and their interfaces/ports
2828 Params: the list of VM identifiers
2829 Returns a dictionary with:
2830 vm_id: #VIM id of this Virtual Machine
2831 status: #Mandatory. Text with one of:
2832 # DELETED (not found at vim)
2833 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2834 # OTHER (Vim reported other status not understood)
2835 # ERROR (VIM indicates an ERROR status)
2836 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2837 # CREATING (on building process), ERROR
2838 # ACTIVE:NoMgmtIP (active, but none of its interfaces has an IP address)
2839 #
2840 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2841 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2842 interfaces:
2843 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2844 mac_address: #Text format XX:XX:XX:XX:XX:XX
2845 vim_net_id: #network id where this interface is connected
2846 vim_interface_id: #interface/port VIM id
2847 ip_address: #null, or text with IPv4, IPv6 address
2848 """
2849
2850 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
2851
2852 org,vdc = self.get_vdc_details()
2853 if vdc is None:
2854 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2855
2856 vms_dict = {}
2857 nsx_edge_list = []
2858 for vmuuid in vm_list:
2859 vapp_name = self.get_namebyvappid(vmuuid)
2860 if vapp_name is not None:
2861
2862 try:
2863 vm_pci_details = self.get_vm_pci_details(vmuuid)
2864 vdc_obj = VDC(self.client, href=vdc.get('href'))
2865 vapp_resource = vdc_obj.get_vapp(vapp_name)
2866 the_vapp = VApp(self.client, resource=vapp_resource)
2867
2868 vm_details = {}
2869 for vm in the_vapp.get_all_vms():
2870 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2871 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2872 response = self.perform_request(req_type='GET',
2873 url=vm.get('href'),
2874 headers=headers)
2875
2876 if response.status_code != 200:
2877                             self.logger.error("refresh_vms_status : REST call {} failed reason : {} "\
2878 "status code : {}".format(vm.get('href'),
2879 response.content,
2880 response.status_code))
2881 raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
2882 "VM details")
2883 xmlroot = XmlElementTree.fromstring(response.content)
2884
2885
2886                         result = response.text.replace("\n", " ")
2887                         hdd_match = re.search(r'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=', result)
2888                         if hdd_match:
2889                             hdd_mb = hdd_match.group(1)
2890                             vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
2891                         cpus_match = re.search(r'<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>', result)
2892                         if cpus_match:
2893                             cpus = cpus_match.group(1)
2894                             vm_details['cpus'] = int(cpus) if cpus else None
2895                         memory_s = re.search(r'<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>', result)
2896                         vm_details['memory_mb'] = int(memory_s.group(1)) if memory_s else None
2897 vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
2898 vm_details['id'] = xmlroot.get('id')
2899 vm_details['name'] = xmlroot.get('name')
2900 vm_info = [vm_details]
2901 if vm_pci_details:
2902 vm_info[0].update(vm_pci_details)
2903
2904 vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2905 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2906 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2907
2908 # get networks
2909 vm_ip = None
2910 vm_mac = None
2911 networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
2912 for network in networks:
2913 mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
2914 vm_mac = mac_s.group(1) if mac_s else None
2915 ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
2916 vm_ip = ip_s.group(1) if ip_s else None
2917
2918 if vm_ip is None:
2919 if not nsx_edge_list:
2920 nsx_edge_list = self.get_edge_details()
2921 if nsx_edge_list is None:
2922 raise vimconn.vimconnException("refresh_vms_status:"\
2923 "Failed to get edge details from NSX Manager")
2924 if vm_mac is not None:
2925 vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
2926
2927 net_s = re.search('network="(.*?)"',network)
2928 network_name = net_s.group(1) if net_s else None
2929
2930 vm_net_id = self.get_network_id_by_name(network_name)
2931 interface = {"mac_address": vm_mac,
2932 "vim_net_id": vm_net_id,
2933 "vim_interface_id": vm_net_id,
2934 "ip_address": vm_ip}
2935
2936 vm_dict["interfaces"].append(interface)
2937
2938 # add a vm to vm dict
2939 vms_dict.setdefault(vmuuid, vm_dict)
2940 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
2941 except Exception as exp:
2942 self.logger.debug("Error in response {}".format(exp))
2943 self.logger.debug(traceback.format_exc())
2944
2945 return vms_dict
2946
2947
2948 def get_edge_details(self):
2949 """Get the NSX edge list from NSX Manager
2950 Returns list of NSX edges
2951 """
2952 edge_list = []
2953 rheaders = {'Content-Type': 'application/xml'}
2954 nsx_api_url = '/api/4.0/edges'
2955
2956 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2957
2958 try:
2959 resp = requests.get(self.nsx_manager + nsx_api_url,
2960 auth = (self.nsx_user, self.nsx_password),
2961 verify = False, headers = rheaders)
2962 if resp.status_code == requests.codes.ok:
2963 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2964 for edge_pages in paged_Edge_List:
2965 if edge_pages.tag == 'edgePage':
2966 for edge_summary in edge_pages:
2967 if edge_summary.tag == 'pagingInfo':
2968 for element in edge_summary:
2969 if element.tag == 'totalCount' and element.text == '0':
2970 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2971 .format(self.nsx_manager))
2972
2973 if edge_summary.tag == 'edgeSummary':
2974 for element in edge_summary:
2975 if element.tag == 'id':
2976 edge_list.append(element.text)
2977 else:
2978 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2979 .format(self.nsx_manager))
2980
2981 if not edge_list:
2982 raise vimconn.vimconnException("get_edge_details: "\
2983 "No NSX edge details found: {}"
2984 .format(self.nsx_manager))
2985 else:
2986 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2987 return edge_list
2988 else:
2989 self.logger.debug("get_edge_details: "
2990 "Failed to get NSX edge details from NSX Manager: {}"
2991 .format(resp.content))
2992 return None
2993
2994 except Exception as exp:
2995 self.logger.debug("get_edge_details: "\
2996 "Failed to get NSX edge details from NSX Manager: {}"
2997 .format(exp))
2998 raise vimconn.vimconnException("get_edge_details: "\
2999 "Failed to get NSX edge details from NSX Manager: {}"
3000 .format(exp))
3001
3002
3003 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3004 """Get IP address details from NSX edges, using the MAC address
3005 PARAMS: nsx_edges : List of NSX edges
3006 mac_address : Find IP address corresponding to this MAC address
3007            Returns: IP address corresponding to the provided MAC address
3008 """
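        # Sketch of the DHCP lease XML shape this loop walks (only the tags used
        # below are shown; the addresses are hypothetical):
        #
        #   <dhcpLeaseInfo>
        #     <leaseInfo>
        #       <macAddress>00:50:56:aa:bb:cc</macAddress>
        #       <ipAddress>10.0.0.10</ipAddress>
        #     </leaseInfo>
        #   </dhcpLeaseInfo>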
3009
3010         ip_addr = edge_mac_addr = None
3011 rheaders = {'Content-Type': 'application/xml'}
3012
3013 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3014
3015 try:
3016 for edge in nsx_edges:
3017 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
3018
3019 resp = requests.get(self.nsx_manager + nsx_api_url,
3020 auth = (self.nsx_user, self.nsx_password),
3021 verify = False, headers = rheaders)
3022
3023 if resp.status_code == requests.codes.ok:
3024 dhcp_leases = XmlElementTree.fromstring(resp.text)
3025 for child in dhcp_leases:
3026 if child.tag == 'dhcpLeaseInfo':
3027 dhcpLeaseInfo = child
3028 for leaseInfo in dhcpLeaseInfo:
3029 for elem in leaseInfo:
3030 if (elem.tag)=='macAddress':
3031 edge_mac_addr = elem.text
3032 if (elem.tag)=='ipAddress':
3033 ip_addr = elem.text
3034 if edge_mac_addr is not None:
3035 if edge_mac_addr == mac_address:
3036 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
3037 .format(ip_addr, mac_address,edge))
3038 return ip_addr
3039 else:
3040 self.logger.debug("get_ipaddr_from_NSXedge: "\
3041 "Error occurred while getting DHCP lease info from NSX Manager: {}"
3042 .format(resp.content))
3043
3044 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
3045 return None
3046
3047 except XmlElementTree.ParseError as Err:
3048             self.logger.debug("ParseError in response from NSX Manager {}".format(Err), exc_info=True)
3049
3050 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
3051         """Send an action over a VM instance from VIM
3052 Returns the vm_id if the action was successfully sent to the VIM"""
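        # Minimal usage sketch (the uuid below is hypothetical); action_dict carries
        # one of the keys handled further down, for example:
        #
        #   self.action_vminstance('<vapp-uuid>', {"start": None})
        #   self.action_vminstance('<vapp-uuid>', {"shutdown": None})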
3053
3054 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
3055 if vm__vim_uuid is None or action_dict is None:
3056 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
3057
3058 org, vdc = self.get_vdc_details()
3059 if vdc is None:
3060 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
3061
3062 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3063 if vapp_name is None:
3064 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3065 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3066 else:
3067 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
3068
3069 try:
3070 vdc_obj = VDC(self.client, href=vdc.get('href'))
3071 vapp_resource = vdc_obj.get_vapp(vapp_name)
3072 vapp = VApp(self.client, resource=vapp_resource)
3073 if "start" in action_dict:
3074 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
3075 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3076 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3077 self.instance_actions_result("start", result, vapp_name)
3078 elif "rebuild" in action_dict:
3079 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
3080 rebuild_task = vapp.deploy(power_on=True)
3081 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
3082 self.instance_actions_result("rebuild", result, vapp_name)
3083 elif "pause" in action_dict:
3084 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
3085 pause_task = vapp.undeploy(action='suspend')
3086 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
3087 self.instance_actions_result("pause", result, vapp_name)
3088 elif "resume" in action_dict:
3089 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
3090 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3091 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3092 self.instance_actions_result("resume", result, vapp_name)
3093 elif "shutoff" in action_dict or "shutdown" in action_dict:
3094 action_name , value = list(action_dict.items())[0]
3095 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
3096 shutdown_task = vapp.shutdown()
3097 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
3098 if action_name == "shutdown":
3099 self.instance_actions_result("shutdown", result, vapp_name)
3100 else:
3101 self.instance_actions_result("shutoff", result, vapp_name)
3102 elif "forceOff" in action_dict:
3103 result = vapp.undeploy(action='powerOff')
3104 self.instance_actions_result("forceOff", result, vapp_name)
3105 elif "reboot" in action_dict:
3106 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
3107 reboot_task = vapp.reboot()
3108 self.client.get_task_monitor().wait_for_success(task=reboot_task)
3109 else:
3110 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
3111 return vm__vim_uuid
3112 except Exception as exp :
3113 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
3114 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
3115
3116 def instance_actions_result(self, action, result, vapp_name):
3117 if result.get('status') == 'success':
3118             self.logger.info("action_vminstance: Successfully {} the vApp: {}".format(action, vapp_name))
3119 else:
3120 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
3121
3122 def get_vminstance_console(self, vm_id, console_type="novnc"):
3123 """
3124 Get a console for the virtual machine
3125 Params:
3126 vm_id: uuid of the VM
3127 console_type, can be:
3128 "novnc" (by default), "xvpvnc" for VNC types,
3129 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3130 Returns dict with the console parameters:
3131 protocol: ssh, ftp, http, https, ...
3132 server: usually ip address
3133 port: the http, ssh, ... port
3134 suffix: extra text, e.g. the http path and query string
3135 """
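        # Sketch of the dictionary assembled below from the MKS/console tickets
        # (server, port and suffix values are hypothetical):
        #
        #   {'protocol': 'https',
        #    'server': 'vcenter.example.com',
        #    'port': '443',
        #    'suffix': 'vm-1234/...'}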
3136 console_dict = {}
3137
3138         if console_type is None or console_type == 'novnc':
3139
3140 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
3141
3142 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3143 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3144 response = self.perform_request(req_type='POST',
3145 url=url_rest_call,
3146 headers=headers)
3147
3148 if response.status_code == 403:
3149 response = self.retry_rest('GET', url_rest_call)
3150
3151 if response.status_code != 200:
3152                 self.logger.error("REST call {} failed reason : {} "\
3153 "status code : {}".format(url_rest_call,
3154 response.content,
3155 response.status_code))
3156 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3157 "VM Mks ticket details")
3158             s = re.search("<Host>(.*?)</Host>", response.text)
3159             console_dict['server'] = s.group(1) if s else None
3160             s1 = re.search(r"<Port>(\d+)</Port>", response.text)
3161             console_dict['port'] = s1.group(1) if s1 else None
3162
3163
3164 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
3165
3166 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3167 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3168 response = self.perform_request(req_type='POST',
3169 url=url_rest_call,
3170 headers=headers)
3171
3172 if response.status_code == 403:
3173 response = self.retry_rest('GET', url_rest_call)
3174
3175 if response.status_code != 200:
3176                 self.logger.error("REST call {} failed reason : {} "\
3177 "status code : {}".format(url_rest_call,
3178 response.content,
3179 response.status_code))
3180 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3181 "VM console details")
3182             s = re.search(r">.*?/(vm-\d+.*)</", response.text)
3183 console_dict['suffix'] = s.group(1) if s else None
3184 console_dict['protocol'] = "https"
3185
3186 return console_dict
3187
3188 # NOT USED METHODS in current version
3189
3190 def host_vim2gui(self, host, server_dict):
3191 """Transform host dictionary from VIM format to GUI format,
3192 and append to the server_dict
3193 """
3194 raise vimconn.vimconnNotImplemented("Should have implemented this")
3195
3196 def get_hosts_info(self):
3197 """Get the information of deployed hosts
3198 Returns the hosts content"""
3199 raise vimconn.vimconnNotImplemented("Should have implemented this")
3200
3201 def get_hosts(self, vim_tenant):
3202 """Get the hosts and deployed instances
3203 Returns the hosts content"""
3204 raise vimconn.vimconnNotImplemented("Should have implemented this")
3205
3206 def get_processor_rankings(self):
3207 """Get the processor rankings in the VIM database"""
3208 raise vimconn.vimconnNotImplemented("Should have implemented this")
3209
3210 def new_host(self, host_data):
3211 """Adds a new host to VIM"""
3212 '''Returns status code of the VIM response'''
3213 raise vimconn.vimconnNotImplemented("Should have implemented this")
3214
3215 def new_external_port(self, port_data):
3216         """Adds an external port to VIM"""
3217 '''Returns the port identifier'''
3218 raise vimconn.vimconnNotImplemented("Should have implemented this")
3219
3220 def new_external_network(self, net_name, net_type):
3221         """Adds an external network to VIM (shared)"""
3222 '''Returns the network identifier'''
3223 raise vimconn.vimconnNotImplemented("Should have implemented this")
3224
3225 def connect_port_network(self, port_id, network_id, admin=False):
3226 """Connects a external port to a network"""
3227 '''Returns status code of the VIM response'''
3228 raise vimconn.vimconnNotImplemented("Should have implemented this")
3229
3230 def new_vminstancefromJSON(self, vm_data):
3231 """Adds a VM instance to VIM"""
3232 '''Returns the instance identifier'''
3233 raise vimconn.vimconnNotImplemented("Should have implemented this")
3234
3235 def get_network_name_by_id(self, network_uuid=None):
3236         """Method gets vCloud Director network name based on supplied uuid.
3237
3238 Args:
3239 network_uuid: network_id
3240
3241 Returns:
3242             The network name, or None if not found.
3243 """
3244
3245 if not network_uuid:
3246 return None
3247
3248 try:
3249 org_dict = self.get_org(self.org_uuid)
3250 if 'networks' in org_dict:
3251 org_network_dict = org_dict['networks']
3252 for net_uuid in org_network_dict:
3253 if net_uuid == network_uuid:
3254 return org_network_dict[net_uuid]
3255 except:
3256 self.logger.debug("Exception in get_network_name_by_id")
3257 self.logger.debug(traceback.format_exc())
3258
3259 return None
3260
3261 def get_network_id_by_name(self, network_name=None):
3262 """Method gets vcloud director network uuid based on supplied name.
3263
3264 Args:
3265 network_name: network_name
3266 Returns:
3267             The network uuid, or None if not found.
3269 """
3270
3271 if not network_name:
3272 self.logger.debug("get_network_id_by_name() : Network name is empty")
3273 return None
3274
3275 try:
3276 org_dict = self.get_org(self.org_uuid)
3277 if org_dict and 'networks' in org_dict:
3278 org_network_dict = org_dict['networks']
3279 for net_uuid, net_name in org_network_dict.items():
3280 if net_name == network_name:
3281 return net_uuid
3282
3283 except KeyError as exp:
3284 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
3285
3286 return None
3287
3288 def list_org_action(self):
3289 """
3290         Method queries vCloud Director for the organizations available to the current user
3291
3292 Args:
3293 vca - is active VCA connection.
3294 vdc_name - is a vdc name that will be used to query vms action
3295
3296 Returns:
3297             The XML response content, or None
3298 """
3299 url_list = [self.url, '/api/org']
3300 vm_list_rest_call = ''.join(url_list)
3301
3302 if self.client._session:
3303 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3304 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3305
3306 response = self.perform_request(req_type='GET',
3307 url=vm_list_rest_call,
3308 headers=headers)
3309
3310 if response.status_code == 403:
3311 response = self.retry_rest('GET', vm_list_rest_call)
3312
3313 if response.status_code == requests.codes.ok:
3314 return response.content
3315
3316 return None
3317
3318 def get_org_action(self, org_uuid=None):
3319 """
3320         Method leverages vCloud Director and retrieves the available objects for an organization.
3321
3322 Args:
3323 org_uuid - vCD organization uuid
3324 self.client - is active connection.
3325
3326 Returns:
3327             The XML response content, or None
3328 """
3329
3330 if org_uuid is None:
3331 return None
3332
3333 url_list = [self.url, '/api/org/', org_uuid]
3334 vm_list_rest_call = ''.join(url_list)
3335
3336 if self.client._session:
3337 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3338 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3339
3340 #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
3341 response = self.perform_request(req_type='GET',
3342 url=vm_list_rest_call,
3343 headers=headers)
3344 if response.status_code == 403:
3345 response = self.retry_rest('GET', vm_list_rest_call)
3346
3347 if response.status_code == requests.codes.ok:
3348 return response.content
3349 return None
3350
3351 def get_org(self, org_uuid=None):
3352 """
3353 Method retrieves available organization in vCloud Director
3354
3355 Args:
3356 org_uuid - is a organization uuid.
3357
3358 Returns:
3359             The return dictionary with the following keys
3360                 "networks" - for the network list under the org
3361                 "catalogs" - for the catalog list under the org
3362                 "vdcs" - for the vdc list under the org
3363 """
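        # Shape of the dictionary assembled below (uuids and names are hypothetical):
        #
        #   {'vdcs':     {'<vdc-uuid>': '<vdc-name>'},
        #    'networks': {'<network-uuid>': '<network-name>'},
        #    'catalogs': {'<catalog-uuid>': '<catalog-name>'}}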
3364
3365 org_dict = {}
3366
3367 if org_uuid is None:
3368 return org_dict
3369
3370 content = self.get_org_action(org_uuid=org_uuid)
3371 try:
3372 vdc_list = {}
3373 network_list = {}
3374 catalog_list = {}
3375 vm_list_xmlroot = XmlElementTree.fromstring(content)
3376 for child in vm_list_xmlroot:
3377 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
3378 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3379 org_dict['vdcs'] = vdc_list
3380 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
3381 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3382 org_dict['networks'] = network_list
3383 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
3384 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3385 org_dict['catalogs'] = catalog_list
3386 except:
3387 pass
3388
3389 return org_dict
3390
3391 def get_org_list(self):
3392 """
3393         Method retrieves available organizations in vCloud Director
3394
3395 Args:
3396 vca - is active VCA connection.
3397
3398 Returns:
3399             The return dictionary, keyed by organization UUID
3400 """
3401
3402 org_dict = {}
3403
3404 content = self.list_org_action()
3405 try:
3406 vm_list_xmlroot = XmlElementTree.fromstring(content)
3407 for vm_xml in vm_list_xmlroot:
3408 if vm_xml.tag.split("}")[1] == 'Org':
3409 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
3410 org_dict[org_uuid[0]] = vm_xml.attrib['name']
3411 except:
3412 pass
3413
3414 return org_dict
3415
3416 def vms_view_action(self, vdc_name=None):
3417 """ Method leverages vCloud director vms query call
3418
3419 Args:
3420 vca - is active VCA connection.
3421 vdc_name - is a vdc name that will be used to query vms action
3422
3423 Returns:
3424             The XML response content, or None
3425 """
3426 vca = self.connect()
3427 if vdc_name is None:
3428 return None
3429
3430 url_list = [vca.host, '/api/vms/query']
3431 vm_list_rest_call = ''.join(url_list)
3432
3433 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
3434 refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and
3435 ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
3436 if len(refs) == 1:
3437 response = Http.get(url=vm_list_rest_call,
3438 headers=vca.vcloud_session.get_vcloud_headers(),
3439 verify=vca.verify,
3440 logger=vca.logger)
3441 if response.status_code == requests.codes.ok:
3442 return response.content
3443
3444 return None
3445
3446 def get_vapp_list(self, vdc_name=None):
3447 """
3448         Method retrieves the vApp list deployed in vCloud Director and returns a dictionary
3449         containing all vApps deployed for the queried VDC.
3450         The key of the dictionary is the vApp UUID
3451
3452
3453 Args:
3454 vca - is active VCA connection.
3455 vdc_name - is a vdc name that will be used to query vms action
3456
3457 Returns:
3458             The return dictionary, keyed by vApp UUID
3459 """
3460
3461 vapp_dict = {}
3462 if vdc_name is None:
3463 return vapp_dict
3464
3465 content = self.vms_view_action(vdc_name=vdc_name)
3466 try:
3467 vm_list_xmlroot = XmlElementTree.fromstring(content)
3468 for vm_xml in vm_list_xmlroot:
3469 if vm_xml.tag.split("}")[1] == 'VMRecord':
3470 if vm_xml.attrib['isVAppTemplate'] == 'true':
3471 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
3472 if 'vappTemplate-' in rawuuid[0]:
3473 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3474 # vm and use raw UUID as key
3475 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
3476 except:
3477 pass
3478
3479 return vapp_dict
3480
3481 def get_vm_list(self, vdc_name=None):
3482 """
3483         Method retrieves the list of VMs deployed in vCloud Director. It returns a dictionary
3484         containing all VMs deployed for the queried VDC.
3485         The key of the dictionary is the VM UUID
3486
3487
3488 Args:
3489 vca - is active VCA connection.
3490 vdc_name - is a vdc name that will be used to query vms action
3491
3492 Returns:
3493             The return dictionary, keyed by VM UUID
3494 """
3495 vm_dict = {}
3496
3497 if vdc_name is None:
3498 return vm_dict
3499
3500 content = self.vms_view_action(vdc_name=vdc_name)
3501 try:
3502 vm_list_xmlroot = XmlElementTree.fromstring(content)
3503 for vm_xml in vm_list_xmlroot:
3504 if vm_xml.tag.split("}")[1] == 'VMRecord':
3505 if vm_xml.attrib['isVAppTemplate'] == 'false':
3506 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3507 if 'vm-' in rawuuid[0]:
3508 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3509 # vm and use raw UUID as key
3510 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3511 except:
3512 pass
3513
3514 return vm_dict
3515
3516 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
3517 """
3518         Method retrieves a VM deployed in vCloud Director. It returns the VM attributes as a dictionary
3519         containing all VMs deployed for the queried VDC.
3520         The key of the dictionary is the VM UUID
3521
3522
3523 Args:
3524 vca - is active VCA connection.
3525 vdc_name - is a vdc name that will be used to query vms action
3526
3527 Returns:
3528             The return dictionary, keyed by VM UUID
3529 """
3530 vm_dict = {}
3531 vca = self.connect()
3532 if not vca:
3533 raise vimconn.vimconnConnectionException("self.connect() is failed")
3534
3535 if vdc_name is None:
3536 return vm_dict
3537
3538 content = self.vms_view_action(vdc_name=vdc_name)
3539 try:
3540 vm_list_xmlroot = XmlElementTree.fromstring(content)
3541 for vm_xml in vm_list_xmlroot:
3542 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
3543 # lookup done by UUID
3544 if isuuid:
3545 if vapp_name in vm_xml.attrib['container']:
3546 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3547 if 'vm-' in rawuuid[0]:
3548 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3549 break
3550 # lookup done by Name
3551 else:
3552 if vapp_name in vm_xml.attrib['name']:
3553 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3554 if 'vm-' in rawuuid[0]:
3555 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3556 break
3557 except:
3558 pass
3559
3560 return vm_dict
3561
3562 def get_network_action(self, network_uuid=None):
3563 """
3564         Method leverages vCloud Director and queries a network based on network uuid
3565
3566 Args:
3567 vca - is active VCA connection.
3568 network_uuid - is a network uuid
3569
3570 Returns:
3571             The XML response content, or None
3572 """
3573
3574 if network_uuid is None:
3575 return None
3576
3577 url_list = [self.url, '/api/network/', network_uuid]
3578 vm_list_rest_call = ''.join(url_list)
3579
3580 if self.client._session:
3581 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3582 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3583
3584 response = self.perform_request(req_type='GET',
3585 url=vm_list_rest_call,
3586 headers=headers)
3587 #Retry login if session expired & retry sending request
3588 if response.status_code == 403:
3589 response = self.retry_rest('GET', vm_list_rest_call)
3590
3591 if response.status_code == requests.codes.ok:
3592 return response.content
3593
3594 return None
3595
3596 def get_vcd_network(self, network_uuid=None):
3597 """
3598 Method retrieves available network from vCloud Director
3599
3600 Args:
3601 network_uuid - is VCD network UUID
3602
3603 Each element serialized as key : value pair
3604
3605         Following keys available for access: network_configuration['Gateway']
3606 <Configuration>
3607 <IpScopes>
3608 <IpScope>
3609 <IsInherited>true</IsInherited>
3610 <Gateway>172.16.252.100</Gateway>
3611 <Netmask>255.255.255.0</Netmask>
3612 <Dns1>172.16.254.201</Dns1>
3613 <Dns2>172.16.254.202</Dns2>
3614 <DnsSuffix>vmwarelab.edu</DnsSuffix>
3615 <IsEnabled>true</IsEnabled>
3616 <IpRanges>
3617 <IpRange>
3618 <StartAddress>172.16.252.1</StartAddress>
3619 <EndAddress>172.16.252.99</EndAddress>
3620 </IpRange>
3621 </IpRanges>
3622 </IpScope>
3623 </IpScopes>
3624 <FenceMode>bridged</FenceMode>
3625
3626 Returns:
3627             The network configuration dictionary
3628 """
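        # For a <Configuration> like the sample above, the flattened dictionary built
        # below would contain entries such as (illustrative, not exhaustive):
        #
        #   {'status': '1', 'name': '<network-name>', 'uuid': '<network-uuid>',
        #    'Gateway': '172.16.252.100', 'Netmask': '255.255.255.0',
        #    'Dns1': '172.16.254.201', 'FenceMode': 'bridged', 'IsEnabled': 'true'}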
3629
3630 network_configuration = {}
3631 if network_uuid is None:
3632 return network_uuid
3633
3634 try:
3635 content = self.get_network_action(network_uuid=network_uuid)
3636 vm_list_xmlroot = XmlElementTree.fromstring(content)
3637
3638 network_configuration['status'] = vm_list_xmlroot.get("status")
3639 network_configuration['name'] = vm_list_xmlroot.get("name")
3640 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
3641
3642 for child in vm_list_xmlroot:
3643 if child.tag.split("}")[1] == 'IsShared':
3644 network_configuration['isShared'] = child.text.strip()
3645 if child.tag.split("}")[1] == 'Configuration':
3646 for configuration in child.iter():
3647 tagKey = configuration.tag.split("}")[1].strip()
3648 if tagKey != "":
3649 network_configuration[tagKey] = configuration.text.strip()
3650 return network_configuration
3651 except Exception as exp :
3652 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
3653 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
3654
3655 return network_configuration
3656
3657 def delete_network_action(self, network_uuid=None):
3658 """
3659         Method deletes the given network from vCloud Director
3660
3661 Args:
3662 network_uuid - is a network uuid that client wish to delete
3663
3664 Returns:
3665             True if the delete request was accepted, otherwise False
3666 """
3667 client = self.connect_as_admin()
3668 if not client:
3669 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
3670 if network_uuid is None:
3671 return False
3672
3673 url_list = [self.url, '/api/admin/network/', network_uuid]
3674 vm_list_rest_call = ''.join(url_list)
3675
3676 if client._session:
3677 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3678 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
3679 response = self.perform_request(req_type='DELETE',
3680 url=vm_list_rest_call,
3681 headers=headers)
3682 if response.status_code == 202:
3683 return True
3684
3685 return False
3686
3687 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3688 ip_profile=None, isshared='true'):
3689         """
3690         Method creates a network in vCloud Director
3691
3692         Args:
3693             network_name - is network name to be created.
3694             net_type - can be 'bridge','data','ptp','mgmt'.
3695             ip_profile is a dict containing the IP parameters of the network
3696             isshared - is a boolean
3697             parent_network_uuid - is the parent provider vdc network that will be used for mapping.
3698             It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
3699
3700         Returns:
3701             The new network uuid, or None
3702 """
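        # Minimal usage sketch (the network name and ip_profile contents are illustrative):
        #
        #   net_id = self.create_network(network_name="mgmt-net", net_type="mgmt",
        #                                ip_profile={"subnet_address": "10.10.0.0/24",
        #                                            "gateway_address": "10.10.0.1",
        #                                            "dhcp_enabled": True,
        #                                            "dhcp_count": 50})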
3703
3704 new_network_name = [network_name, '-', str(uuid.uuid4())]
3705 content = self.create_network_rest(network_name=''.join(new_network_name),
3706 ip_profile=ip_profile,
3707 net_type=net_type,
3708 parent_network_uuid=parent_network_uuid,
3709 isshared=isshared)
3710 if content is None:
3711             self.logger.debug("Failed to create network {}.".format(network_name))
3712 return None
3713
3714 try:
3715 vm_list_xmlroot = XmlElementTree.fromstring(content)
3716 vcd_uuid = vm_list_xmlroot.get('id').split(":")
3717 if len(vcd_uuid) == 4:
3718 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
3719 return vcd_uuid[3]
3720 except:
3721             self.logger.debug("Failed to create network {}".format(network_name))
3722 return None
3723
3724 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3725 ip_profile=None, isshared='true'):
3726 """
3727         Method creates a network in vCloud Director
3728
3729         Args:
3730             network_name - is network name to be created.
3731             net_type - can be 'bridge','data','ptp','mgmt'.
3732             ip_profile is a dict containing the IP parameters of the network
3733             isshared - is a boolean
3734             parent_network_uuid - is the parent provider vdc network that will be used for mapping.
3735             It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
3736
3737         Returns:
3738             The new network uuid, or None
3739 """
3740 client_as_admin = self.connect_as_admin()
3741 if not client_as_admin:
3742 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3743 if network_name is None:
3744 return None
3745
3746 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3747 vm_list_rest_call = ''.join(url_list)
3748
3749 if client_as_admin._session:
3750 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3751 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3752
3753 response = self.perform_request(req_type='GET',
3754 url=vm_list_rest_call,
3755 headers=headers)
3756
3757 provider_network = None
3758 available_networks = None
3759 add_vdc_rest_url = None
3760
3761 if response.status_code != requests.codes.ok:
3762 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3763 response.status_code))
3764 return None
3765 else:
3766 try:
3767 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3768 for child in vm_list_xmlroot:
3769 if child.tag.split("}")[1] == 'ProviderVdcReference':
3770 provider_network = child.attrib.get('href')
3771 # application/vnd.vmware.admin.providervdc+xml
3772 if child.tag.split("}")[1] == 'Link':
3773 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3774 and child.attrib.get('rel') == 'add':
3775 add_vdc_rest_url = child.attrib.get('href')
3776 except:
3777                     self.logger.debug("Failed to parse response for rest api call {}".format(vm_list_rest_call))
3778                     self.logger.debug("Response body {}".format(response.content))
3779 return None
3780
3781 # find pvdc provided available network
3782 response = self.perform_request(req_type='GET',
3783 url=provider_network,
3784 headers=headers)
3785 if response.status_code != requests.codes.ok:
3786 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3787 response.status_code))
3788 return None
3789
3790 if parent_network_uuid is None:
3791 try:
3792 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3793 for child in vm_list_xmlroot.iter():
3794 if child.tag.split("}")[1] == 'AvailableNetworks':
3795 for networks in child.iter():
3796 # application/vnd.vmware.admin.network+xml
3797 if networks.attrib.get('href') is not None:
3798 available_networks = networks.attrib.get('href')
3799 break
3800 except:
3801 return None
3802
3803 try:
3804 #Configure IP profile of the network
3805 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
3806
3807 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
3808 subnet_rand = random.randint(0, 255)
3809 ip_base = "192.168.{}.".format(subnet_rand)
3810 ip_profile['subnet_address'] = ip_base + "0/24"
3811 else:
3812 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
3813
3814 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
3815 ip_profile['gateway_address']=ip_base + "1"
3816 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
3817 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
3818 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
3819 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
3820 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
3821 ip_profile['dhcp_start_address']=ip_base + "3"
3822 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
3823 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
3824 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
3825 ip_profile['dns_address']=ip_base + "2"
3826
3827 gateway_address=ip_profile['gateway_address']
3828 dhcp_count=int(ip_profile['dhcp_count'])
3829 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
3830
3831 if ip_profile['dhcp_enabled']==True:
3832 dhcp_enabled='true'
3833 else:
3834 dhcp_enabled='false'
3835 dhcp_start_address=ip_profile['dhcp_start_address']
3836
3837 #derive dhcp_end_address from dhcp_start_address & dhcp_count
3838 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
3839 end_ip_int += dhcp_count - 1
3840 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
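                #e.g. dhcp_start_address "192.168.5.3" with dhcp_count 50 gives
                #dhcp_end_address "192.168.5.52" (3 + 50 - 1 = 52); addresses are illustrative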
3841
3842 ip_version=ip_profile['ip_version']
3843 dns_address=ip_profile['dns_address']
3844 except KeyError as exp:
3845 self.logger.debug("Create Network REST: Key error {}".format(exp))
3846 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
3847
3848 # either use client provided UUID or search for a first available
3849 # if both are not defined we return none
3850 if parent_network_uuid is not None:
3851 provider_network = None
3852 available_networks = None
3853 add_vdc_rest_url = None
3854
3855 url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
3856 add_vdc_rest_url = ''.join(url_list)
3857
3858 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
3859 available_networks = ''.join(url_list)
3860
3861 #Creating all networks as Direct Org VDC type networks.
3862 #Unused in case of Underlay (data/ptp) network interface.
3863 fence_mode="isolated"
3864 is_inherited='false'
3865 dns_list = dns_address.split(";")
3866 dns1 = dns_list[0]
3867 dns2_text = ""
3868 if len(dns_list) >= 2:
3869 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
3870 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3871 <Description>Openmano created</Description>
3872 <Configuration>
3873 <IpScopes>
3874 <IpScope>
3875 <IsInherited>{1:s}</IsInherited>
3876 <Gateway>{2:s}</Gateway>
3877 <Netmask>{3:s}</Netmask>
3878 <Dns1>{4:s}</Dns1>{5:s}
3879 <IsEnabled>{6:s}</IsEnabled>
3880 <IpRanges>
3881 <IpRange>
3882 <StartAddress>{7:s}</StartAddress>
3883 <EndAddress>{8:s}</EndAddress>
3884 </IpRange>
3885 </IpRanges>
3886 </IpScope>
3887 </IpScopes>
3888 <FenceMode>{9:s}</FenceMode>
3889 </Configuration>
3890 <IsShared>{10:s}</IsShared>
3891 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3892 subnet_address, dns1, dns2_text, dhcp_enabled,
3893 dhcp_start_address, dhcp_end_address,
3894 fence_mode, isshared)
3895
3896 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
3897 try:
3898 response = self.perform_request(req_type='POST',
3899 url=add_vdc_rest_url,
3900 headers=headers,
3901 data=data)
3902
3903 if response.status_code != 201:
3904 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
3905 .format(response.status_code,response.content))
3906 else:
3907 network_task = self.get_task_from_response(response.content)
3908 self.logger.debug("Create Network REST : Waiting for Network creation complete")
3909 time.sleep(5)
3910 result = self.client.get_task_monitor().wait_for_success(task=network_task)
3911 if result.get('status') == 'success':
3912 return response.content
3913 else:
3914 self.logger.debug("create_network_rest task failed. Network Create response : {}"
3915 .format(response.content))
3916 except Exception as exp:
3917 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
3918
3919 return None
3920
3921 def convert_cidr_to_netmask(self, cidr_ip=None):
3922 """
3923         Method converts a CIDR prefix notation into a dotted-decimal netmask
3924 Args:
3925 cidr_ip : CIDR IP address
3926 Returns:
3927 netmask : Converted netmask
3928 """
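        # Worked example: "192.168.1.0/24" -> "255.255.255.0"
        # (0xffffffff << (32 - 24), masked to 32 bits, is 0xffffff00)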
3929 if cidr_ip is not None:
3930 if '/' in cidr_ip:
3931 network, net_bits = cidr_ip.split('/')
3932 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
3933 else:
3934 netmask = cidr_ip
3935 return netmask
3936 return None
3937
3938 def get_provider_rest(self, vca=None):
3939 """
3940 Method gets provider vdc view from vcloud director
3941
3942 Args:
3943             vca - is the active vCD client connection.
3946
3947 Returns:
3948             The XML content of the response, or None
3949 """
3950
3951 url_list = [self.url, '/api/admin']
3952 if vca:
3953 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3954 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3955 response = self.perform_request(req_type='GET',
3956 url=''.join(url_list),
3957 headers=headers)
3958
3959 if response.status_code == requests.codes.ok:
3960 return response.content
3961 return None
3962
3963 def create_vdc(self, vdc_name=None):
3964
3965 vdc_dict = {}
3966
3967 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
3968 if xml_content is not None:
3969 try:
3970 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
3971 for child in task_resp_xmlroot:
3972 if child.tag.split("}")[1] == 'Owner':
3973 vdc_id = child.attrib.get('href').split("/")[-1]
3974 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
3975 return vdc_dict
3976 except:
3977                 self.logger.debug("Response body {}".format(xml_content))
3978
3979 return None
3980
3981 def create_vdc_from_tmpl_rest(self, vdc_name=None):
3982 """
3983 Method create vdc in vCloud director based on VDC template.
3984         Method creates a vdc in vCloud Director based on a VDC template.
3985         It uses a pre-defined template.
3986 Args:
3987 vdc_name - name of a new vdc.
3988
3989 Returns:
3990             The XML content of the response, or None
3991 """
3992         # prerequisite: at least one vdc template should be available in vCD
3993 self.logger.info("Creating new vdc {}".format(vdc_name))
3994 vca = self.connect_as_admin()
3995 if not vca:
3996 raise vimconn.vimconnConnectionException("Failed to connect vCD")
3997 if vdc_name is None:
3998 return None
3999
4000 url_list = [self.url, '/api/vdcTemplates']
4001 vm_list_rest_call = ''.join(url_list)
4002
4003 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4004 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4005 response = self.perform_request(req_type='GET',
4006 url=vm_list_rest_call,
4007 headers=headers)
4008
4009 # container url to a template
4010 vdc_template_ref = None
4011 try:
4012 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
4013 for child in vm_list_xmlroot:
4014 # application/vnd.vmware.admin.providervdc+xml
4015                 # we need to find a template from which we instantiate the VDC
4016 if child.tag.split("}")[1] == 'VdcTemplate':
4017 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
4018 vdc_template_ref = child.attrib.get('href')
4019 except:
4020 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4021 self.logger.debug("Respond body {}".format(response.content))
4022 return None
4023
4024         # if we didn't find the required pre-defined template we return None
4025 if vdc_template_ref is None:
4026 return None
4027
4028 try:
4029 # instantiate vdc
4030 url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
4031 vm_list_rest_call = ''.join(url_list)
4032 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4033 <Source href="{1:s}"></Source>
4034                              <Description>openmano</Description>
4035 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
4036
4037 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
4038
4039 response = self.perform_request(req_type='POST',
4040 url=vm_list_rest_call,
4041 headers=headers,
4042 data=data)
4043
4044 vdc_task = self.get_task_from_response(response.content)
4045 self.client.get_task_monitor().wait_for_success(task=vdc_task)
4046
4047             # if all is ok we respond with the content, otherwise None by default
4048 if response.status_code >= 200 and response.status_code < 300:
4049 return response.content
4050 return None
4051 except:
4052             self.logger.debug("Failed to parse response for rest api call {}".format(vm_list_rest_call))
4053             self.logger.debug("Response body {}".format(response.content))
4054
4055 return None
4056
4057 def create_vdc_rest(self, vdc_name=None):
4058 """
4059         Method creates a vdc in vCloud Director
4060
4061 Args:
4062 vdc_name - vdc name to be created
4063 Returns:
4064 The return response
4065 """
4066
4067 self.logger.info("Creating new vdc {}".format(vdc_name))
4068
4069 vca = self.connect_as_admin()
4070 if not vca:
4071 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4072 if vdc_name is None:
4073 return None
4074
4075 url_list = [self.url, '/api/admin/org/', self.org_uuid]
4076 vm_list_rest_call = ''.join(url_list)
4077
4078 if vca._session:
4079 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4080 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4081 response = self.perform_request(req_type='GET',
4082 url=vm_list_rest_call,
4083 headers=headers)
4084
4085 provider_vdc_ref = None
4086 add_vdc_rest_url = None
4087 available_networks = None
4088
4089 if response.status_code != requests.codes.ok:
4090 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
4091 response.status_code))
4092 return None
4093 else:
4094 try:
4095 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
4096 for child in vm_list_xmlroot:
4097 # application/vnd.vmware.admin.providervdc+xml
4098 if child.tag.split("}")[1] == 'Link':
4099 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
4100 and child.attrib.get('rel') == 'add':
4101 add_vdc_rest_url = child.attrib.get('href')
4102 except:
4103                     self.logger.debug("Failed to parse response for rest api call {}".format(vm_list_rest_call))
4104                     self.logger.debug("Response body {}".format(response.content))
4105 return None
4106
4107 response = self.get_provider_rest(vca=vca)
4108 try:
4109 vm_list_xmlroot = XmlElementTree.fromstring(response)
4110 for child in vm_list_xmlroot:
4111 if child.tag.split("}")[1] == 'ProviderVdcReferences':
4112 for sub_child in child:
4113 provider_vdc_ref = sub_child.attrib.get('href')
4114 except:
4115                 self.logger.debug("Failed to parse response for rest api call {}".format(vm_list_rest_call))
4116                 self.logger.debug("Response body {}".format(response))
4117 return None
4118
4119 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
4120 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
4121 <AllocationModel>ReservationPool</AllocationModel>
4122 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
4123 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
4124 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
4125 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
4126 <ProviderVdcReference
4127 name="Main Provider"
4128 href="{2:s}" />
4129 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
4130 escape(vdc_name),
4131 provider_vdc_ref)
4132
4133 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
4134
4135 response = self.perform_request(req_type='POST',
4136 url=add_vdc_rest_url,
4137 headers=headers,
4138 data=data)
4139
4140         # if all is ok we respond with the content, otherwise None by default
4141 if response.status_code == 201:
4142 return response.content
4143 return None
4144
4145 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
4146 """
4147         Method retrieves vApp details from vCloud Director
4148
4149 Args:
4150 vapp_uuid - is vapp identifier.
4151
4152 Returns:
4153             The parsed response dictionary (may be empty), or None
4154 """
4155
4156 parsed_respond = {}
4157 vca = None
4158
4159 if need_admin_access:
4160 vca = self.connect_as_admin()
4161 else:
4162 vca = self.client
4163
4164 if not vca:
4165 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4166 if vapp_uuid is None:
4167 return None
4168
4169 url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
4170 get_vapp_restcall = ''.join(url_list)
4171
4172 if vca._session:
4173 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4174 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4175 response = self.perform_request(req_type='GET',
4176 url=get_vapp_restcall,
4177 headers=headers)
4178
4179 if response.status_code == 403:
4180 if need_admin_access == False:
4181 response = self.retry_rest('GET', get_vapp_restcall)
4182
4183 if response.status_code != requests.codes.ok:
4184 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
4185 response.status_code))
4186 return parsed_respond
4187
4188 try:
4189 xmlroot_respond = XmlElementTree.fromstring(response.content)
4190 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
4191
4192 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4193 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4194 'vmw': 'http://www.vmware.com/schema/ovf',
4195 'vm': 'http://www.vmware.com/vcloud/v1.5',
4196 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4197 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
4198 "xmlns":"http://www.vmware.com/vcloud/v1.5"
4199 }
4200
4201 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
4202 if created_section is not None:
4203 parsed_respond['created'] = created_section.text
4204
4205 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
4206 if network_section is not None and 'networkName' in network_section.attrib:
4207 parsed_respond['networkname'] = network_section.attrib['networkName']
4208
4209 ipscopes_section = \
4210 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
4211 namespaces)
4212 if ipscopes_section is not None:
4213 for ipscope in ipscopes_section:
4214 for scope in ipscope:
4215 tag_key = scope.tag.split("}")[1]
4216 if tag_key == 'IpRanges':
4217                                 ip_ranges = list(scope)
4218 for ipblock in ip_ranges:
4219 for block in ipblock:
4220 parsed_respond[block.tag.split("}")[1]] = block.text
4221 else:
4222 parsed_respond[tag_key] = scope.text
4223
4224 # parse children section for other attrib
4225 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4226 if children_section is not None:
4227 parsed_respond['name'] = children_section.attrib['name']
4228 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
4229 if "nestedHypervisorEnabled" in children_section.attrib else None
4230 parsed_respond['deployed'] = children_section.attrib['deployed']
4231 parsed_respond['status'] = children_section.attrib['status']
4232 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
4233 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
4234 nic_list = []
4235 for adapters in network_adapter:
4236 adapter_key = adapters.tag.split("}")[1]
4237 if adapter_key == 'PrimaryNetworkConnectionIndex':
4238 parsed_respond['primarynetwork'] = adapters.text
4239 if adapter_key == 'NetworkConnection':
4240 vnic = {}
4241 if 'network' in adapters.attrib:
4242 vnic['network'] = adapters.attrib['network']
4243 for adapter in adapters:
4244 setting_key = adapter.tag.split("}")[1]
4245 vnic[setting_key] = adapter.text
4246 nic_list.append(vnic)
4247
4248 for link in children_section:
4249 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4250 if link.attrib['rel'] == 'screen:acquireTicket':
4251 parsed_respond['acquireTicket'] = link.attrib
4252 if link.attrib['rel'] == 'screen:acquireMksTicket':
4253 parsed_respond['acquireMksTicket'] = link.attrib
4254
4255 parsed_respond['interfaces'] = nic_list
4256 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4257 if vCloud_extension_section is not None:
4258 vm_vcenter_info = {}
4259 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4260 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4261 if vmext is not None:
4262 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4263 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
4264
4265 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
4266 vm_virtual_hardware_info = {}
4267 if virtual_hardware_section is not None:
4268 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
4269 if item.find("rasd:Description",namespaces).text == "Hard disk":
4270 disk_size = item.find("rasd:HostResource" ,namespaces
4271 ).attrib["{"+namespaces['vm']+"}capacity"]
4272
4273 vm_virtual_hardware_info["disk_size"]= disk_size
4274 break
4275
4276 for link in virtual_hardware_section:
4277 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4278 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
4279 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
4280 break
4281
4282 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
4283 except Exception as exp :
4284 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4285 return parsed_respond
4286
4287 def acquire_console(self, vm_uuid=None):
4288
4289 if vm_uuid is None:
4290 return None
4291 if self.client._session:
4292 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4293 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4294 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
4295 console_dict = vm_dict['acquireTicket']
4296 console_rest_call = console_dict['href']
4297
4298 response = self.perform_request(req_type='POST',
4299 url=console_rest_call,
4300 headers=headers)
4301
4302 if response.status_code == 403:
4303 response = self.retry_rest('POST', console_rest_call)
4304
4305 if response.status_code == requests.codes.ok:
4306 return response.content
4307
4308 return None
4309
4310 def modify_vm_disk(self, vapp_uuid, flavor_disk):
4311 """
4312         Method modifies the vm disk size as specified by the flavor
4313
4314 Args:
4315 vapp_uuid - is vapp identifier.
4316 flavor_disk - disk size as specified in VNFD (flavor)
4317
4318 Returns:
4319             True on success, otherwise None
4320 """
4321 status = None
4322 try:
4323 #Flavor disk is in GB convert it into MB
4324 flavor_disk = int(flavor_disk) * 1024
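            # e.g. a 40 GB flavor disk becomes 40 * 1024 = 40960 MB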
4325 vm_details = self.get_vapp_details_rest(vapp_uuid)
4326 if vm_details:
4327 vm_name = vm_details["name"]
4328 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
4329
4330 if vm_details and "vm_virtual_hardware" in vm_details:
4331 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
4332 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4333
4334 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
4335
4336 if flavor_disk > vm_disk:
4337 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
4338 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
4339 vm_disk, flavor_disk ))
4340 else:
4341 status = True
4342 self.logger.info("No need to modify disk of VM {}".format(vm_name))
4343
4344 return status
4345 except Exception as exp:
4346             self.logger.info("Error occurred while modifying disk size {}".format(exp))
4347
4348
4349 def modify_vm_disk_rest(self, disk_href , disk_size):
4350 """
4351         Method modifies the vm disk size via the vCD REST API
4352
4353 Args:
4354 disk_href - vCD API URL to GET and PUT disk data
4355 disk_size - disk size as specified in VNFD (flavor)
4356
4357 Returns:
4358             True on success, False on failure, or None on error
4359 """
4360 if disk_href is None or disk_size is None:
4361 return None
4362
4363 if self.client._session:
4364 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4365 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4366 response = self.perform_request(req_type='GET',
4367 url=disk_href,
4368 headers=headers)
4369
4370 if response.status_code == 403:
4371 response = self.retry_rest('GET', disk_href)
4372
4373 if response.status_code != requests.codes.ok:
4374 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
4375 response.status_code))
4376 return None
4377 try:
4378 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4379 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
4380 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4381
4382 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4383 if item.find("rasd:Description",namespaces).text == "Hard disk":
4384 disk_item = item.find("rasd:HostResource" ,namespaces )
4385 if disk_item is not None:
4386 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
4387 break
4388
4389 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
4390 xml_declaration=True)
4391
4392 #Send PUT request to modify disk size
4393 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4394
4395 response = self.perform_request(req_type='PUT',
4396 url=disk_href,
4397 headers=headers,
4398 data=data)
4399 if response.status_code == 403:
4400 add_headers = {'Content-Type': headers['Content-Type']}
4401 response = self.retry_rest('PUT', disk_href, add_headers, data)
4402
4403 if response.status_code != 202:
4404 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
4405 response.status_code))
4406 else:
4407 modify_disk_task = self.get_task_from_response(response.content)
4408 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
4409 if result.get('status') == 'success':
4410 return True
4411 else:
4412 return False
4413 return None
4414
4415 except Exception as exp :
4416             self.logger.info("Error occurred calling rest api for modifying disk size {}".format(exp))
4417 return None
4418
4419 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
4420 """
4421 Method to attach pci devices to VM
4422
4423 Args:
4424 vapp_uuid - uuid of vApp/VM
4425             pci_devices - pci devices information as specified in VNFD (flavor)
4426
4427 Returns:
4428             The status of add pci device task, vm object and
4429 vcenter_conect object
4430 """
4431 vm_obj = None
4432 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
4433 vcenter_conect, content = self.get_vcenter_content()
4434 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4435
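        # Overall flow: resolve the vCenter moref of the vApp VM, look for enough passthrough-capable
        # PCI devices on its current ESXi host, relocate the VM to another host if needed, and then
        # attach the devices one by one through vCenter reconfiguration tasks.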
4436 if vm_moref_id:
4437 try:
4438 no_of_pci_devices = len(pci_devices)
4439 if no_of_pci_devices > 0:
4440 #Get VM and its host
4441 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4442 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4443 if host_obj and vm_obj:
4444                     #get PCI devices from host on which vapp is currently installed
4445 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
4446
4447 if avilable_pci_devices is None:
4448 #find other hosts with active pci devices
4449 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
4450 content,
4451 no_of_pci_devices
4452 )
4453
4454 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4455                             #Migrate vm to the host where PCI devices are available
4456 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
4457 task = self.relocate_vm(new_host_obj, vm_obj)
4458 if task is not None:
4459 result = self.wait_for_vcenter_task(task, vcenter_conect)
4460 self.logger.info("Migrate VM status: {}".format(result))
4461 host_obj = new_host_obj
4462 else:
4463                                 self.logger.info("Failed to create task to migrate VM {}".format(vm_obj))
4464 raise vimconn.vimconnNotFoundException(
4465 "Fail to migrate VM : {} to host {}".format(
4466 vmname_andid,
4467 new_host_obj)
4468 )
4469
4470 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4471 #Add PCI devices one by one
4472 for pci_device in avilable_pci_devices:
4473 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
4474 if task:
4475 status= self.wait_for_vcenter_task(task, vcenter_conect)
4476 if status:
4477 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4478 else:
4479 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4480 return True, vm_obj, vcenter_conect
4481 else:
4482 self.logger.error("Currently there is no host with"\
4483                                 " {} available PCI devices required for VM {}".format(
4484 no_of_pci_devices,
4485 vmname_andid)
4486 )
4487 raise vimconn.vimconnNotFoundException(
4488 "Currently there is no host with {} "\
4489                             "number of available PCI devices required for VM {}".format(
4490 no_of_pci_devices,
4491 vmname_andid))
4492 else:
4493                     self.logger.debug("No information about PCI devices {}".format(pci_devices))
4494
4495 except vmodl.MethodFault as error:
4496                 self.logger.error("Error occurred while adding PCI devices: {}".format(error))
4497 return None, vm_obj, vcenter_conect
4498
4499 def get_vm_obj(self, content, mob_id):
4500 """
4501         Method to get the vSphere VM object associated with a given moref ID
4502         Args:
4503             content - vCenter content object
4504             mob_id - moref ID of the VM
4505 
4506
4507 Returns:
4508 VM and host object
4509 """
4510 vm_obj = None
4511 host_obj = None
4512 try :
4513 container = content.viewManager.CreateContainerView(content.rootFolder,
4514 [vim.VirtualMachine], True
4515 )
4516 for vm in container.view:
4517 mobID = vm._GetMoId()
4518 if mobID == mob_id:
4519 vm_obj = vm
4520 host_obj = vm_obj.runtime.host
4521 break
4522 except Exception as exp:
4523 self.logger.error("Error occurred while finding VM object : {}".format(exp))
4524 return host_obj, vm_obj
4525
4526 def get_pci_devices(self, host, need_devices):
4527 """
4528 Method to get the details of pci devices on given host
4529 Args:
4530 host - vSphere host object
4531 need_devices - number of pci devices needed on host
4532
4533 Returns:
4534 array of pci devices
4535 """
4536 all_devices = []
4537 all_device_ids = []
4538 used_devices_ids = []
4539
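        # Two passes: first collect every device on the host whose passthrough is active, then drop
        # the ones already claimed by powered-on VMs so only free passthrough devices are returned.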
4540 try:
4541 if host:
4542 pciPassthruInfo = host.config.pciPassthruInfo
4543 pciDevies = host.hardware.pciDevice
4544
4545 for pci_status in pciPassthruInfo:
4546 if pci_status.passthruActive:
4547 for device in pciDevies:
4548 if device.id == pci_status.id:
4549 all_device_ids.append(device.id)
4550 all_devices.append(device)
4551
4552 #check if devices are in use
4553 avalible_devices = all_devices
4554 for vm in host.vm:
4555 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
4556 vm_devices = vm.config.hardware.device
4557 for device in vm_devices:
4558 if type(device) is vim.vm.device.VirtualPCIPassthrough:
4559 if device.backing.id in all_device_ids:
4560 for use_device in avalible_devices:
4561 if use_device.id == device.backing.id:
4562 avalible_devices.remove(use_device)
4563 used_devices_ids.append(device.backing.id)
4564 self.logger.debug("Device {} from devices {}"\
4566                                                           " is in use".format(device.backing.id,
4566 device)
4567 )
4568 if len(avalible_devices) < need_devices:
4569                 self.logger.debug("Host {} does not have {} active passthrough devices".format(host,
4570 need_devices))
4571                 self.logger.debug("found only {} devices {}".format(len(avalible_devices),
4572 avalible_devices))
4573 return None
4574 else:
4575 required_devices = avalible_devices[:need_devices]
4576                 self.logger.info("Found {} PCI devices on host {} but required only {}".format(
4577 len(avalible_devices),
4578 host,
4579 need_devices))
4580                 self.logger.info("Returning {} devices as {}".format(need_devices,
4581 required_devices ))
4582 return required_devices
4583
4584 except Exception as exp:
4585 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
4586
4587 return None
4588
4589 def get_host_and_PCIdevices(self, content, need_devices):
4590 """
4591         Method to get the details of PCI devices on all hosts
4592
4593 Args:
4594             content - vCenter content object
4595 need_devices - number of pci devices needed on host
4596
4597 Returns:
4598 array of pci devices and host object
4599 """
4600 host_obj = None
4601 pci_device_objs = None
4602 try:
4603 if content:
4604 container = content.viewManager.CreateContainerView(content.rootFolder,
4605 [vim.HostSystem], True)
4606 for host in container.view:
4607 devices = self.get_pci_devices(host, need_devices)
4608 if devices:
4609 host_obj = host
4610 pci_device_objs = devices
4611 break
4612 except Exception as exp:
4613 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
4614
4615 return host_obj,pci_device_objs
4616
4617 def relocate_vm(self, dest_host, vm) :
4618 """
4619         Method to relocate a VM to a new host
4620
4621 Args:
4622 dest_host - vSphere host object
4623 vm - vSphere VM object
4624
4625 Returns:
4626 task object
4627 """
4628 task = None
4629 try:
4630 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
4631 task = vm.Relocate(relocate_spec)
4632 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
4633 except Exception as exp:
4634             self.logger.error("Error occurred while relocating VM {} to new host {}: {}".format(
4635                                                                             vm, dest_host, exp))
4636 return task
4637
4638 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
4639 """
4640 Waits and provides updates on a vSphere task
4641 """
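        # Poll the task state every 2 seconds until it leaves 'running'; note there is no upper bound
        # on the wait, so a stuck vCenter task will block the caller.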
4642 while task.info.state == vim.TaskInfo.State.running:
4643 time.sleep(2)
4644
4645 if task.info.state == vim.TaskInfo.State.success:
4646 if task.info.result is not None and not hideResult:
4647 self.logger.info('{} completed successfully, result: {}'.format(
4648 actionName,
4649 task.info.result))
4650 else:
4651 self.logger.info('Task {} completed successfully.'.format(actionName))
4652 else:
4653 self.logger.error('{} did not complete successfully: {} '.format(
4654 actionName,
4655 task.info.error)
4656 )
4657
4658 return task.info.result
4659
4660 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
4661 """
4662 Method to add pci device in given VM
4663
4664 Args:
4665 host_object - vSphere host object
4666 vm_object - vSphere VM object
4667 host_pci_dev - host_pci_dev must be one of the devices from the
4668 host_object.hardware.pciDevice list
4669 which is configured as a PCI passthrough device
4670
4671 Returns:
4672 task object
4673 """
4674 task = None
4675 if vm_object and host_object and host_pci_dev:
4676 try :
4677 #Add PCI device to VM
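                #QueryConfigTarget lists the PCI passthrough devices usable by this VM; the systemId it
                #reports is needed to build the VirtualPCIPassthroughDeviceBackingInfo below. The device
                #key -100 is a temporary negative placeholder that vSphere replaces with a real key when
                #the ReconfigVM_Task is executed.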
4678 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
4679 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
4680
4681 if host_pci_dev.id not in systemid_by_pciid:
4682 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
4683 return None
4684
4685 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
4686 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
4687 id=host_pci_dev.id,
4688 systemId=systemid_by_pciid[host_pci_dev.id],
4689 vendorId=host_pci_dev.vendorId,
4690 deviceName=host_pci_dev.deviceName)
4691
4692 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
4693
4694 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
4695 new_device_config.operation = "add"
4696 vmConfigSpec = vim.vm.ConfigSpec()
4697 vmConfigSpec.deviceChange = [new_device_config]
4698
4699 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
4700 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
4701 host_pci_dev, vm_object, host_object)
4702 )
4703 except Exception as exp:
4704                 self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
4705 host_pci_dev,
4706 vm_object,
4707 exp))
4708 return task
4709
4710 def get_vm_vcenter_info(self):
4711 """
4712         Method to get the vCenter access details configured for this VIM
4713 
4714         Args:
4715             None
4716 
4717         Returns:
4718             dict with vCenter IP, port, user and password
4719 """
4720 vm_vcenter_info = {}
4721
4722 if self.vcenter_ip is not None:
4723 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
4724 else:
4725 raise vimconn.vimconnException(message="vCenter IP is not provided."\
4726 " Please provide vCenter IP while attaching datacenter to tenant in --config")
4727 if self.vcenter_port is not None:
4728 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
4729 else:
4730 raise vimconn.vimconnException(message="vCenter port is not provided."\
4731 " Please provide vCenter port while attaching datacenter to tenant in --config")
4732 if self.vcenter_user is not None:
4733 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
4734 else:
4735 raise vimconn.vimconnException(message="vCenter user is not provided."\
4736 " Please provide vCenter user while attaching datacenter to tenant in --config")
4737
4738 if self.vcenter_password is not None:
4739 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
4740 else:
4741 raise vimconn.vimconnException(message="vCenter user password is not provided."\
4742 " Please provide vCenter user password while attaching datacenter to tenant in --config")
4743
4744 return vm_vcenter_info
4745
4746
4747 def get_vm_pci_details(self, vmuuid):
4748 """
4749 Method to get VM PCI device details from vCenter
4750
4751 Args:
4752 vm_obj - vSphere VM object
4753
4754 Returns:
4755             dict of PCI devices attached to VM
4756
4757 """
4758 vm_pci_devices_info = {}
4759 try:
4760 vcenter_conect, content = self.get_vcenter_content()
4761 vm_moref_id = self.get_vm_moref_id(vmuuid)
4762 if vm_moref_id:
4763 #Get VM and its host
4764 if content:
4765 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4766 if host_obj and vm_obj:
4767 vm_pci_devices_info["host_name"]= host_obj.name
4768 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
4769 for device in vm_obj.config.hardware.device:
4770 if type(device) == vim.vm.device.VirtualPCIPassthrough:
4771 device_details={'devide_id':device.backing.id,
4772 'pciSlotNumber':device.slotInfo.pciSlotNumber,
4773 }
4774 vm_pci_devices_info[device.deviceInfo.label] = device_details
4775 else:
4776 self.logger.error("Can not connect to vCenter while getting "\
4777                                       "PCI devices information")
4778 return vm_pci_devices_info
4779 except Exception as exp:
4780             self.logger.error("Error occurred while getting VM information"\
4781 " for VM : {}".format(exp))
4782 raise vimconn.vimconnException(message=exp)
4783
4784
4785 def reserve_memory_for_all_vms(self, vapp, memory_mb):
4786 """
4787 Method to reserve memory for all VMs
4788 Args :
4789 vapp - VApp
4790 memory_mb - Memory in MB
4791 Returns:
4792 None
4793 """
4794
4795 self.logger.info("Reserve memory for all VMs")
4796 for vms in vapp.get_all_vms():
4797 vm_id = vms.get('id').split(':')[-1]
4798
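            # For each VM in the vApp: GET its virtualHardwareSection/memory RASD item, overwrite the
            # rasd:Reservation value with memory_mb and PUT the item back, then wait for the vCD task.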
4799 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
4800
4801 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4802 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4803 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
4804 response = self.perform_request(req_type='GET',
4805 url=url_rest_call,
4806 headers=headers)
4807
4808 if response.status_code == 403:
4809 response = self.retry_rest('GET', url_rest_call)
4810
4811 if response.status_code != 200:
4812 self.logger.error("REST call {} failed reason : {}"\
4813 "status code : {}".format(url_rest_call,
4814 response.content,
4815 response.status_code))
4816 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
4817 "memory")
4818
4819             bytexml = response.content if isinstance(response.content, bytes) else response.content.encode('utf-8')
4820 contentelem = lxmlElementTree.XML(bytexml)
4821 namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.items() if prefix}
4822 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4823
4824 # Find the reservation element in the response
4825 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
4826 for memelem in memelem_list:
4827 memelem.text = str(memory_mb)
4828
4829 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
4830
4831 response = self.perform_request(req_type='PUT',
4832 url=url_rest_call,
4833 headers=headers,
4834 data=newdata)
4835
4836 if response.status_code == 403:
4837 add_headers = {'Content-Type': headers['Content-Type']}
4838 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4839
4840 if response.status_code != 202:
4841 self.logger.error("REST call {} failed reason : {}"\
4842 "status code : {} ".format(url_rest_call,
4843 response.content,
4844 response.status_code))
4845 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
4846 "virtual hardware memory section")
4847 else:
4848 mem_task = self.get_task_from_response(response.content)
4849 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
4850 if result.get('status') == 'success':
4851 self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
4852 .format(vm_id))
4853 else:
4854 self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
4855 .format(vm_id))
4856
4857 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
4858 """
4859 Configure VApp network config with org vdc network
4860 Args :
4861 vapp - VApp
4862 Returns:
4863 None
4864 """
4865
4866 self.logger.info("Connecting vapp {} to org vdc network {}".
4867 format(vapp_id, net_name))
4868
4869 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
4870
4871 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4872 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4873 response = self.perform_request(req_type='GET',
4874 url=url_rest_call,
4875 headers=headers)
4876
4877 if response.status_code == 403:
4878 response = self.retry_rest('GET', url_rest_call)
4879
4880 if response.status_code != 200:
4881 self.logger.error("REST call {} failed reason : {}"\
4882 "status code : {}".format(url_rest_call,
4883 response.content,
4884 response.status_code))
4885 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
4886 "network config section")
4887
4888 data = response.content
4889 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
4890 net_id = self.get_network_id_by_name(net_name)
4891 if not net_id:
4892 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
4893 "existing network")
4894
4895         bytexml = data if isinstance(data, bytes) else data.encode('utf-8')
4896 newelem = lxmlElementTree.XML(bytexml)
4897 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
4898 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
4899 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
4900
4901 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
4902 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
4903 if parentnetworklist:
4904 for pn in parentnetworklist:
4905 if "href" not in pn.keys():
4906 id_val = pn.get("id")
4907 href_val = "{}/api/network/{}".format(self.url, id_val)
4908 pn.set("href", href_val)
4909
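        # Build a new NetworkConfig entry that bridges the vApp network to the org VDC network and
        # append it next to the existing NetworkConfig before PUTting the section back.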
4910 newstr = """<NetworkConfig networkName="{}">
4911 <Configuration>
4912 <ParentNetwork href="{}/api/network/{}"/>
4913 <FenceMode>bridged</FenceMode>
4914 </Configuration>
4915 </NetworkConfig>
4916 """.format(net_name, self.url, net_id)
4917 newcfgelem = lxmlElementTree.fromstring(newstr)
4918 if nwcfglist:
4919 nwcfglist[0].addnext(newcfgelem)
4920
4921 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
4922
4923 response = self.perform_request(req_type='PUT',
4924 url=url_rest_call,
4925 headers=headers,
4926 data=newdata)
4927
4928 if response.status_code == 403:
4929 add_headers = {'Content-Type': headers['Content-Type']}
4930 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4931
4932 if response.status_code != 202:
4933 self.logger.error("REST call {} failed reason : {}"\
4934 "status code : {} ".format(url_rest_call,
4935 response.content,
4936 response.status_code))
4937 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
4938 "network config section")
4939 else:
4940 vapp_task = self.get_task_from_response(response.content)
4941 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
4942 if result.get('status') == 'success':
4943 self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
4944 "network {}".format(vapp_id, net_name))
4945 else:
4946 self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
4947 "connect to network {}".format(vapp_id, net_name))
4948
4949 def remove_primary_network_adapter_from_all_vms(self, vapp):
4950 """
4951         Method to remove the primary network adapter from all VMs in the vApp
4952 Args :
4953 vapp - VApp
4954 Returns:
4955 None
4956 """
4957
4958 self.logger.info("Removing network adapter from all VMs")
4959 for vms in vapp.get_all_vms():
4960 vm_id = vms.get('id').split(':')[-1]
4961
4962 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
4963
4964 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4965 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4966 response = self.perform_request(req_type='GET',
4967 url=url_rest_call,
4968 headers=headers)
4969
4970 if response.status_code == 403:
4971 response = self.retry_rest('GET', url_rest_call)
4972
4973 if response.status_code != 200:
4974 self.logger.error("REST call {} failed reason : {}"\
4975 "status code : {}".format(url_rest_call,
4976 response.content,
4977 response.status_code))
4978 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
4979 "network connection section")
4980
4981             data = response.content.decode() if isinstance(response.content, bytes) else response.content
4982             data = data.split('<Link rel="edit"')[0]
4983
4984 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
4985
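            # The PUT body below replaces the whole NetworkConnectionSection with one that contains no
            # NetworkConnection elements, effectively removing the primary network adapter inherited
            # from the vApp template.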
4986 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
4987 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
4988 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
4989 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
4990 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
4991 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
4992 xmlns:vmw="http://www.vmware.com/schema/ovf"
4993 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
4994 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
4995 xmlns:ns9="http://www.vmware.com/vcloud/versions"
4996 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
4997 <ovf:Info>Specifies the available VM network connections</ovf:Info>
4998 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
4999 <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
5000 </NetworkConnectionSection>""".format(url=url_rest_call)
5001 response = self.perform_request(req_type='PUT',
5002 url=url_rest_call,
5003 headers=headers,
5004 data=newdata)
5005
5006 if response.status_code == 403:
5007 add_headers = {'Content-Type': headers['Content-Type']}
5008 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5009
5010 if response.status_code != 202:
5011 self.logger.error("REST call {} failed reason : {}"\
5012 "status code : {} ".format(url_rest_call,
5013 response.content,
5014 response.status_code))
5015 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
5016 "network connection section")
5017 else:
5018 nic_task = self.get_task_from_response(response.content)
5019 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5020 if result.get('status') == 'success':
5021                     self.logger.info("remove_primary_network_adapter(): primary network adapter "\
5022                                      "removed from VM {}".format(vm_id))
5023 else:
5024                     self.logger.error("remove_primary_network_adapter(): failed to remove primary "\
5025                                       "network adapter from VM {}".format(vm_id))
5026
5027 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
5028 """
5029         Method to add a network adapter to all VMs in the vApp
5030 Args :
5031 network_name - name of network
5032 primary_nic_index - int value for primary nic index
5033 nicIndex - int value for nic index
5034             nic_type - network adapter model to attach to the VM
5035 Returns:
5036 None
5037 """
5038
5039 self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
5040 format(network_name, nicIndex, nic_type))
5041 try:
5042 ip_address = None
5043 floating_ip = False
5044 mac_address = None
5045 if 'floating_ip' in net: floating_ip = net['floating_ip']
5046
5047 # Stub for ip_address feature
5048 if 'ip_address' in net: ip_address = net['ip_address']
5049
5050 if 'mac_address' in net: mac_address = net['mac_address']
5051
5052 if floating_ip:
5053 allocation_mode = "POOL"
5054 elif ip_address:
5055 allocation_mode = "MANUAL"
5056 else:
5057 allocation_mode = "DHCP"
5058
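            # Two branches follow: without an explicit nic_type the NetworkConnection is added with the
            # default adapter, otherwise a NetworkAdapterType element is included (SR-IOV/VF requests
            # are mapped to the vCD type SRIOVETHERNETCARD). In both cases the existing
            # networkConnectionSection is fetched, extended and PUT back for every VM in the vApp.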
5059 if not nic_type:
5060 for vms in vapp.get_all_vms():
5061 vm_id = vms.get('id').split(':')[-1]
5062
5063 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5064
5065 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5066 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5067 response = self.perform_request(req_type='GET',
5068 url=url_rest_call,
5069 headers=headers)
5070
5071 if response.status_code == 403:
5072 response = self.retry_rest('GET', url_rest_call)
5073
5074 if response.status_code != 200:
5075 self.logger.error("REST call {} failed reason : {}"\
5076 "status code : {}".format(url_rest_call,
5077 response.content,
5078 response.status_code))
5079 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5080 "network connection section")
5081
5082                     data = response.content.decode() if isinstance(response.content, bytes) else response.content
5083                     data = data.split('<Link rel="edit"')[0]
5084 if '<PrimaryNetworkConnectionIndex>' not in data:
5085 self.logger.debug("add_network_adapter PrimaryNIC not in data")
5086 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5087 <NetworkConnection network="{}">
5088 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5089 <IsConnected>true</IsConnected>
5090 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5091 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5092 allocation_mode)
5093 # Stub for ip_address feature
5094 if ip_address:
5095 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5096 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5097
5098 if mac_address:
5099 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5100 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5101
5102 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5103 else:
5104 self.logger.debug("add_network_adapter PrimaryNIC in data")
5105 new_item = """<NetworkConnection network="{}">
5106 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5107 <IsConnected>true</IsConnected>
5108 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5109 </NetworkConnection>""".format(network_name, nicIndex,
5110 allocation_mode)
5111 # Stub for ip_address feature
5112 if ip_address:
5113 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5114 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5115
5116 if mac_address:
5117 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5118 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5119
5120 data = data + new_item + '</NetworkConnectionSection>'
5121
5122 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5123
5124 response = self.perform_request(req_type='PUT',
5125 url=url_rest_call,
5126 headers=headers,
5127 data=data)
5128
5129 if response.status_code == 403:
5130 add_headers = {'Content-Type': headers['Content-Type']}
5131 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5132
5133 if response.status_code != 202:
5134 self.logger.error("REST call {} failed reason : {}"\
5135 "status code : {} ".format(url_rest_call,
5136 response.content,
5137 response.status_code))
5138 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5139 "network connection section")
5140 else:
5141 nic_task = self.get_task_from_response(response.content)
5142 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5143 if result.get('status') == 'success':
5144                             self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
5145 "default NIC type".format(vm_id))
5146 else:
5147 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
5148 "connect NIC type".format(vm_id))
5149 else:
5150 for vms in vapp.get_all_vms():
5151 vm_id = vms.get('id').split(':')[-1]
5152
5153 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5154
5155 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5156 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5157 response = self.perform_request(req_type='GET',
5158 url=url_rest_call,
5159 headers=headers)
5160
5161 if response.status_code == 403:
5162 response = self.retry_rest('GET', url_rest_call)
5163
5164 if response.status_code != 200:
5165 self.logger.error("REST call {} failed reason : {}"\
5166 "status code : {}".format(url_rest_call,
5167 response.content,
5168 response.status_code))
5169 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5170 "network connection section")
5171                     data = response.content.decode() if isinstance(response.content, bytes) else response.content
5172                     data = data.split('<Link rel="edit"')[0]
5173 vcd_netadapter_type = nic_type
5174 if nic_type in ['SR-IOV', 'VF']:
5175 vcd_netadapter_type = "SRIOVETHERNETCARD"
5176
5177 if '<PrimaryNetworkConnectionIndex>' not in data:
5178 self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
5179 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5180 <NetworkConnection network="{}">
5181 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5182 <IsConnected>true</IsConnected>
5183 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5184 <NetworkAdapterType>{}</NetworkAdapterType>
5185 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5186 allocation_mode, vcd_netadapter_type)
5187 # Stub for ip_address feature
5188 if ip_address:
5189 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5190 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5191
5192 if mac_address:
5193 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5194 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5195
5196 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5197 else:
5198 self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
5199 new_item = """<NetworkConnection network="{}">
5200 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5201 <IsConnected>true</IsConnected>
5202 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5203 <NetworkAdapterType>{}</NetworkAdapterType>
5204 </NetworkConnection>""".format(network_name, nicIndex,
5205 allocation_mode, vcd_netadapter_type)
5206 # Stub for ip_address feature
5207 if ip_address:
5208 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5209 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5210
5211 if mac_address:
5212 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5213 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5214
5215 data = data + new_item + '</NetworkConnectionSection>'
5216
5217 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5218
5219 response = self.perform_request(req_type='PUT',
5220 url=url_rest_call,
5221 headers=headers,
5222 data=data)
5223
5224 if response.status_code == 403:
5225 add_headers = {'Content-Type': headers['Content-Type']}
5226 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5227
5228 if response.status_code != 202:
5229 self.logger.error("REST call {} failed reason : {}"\
5230 "status code : {}".format(url_rest_call,
5231 response.content,
5232 response.status_code))
5233 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5234 "network connection section")
5235 else:
5236 nic_task = self.get_task_from_response(response.content)
5237 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5238 if result.get('status') == 'success':
5239 self.logger.info("add_network_adapter_to_vms(): VM {} "\
5240                                          "connected to NIC type {}".format(vm_id, nic_type))
5241 else:
5242 self.logger.error("add_network_adapter_to_vms(): VM {} "\
5243 "failed to connect NIC type {}".format(vm_id, nic_type))
5244 except Exception as exp:
5245 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
5246 "while adding Network adapter")
5247 raise vimconn.vimconnException(message=exp)
5248
5249
5250 def set_numa_affinity(self, vmuuid, paired_threads_id):
5251 """
5252         Method to assign NUMA affinity in the VM configuration parameters
5253 Args :
5254 vmuuid - vm uuid
5255 paired_threads_id - one or more virtual processor
5256 numbers
5257 Returns:
5258             None (errors are logged or raised as vimconnException)
5259 """
5260 try:
5261 vcenter_conect, content = self.get_vcenter_content()
5262 vm_moref_id = self.get_vm_moref_id(vmuuid)
5263
5264 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
5265 if vm_obj:
5266 config_spec = vim.vm.ConfigSpec()
5267 config_spec.extraConfig = []
5268 opt = vim.option.OptionValue()
5269 opt.key = 'numa.nodeAffinity'
5270 opt.value = str(paired_threads_id)
5271 config_spec.extraConfig.append(opt)
5272 task = vm_obj.ReconfigVM_Task(config_spec)
5273 if task:
5274 result = self.wait_for_vcenter_task(task, vcenter_conect)
5275 extra_config = vm_obj.config.extraConfig
5276 flag = False
5277 for opts in extra_config:
5278 if 'numa.nodeAffinity' in opts.key:
5279 flag = True
5280                         self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
5281 "value {} for vm {}".format(opt.value, vm_obj))
5282 if flag:
5283 return
5284 else:
5285 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
5286 except Exception as exp:
5287 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
5288                               "for VM : {}".format(exp))
5289 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
5290 "affinity".format(exp))
5291
5292
5293 def cloud_init(self, vapp, cloud_config):
5294 """
5295 Method to inject ssh-key
5296 vapp - vapp object
5297 cloud_config a dictionary with:
5298 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
5299 'users': (optional) list of users to be inserted, each item is a dict with:
5300 'name': (mandatory) user name,
5301 'key-pairs': (optional) list of strings with the public key to be inserted to the user
5302 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
5303 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
5304 'config-files': (optional). List of files to be transferred. Each item is a dict with:
5305 'dest': (mandatory) string with the destination absolute path
5306 'encoding': (optional, by default text). Can be one of:
5307 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
5308 'content' (mandatory): string with the content of the file
5309 'permissions': (optional) string with file permissions, typically octal notation '0644'
5310 'owner': (optional) file owner, string with the format 'owner:group'
5311             'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
5312 """
5313 try:
5314 if not isinstance(cloud_config, dict):
5315 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
5316 else:
5317 key_pairs = []
5318 userdata = []
5319 if "key-pairs" in cloud_config:
5320 key_pairs = cloud_config["key-pairs"]
5321
5322 if "users" in cloud_config:
5323 userdata = cloud_config["users"]
5324
5325 self.logger.debug("cloud_init : Guest os customization started..")
5326 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
5327 customize_script = customize_script.replace("&","&amp;")
5328 self.guest_customization(vapp, customize_script)
5329
5330 except Exception as exp:
5331 self.logger.error("cloud_init : exception occurred while injecting "\
5332 "ssh-key")
5333 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
5334 "ssh-key".format(exp))
5335
5336 def format_script(self, key_pairs=[], users_list=[]):
5337 bash_script = """#!/bin/sh
5338 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5339 if [ "$1" = "precustomization" ];then
5340 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5341 """
5342
5343 keys = "\n".join(key_pairs)
5344 if keys:
5345 keys_data = """
5346 if [ ! -d /root/.ssh ];then
5347 mkdir /root/.ssh
5348 chown root:root /root/.ssh
5349 chmod 700 /root/.ssh
5350 touch /root/.ssh/authorized_keys
5351 chown root:root /root/.ssh/authorized_keys
5352 chmod 600 /root/.ssh/authorized_keys
5353 # make centos with selinux happy
5354 which restorecon && restorecon -Rv /root/.ssh
5355 else
5356 touch /root/.ssh/authorized_keys
5357 chown root:root /root/.ssh/authorized_keys
5358 chmod 600 /root/.ssh/authorized_keys
5359 fi
5360 echo '{key}' >> /root/.ssh/authorized_keys
5361 """.format(key=keys)
5362
5363 bash_script+= keys_data
5364
5365 for user in users_list:
5366 if 'name' in user: user_name = user['name']
5367 if 'key-pairs' in user:
5368 user_keys = "\n".join(user['key-pairs'])
5369 else:
5370 user_keys = None
5371
5372 add_user_name = """
5373 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
5374 """.format(user_name=user_name)
5375
5376 bash_script+= add_user_name
5377
5378 if user_keys:
5379 user_keys_data = """
5380 mkdir /home/{user_name}/.ssh
5381 chown {user_name}:{user_name} /home/{user_name}/.ssh
5382 chmod 700 /home/{user_name}/.ssh
5383 touch /home/{user_name}/.ssh/authorized_keys
5384 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
5385 chmod 600 /home/{user_name}/.ssh/authorized_keys
5386 # make centos with selinux happy
5387 which restorecon && restorecon -Rv /home/{user_name}/.ssh
5388 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
5389 """.format(user_name=user_name,user_key=user_keys)
5390
5391 bash_script+= user_keys_data
5392
5393 return bash_script+"\n\tfi"
5394
5395 def guest_customization(self, vapp, customize_script):
5396 """
5397 Method to customize guest os
5398 vapp - Vapp object
5399 customize_script - Customize script to be run at first boot of VM.
5400 """
5401 for vm in vapp.get_all_vms():
5402 vm_id = vm.get('id').split(':')[-1]
5403 vm_name = vm.get('name')
5404 vm_name = vm_name.replace('_','-')
5405
5406 vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
5407 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5408 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5409
5410 headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
5411
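            # PUT a GuestCustomizationSection with Enabled=true and the generated script as
            # CustomizationScript; vCD runs it through VMware Tools during guest customization
            # (the script itself only acts on the "precustomization" phase argument).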
5412 data = """<GuestCustomizationSection
5413 xmlns="http://www.vmware.com/vcloud/v1.5"
5414 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5415 ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
5416 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
5417 <Enabled>true</Enabled>
5418 <ChangeSid>false</ChangeSid>
5419 <VirtualMachineId>{}</VirtualMachineId>
5420 <JoinDomainEnabled>false</JoinDomainEnabled>
5421 <UseOrgSettings>false</UseOrgSettings>
5422 <AdminPasswordEnabled>false</AdminPasswordEnabled>
5423 <AdminPasswordAuto>true</AdminPasswordAuto>
5424 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
5425 <AdminAutoLogonCount>0</AdminAutoLogonCount>
5426 <ResetPasswordRequired>false</ResetPasswordRequired>
5427 <CustomizationScript>{}</CustomizationScript>
5428 <ComputerName>{}</ComputerName>
5429 <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
5430 </GuestCustomizationSection>
5431 """.format(vm_customization_url,
5432 vm_id,
5433 customize_script,
5434 vm_name,
5435 vm_customization_url)
5436
5437 response = self.perform_request(req_type='PUT',
5438 url=vm_customization_url,
5439 headers=headers,
5440 data=data)
5441 if response.status_code == 202:
5442 guest_task = self.get_task_from_response(response.content)
5443 self.client.get_task_monitor().wait_for_success(task=guest_task)
5444 self.logger.info("guest_customization : customized guest os task "\
5445 "completed for VM {}".format(vm_name))
5446 else:
5447 self.logger.error("guest_customization : task for customized guest os"\
5448                                   " failed for VM {}".format(vm_name))
5449 raise vimconn.vimconnException("guest_customization : failed to perform"\
5450                                                " guest os customization on VM {}".format(vm_name))
5451
5452 def add_new_disk(self, vapp_uuid, disk_size):
5453 """
5454 Method to create an empty vm disk
5455
5456 Args:
5457 vapp_uuid - is vapp identifier.
5458 disk_size - size of disk to be created in GB
5459
5460 Returns:
5461 None
5462 """
5463 status = False
5464 vm_details = None
5465 try:
5466 #Disk size in GB, convert it into MB
5467 if disk_size is not None:
5468 disk_size_mb = int(disk_size) * 1024
5469 vm_details = self.get_vapp_details_rest(vapp_uuid)
5470
5471 if vm_details and "vm_virtual_hardware" in vm_details:
5472 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5473 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5474 status = self.add_new_disk_rest(disk_href, disk_size_mb)
5475
5476 except Exception as exp:
5477 msg = "Error occurred while creating new disk {}.".format(exp)
5478 self.rollback_newvm(vapp_uuid, msg)
5479
5480 if status:
5481 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5482 else:
5483 #If failed to add disk, delete VM
5484 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
5485 self.rollback_newvm(vapp_uuid, msg)
5486
5487
5488 def add_new_disk_rest(self, disk_href, disk_size_mb):
5489 """
5490         Retrieves the vApp disk section and adds a new empty disk
5491
5492 Args:
5493             disk_href: Disk section href to add the disk
5494 disk_size_mb: Disk size in MB
5495
5496 Returns: Status of add new disk task
5497 """
5498 status = False
5499 if self.client._session:
5500 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5501 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5502 response = self.perform_request(req_type='GET',
5503 url=disk_href,
5504 headers=headers)
5505
5506 if response.status_code == 403:
5507 response = self.retry_rest('GET', disk_href)
5508
5509 if response.status_code != requests.codes.ok:
5510 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
5511 .format(disk_href, response.status_code))
5512 return status
5513 try:
5514                 #Find bus type & max of instance IDs assigned to disks
5515 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5516 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
5517 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
5518 instance_id = 0
5519 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
5520 if item.find("rasd:Description",namespaces).text == "Hard disk":
5521 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
5522 if inst_id > instance_id:
5523 instance_id = inst_id
5524 disk_item = item.find("rasd:HostResource" ,namespaces)
5525 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
5526 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
5527
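                # The new disk Item reuses the bus type/subtype of the existing hard disk, takes the
                # next free InstanceID and sets RASD ResourceType 17 (disk drive); it is spliced in
                # just before the closing RasdItemsList tag and PUT back to vCD.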
5528 instance_id = instance_id + 1
5529 new_item = """<Item>
5530 <rasd:Description>Hard disk</rasd:Description>
5531 <rasd:ElementName>New disk</rasd:ElementName>
5532 <rasd:HostResource
5533 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
5534 vcloud:capacity="{}"
5535 vcloud:busSubType="{}"
5536 vcloud:busType="{}"></rasd:HostResource>
5537 <rasd:InstanceID>{}</rasd:InstanceID>
5538 <rasd:ResourceType>17</rasd:ResourceType>
5539 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
5540
5541                 new_data = response.content.decode() if isinstance(response.content, bytes) else response.content
5542 #Add new item at the bottom
5543 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
5544
5545 # Send PUT request to modify virtual hardware section with new disk
5546 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
5547
5548 response = self.perform_request(req_type='PUT',
5549 url=disk_href,
5550 data=new_data,
5551 headers=headers)
5552
5553 if response.status_code == 403:
5554 add_headers = {'Content-Type': headers['Content-Type']}
5555 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
5556
5557 if response.status_code != 202:
5558 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
5559 .format(disk_href, response.status_code, response.content))
5560 else:
5561 add_disk_task = self.get_task_from_response(response.content)
5562 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
5563 if result.get('status') == 'success':
5564 status = True
5565 else:
5566 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
5567
5568 except Exception as exp:
5569 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
5570
5571 return status
5572
5573
5574 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
5575 """
5576 Method to add existing disk to vm
5577 Args :
5578 catalogs - List of VDC catalogs
5579 image_id - Catalog ID
5580 template_name - Name of template in catalog
5581 vapp_uuid - UUID of vApp
5582 Returns:
5583 None
5584 """
5585 disk_info = None
5586 vcenter_conect, content = self.get_vcenter_content()
5587 #find moref-id of vm in image
5588 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
5589 image_id=image_id,
5590 )
5591
5592 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
5593 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
5594 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
5595 if catalog_vm_moref_id:
5596 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
5597 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
5598 if catalog_vm_obj:
5599 #find existing disk
5600 disk_info = self.find_disk(catalog_vm_obj)
5601 else:
5602 exp_msg = "No VM with image id {} found".format(image_id)
5603 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5604 else:
5605 exp_msg = "No Image found with image ID {} ".format(image_id)
5606 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5607
5608 if disk_info:
5609 self.logger.info("Existing disk_info : {}".format(disk_info))
5610 #get VM
5611 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5612 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
5613 if vm_obj:
5614 status = self.add_disk(vcenter_conect=vcenter_conect,
5615 vm=vm_obj,
5616 disk_info=disk_info,
5617 size=size,
5618 vapp_uuid=vapp_uuid
5619 )
5620 if status:
5621 self.logger.info("Disk from image id {} added to {}".format(image_id,
5622 vm_obj.config.name)
5623 )
5624 else:
5625 msg = "No disk found with image id {} to add in VM {}".format(
5626 image_id,
5627 vm_obj.config.name)
5628 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
5629
5630
5631 def find_disk(self, vm_obj):
5632 """
5633 Method to find details of existing disk in VM
5634 Args :
5635 vm_obj - vCenter object of VM
5636 
5637 Returns:
5638 disk_info : dict of disk details
5639 """
5640 disk_info = {}
5641 if vm_obj:
5642 try:
5643 devices = vm_obj.config.hardware.device
5644 for device in devices:
5645 if type(device) is vim.vm.device.VirtualDisk:
5646 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
5647 disk_info["full_path"] = device.backing.fileName
5648 disk_info["datastore"] = device.backing.datastore
5649 disk_info["capacityKB"] = device.capacityInKB
5650 break
5651 except Exception as exp:
5652 self.logger.error("find_disk() : exception occurred while "\
5653 "getting existing disk details :{}".format(exp))
5654 return disk_info
5655
5656
5657 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
5658 """
5659 Method to add existing disk in VM
5660 Args :
5661 vcenter_conect - vCenter content object
5662 vm - vCenter vm object
5663 disk_info : dict of disk details
5664 Returns:
5665 status : status of add disk task
5666 """
5667 datastore = disk_info["datastore"] if "datastore" in disk_info else None
5668 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
5669 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
5670 if size is not None:
5671 #Convert size from GB to KB
5672 sizeKB = int(size) * 1024 * 1024
5673             #Compare size of existing disk and user-given size; assign whichever is greater
5674 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
5675 sizeKB, capacityKB))
5676 if sizeKB > capacityKB:
5677 capacityKB = sizeKB
5678
5679 if datastore and fullpath and capacityKB:
5680 try:
5681 spec = vim.vm.ConfigSpec()
5682 # get all disks on a VM, set unit_number to the next available
5683 unit_number = 0
5684 for dev in vm.config.hardware.device:
5685 if hasattr(dev.backing, 'fileName'):
5686 unit_number = int(dev.unitNumber) + 1
5687 # unit_number 7 reserved for scsi controller
5688 if unit_number == 7:
5689 unit_number += 1
5690 if isinstance(dev, vim.vm.device.VirtualDisk):
5691 #vim.vm.device.VirtualSCSIController
5692 controller_key = dev.controllerKey
5693
5694 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
5695 unit_number, controller_key))
5696 # add disk here
5697 dev_changes = []
5698 disk_spec = vim.vm.device.VirtualDeviceSpec()
5699 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5700 disk_spec.device = vim.vm.device.VirtualDisk()
5701 disk_spec.device.backing = \
5702 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
5703 disk_spec.device.backing.thinProvisioned = True
5704 disk_spec.device.backing.diskMode = 'persistent'
5705 disk_spec.device.backing.datastore = datastore
5706 disk_spec.device.backing.fileName = fullpath
5707
5708 disk_spec.device.unitNumber = unit_number
5709 disk_spec.device.capacityInKB = capacityKB
5710 disk_spec.device.controllerKey = controller_key
5711 dev_changes.append(disk_spec)
5712 spec.deviceChange = dev_changes
5713 task = vm.ReconfigVM_Task(spec=spec)
5714 status = self.wait_for_vcenter_task(task, vcenter_conect)
5715 return status
5716 except Exception as exp:
5717 exp_msg = "add_disk() : exception {} occurred while adding disk "\
5718 "{} to vm {}".format(exp,
5719 fullpath,
5720 vm.config.name)
5721 self.rollback_newvm(vapp_uuid, exp_msg)
5722 else:
5723 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
5724 self.rollback_newvm(vapp_uuid, msg)
5725
5726
5727 def get_vcenter_content(self):
5728 """
5729 Get the vsphere content object
5730 """
5731 try:
5732 vm_vcenter_info = self.get_vm_vcenter_info()
5733 except Exception as exp:
5734             self.logger.error("Error occurred while getting vCenter information"\
5735 " for VM : {}".format(exp))
5736 raise vimconn.vimconnException(message=exp)
5737
5738 context = None
5739 if hasattr(ssl, '_create_unverified_context'):
5740 context = ssl._create_unverified_context()
5741
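        # Open a pyVmomi session against the vCenter configured for this VIM; certificate verification
        # is disabled via the unverified SSL context above, and Disconnect is registered with atexit so
        # the session is closed when the process exits.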
5742 vcenter_conect = SmartConnect(
5743 host=vm_vcenter_info["vm_vcenter_ip"],
5744 user=vm_vcenter_info["vm_vcenter_user"],
5745 pwd=vm_vcenter_info["vm_vcenter_password"],
5746 port=int(vm_vcenter_info["vm_vcenter_port"]),
5747 sslContext=context
5748 )
5749 atexit.register(Disconnect, vcenter_conect)
5750 content = vcenter_conect.RetrieveContent()
5751 return vcenter_conect, content
5752
5753
5754 def get_vm_moref_id(self, vapp_uuid):
5755 """
5756 Get the moref_id of given VM
5757 """
5758 try:
5759 if vapp_uuid:
5760 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
5761 if vm_details and "vm_vcenter_info" in vm_details:
5762 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
5763 return vm_moref_id
5764
5765 except Exception as exp:
5766 self.logger.error("Error occurred while getting VM moref ID "\
5767 " for VM : {}".format(exp))
5768 return None
5769
5770
5771 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
5772 """
5773 Method to get vApp template details
5774 Args :
5775 catalogs - list of VDC catalogs
5776 image_id - Catalog ID to find
5777 template_name : template name in catalog
5778 Returns:
5779             parsed_response : dict of vApp template details
5780 """
5781 parsed_response = {}
5782
5783 vca = self.connect_as_admin()
5784 if not vca:
5785 raise vimconn.vimconnConnectionException("Failed to connect vCD")
5786
5787 try:
5788 org, vdc = self.get_vdc_details()
5789 catalog = self.get_catalog_obj(image_id, catalogs)
5790 if catalog:
5791 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
5792 catalog_items = [items.attrib]
5793
5794 if len(catalog_items) == 1:
5795 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5796 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5797
5798 response = self.perform_request(req_type='GET',
5799 url=catalog_items[0].get('href'),
5800 headers=headers)
5801 catalogItem = XmlElementTree.fromstring(response.content)
5802 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
5803 vapp_tempalte_href = entity.get("href")
5804 #get vapp details and parse moref id
5805
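                    # Follow the vAppTemplate href and read vmext:VmVimObjectRef/vmext:MoRef from the
                    # VCloudExtension section of the first child VM to learn its vCenter moref id.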
5806 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
5807 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
5808 'vmw': 'http://www.vmware.com/schema/ovf',
5809 'vm': 'http://www.vmware.com/vcloud/v1.5',
5810 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5811 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
5812 'xmlns':"http://www.vmware.com/vcloud/v1.5"
5813 }
5814
5815 if vca._session:
5816 response = self.perform_request(req_type='GET',
5817 url=vapp_tempalte_href,
5818 headers=headers)
5819
5820 if response.status_code != requests.codes.ok:
5821 self.logger.debug("REST API call {} failed. Return status code {}".format(
5822 vapp_tempalte_href, response.status_code))
5823
5824 else:
5825 xmlroot_respond = XmlElementTree.fromstring(response.content)
5826 children_section = xmlroot_respond.find('vm:Children/', namespaces)
5827 if children_section is not None:
5828 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
5829 if vCloud_extension_section is not None:
5830 vm_vcenter_info = {}
5831 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
5832 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
5833 if vmext is not None:
5834 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
5835 parsed_response["vm_vcenter_info"]= vm_vcenter_info
5836
5837 except Exception as exp :
5838 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
5839
5840 return parsed_response
5841
5842
5843 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
5844 """
5845 Method to delete vApp
5846 Args :
5847 vapp_uuid - vApp UUID
5848 msg - Error message to be logged
5849 exp_type : Exception type
5850 Returns:
5851 None
5852 """
5853 if vapp_uuid:
5854 status = self.delete_vminstance(vapp_uuid)
5855 else:
5856 msg = "No vApp ID"
5857 self.logger.error(msg)
5858 if exp_type == "Genric":
5859 raise vimconn.vimconnException(msg)
5860 elif exp_type == "NotFound":
5861 raise vimconn.vimconnNotFoundException(message=msg)
5862
5863 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
5864 """
5865 Method to attach SRIOV adapters to VM
5866
5867 Args:
5868 vapp_uuid - uuid of vApp/VM
5869             sriov_nets - SRIOV devices information as specified in VNFD (flavor)
5870 vmname_andid - vmname
5871
5872 Returns:
5873             The status of add SRIOV adapter task, vm object and
5874 vcenter_conect object
5875 """
5876 vm_obj = None
5877 vcenter_conect, content = self.get_vcenter_content()
5878 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5879
5880 if vm_moref_id:
5881 try:
5882 no_of_sriov_devices = len(sriov_nets)
5883 if no_of_sriov_devices > 0:
5884 #Get VM and its host
5885 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5886 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
5887 if host_obj and vm_obj:
5888 # get SRIOV devices from the host on which the vApp is currently installed
5889 avilable_sriov_devices = self.get_sriov_devices(host_obj,
5890 no_of_sriov_devices,
5891 )
5892
5893 if len(avilable_sriov_devices) == 0:
5894 # find another host with enough available SR-IOV devices
5895 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
5896 content,
5897 no_of_sriov_devices,
5898 )
5899
5900 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
5901 #Migrate vm to the host where SRIOV devices are available
5902 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
5903 new_host_obj))
5904 task = self.relocate_vm(new_host_obj, vm_obj)
5905 if task is not None:
5906 result = self.wait_for_vcenter_task(task, vcenter_conect)
5907 self.logger.info("Migrate VM status: {}".format(result))
5908 host_obj = new_host_obj
5909 else:
5910 self.logger.info("Fail to migrate VM : {}".format(result))
5911 raise vimconn.vimconnNotFoundException(
5912 "Fail to migrate VM : {} to host {}".format(
5913 vmname_andid,
5914 new_host_obj)
5915 )
5916
5917 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
5918 #Add SRIOV devices one by one
5919 for sriov_net in sriov_nets:
5920 network_name = sriov_net.get('net_id')
5921 dvs_portgr_name = self.create_dvPort_group(network_name)
5922 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
5923 # add VLAN ID: modify the portgroup to carry the VLAN ID
5924 self.configure_vlanID(content, vcenter_conect, network_name)
5925
5926 task = self.add_sriov_to_vm(content,
5927 vm_obj,
5928 host_obj,
5929 network_name,
5930 avilable_sriov_devices[0]
5931 )
5932 if task:
5933 status= self.wait_for_vcenter_task(task, vcenter_conect)
5934 if status:
5935 self.logger.info("Added SRIOV {} to VM {}".format(
5936 no_of_sriov_devices,
5937 str(vm_obj)))
5938 else:
5939 self.logger.error("Fail to add SRIOV {} to VM {}".format(
5940 no_of_sriov_devices,
5941 str(vm_obj)))
5942 raise vimconn.vimconnUnexpectedResponse(
5943 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
5944 )
5945 return True, vm_obj, vcenter_conect
5946 else:
5947 self.logger.error("Currently there is no host with"\
5948 " {} number of avaialble SRIOV "\
5949 "VFs required for VM {}".format(
5950 no_of_sriov_devices,
5951 vmname_andid)
5952 )
5953 raise vimconn.vimconnNotFoundException(
5954 "Currently there is no host with {} "\
5955 "number of avaialble SRIOV devices required for VM {}".format(
5956 no_of_sriov_devices,
5957 vmname_andid))
5958 else:
5959 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
5960
5961 except vmodl.MethodFault as error:
5962 self.logger.error("Error occurred while adding SRIOV {} ",error)
5963 return None, vm_obj, vcenter_conect
5964
5965
5966 def get_sriov_devices(self,host, no_of_vfs):
5967 """
5968 Method to get the details of SRIOV devices on given host
5969 Args:
5970 host - vSphere host object
5971 no_of_vfs - number of VFs needed on host
5972
5973 Returns:
5974 array of SRIOV devices
5975 """
5976 sriovInfo=[]
5977 if host:
5978 for device in host.config.pciPassthruInfo:
5979 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
5980 if device.numVirtualFunction >= no_of_vfs:
5981 sriovInfo.append(device)
5982 break
5983 return sriovInfo
5984
5985
5986 def get_host_and_sriov_devices(self, content, no_of_vfs):
5987 """
5988 Method to get the details of SRIOV devices on all hosts
5989
5990 Args:
5991 content - vCenter content object
5992 no_of_vfs - number of PCI VFs needed on a host
5993
5994 Returns:
5995 array of SRIOV devices and host object
5996 """
5997 host_obj = None
5998 sriov_device_objs = None
5999 try:
6000 if content:
6001 container = content.viewManager.CreateContainerView(content.rootFolder,
6002 [vim.HostSystem], True)
6003 for host in container.view:
6004 devices = self.get_sriov_devices(host, no_of_vfs)
6005 if devices:
6006 host_obj = host
6007 sriov_device_objs = devices
6008 break
6009 except Exception as exp:
6010 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
6011
6012 return host_obj,sriov_device_objs
6013
6014
6015 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
6016 """
6017 Method to add SRIOV adapter to vm
6018
6019 Args:
6020 host_obj - vSphere host object
6021 vm_obj - vSphere vm object
6022 content - vCenter content object
6023 network_name - name of the distributed virtual portgroup
6024 sriov_device - SRIOV device info
6025
6026 Returns:
6027 task object
6028 """
6029 devices = []
6030 vnic_label = "sriov nic"
6031 try:
6032 dvs_portgr = self.get_dvport_group(network_name)
6033 network_name = dvs_portgr.name
6034 nic = vim.vm.device.VirtualDeviceSpec()
6035 # VM device
6036 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
6037 nic.device = vim.vm.device.VirtualSriovEthernetCard()
6038 nic.device.addressType = 'assigned'
6039 #nic.device.key = 13016
6040 nic.device.deviceInfo = vim.Description()
6041 nic.device.deviceInfo.label = vnic_label
6042 nic.device.deviceInfo.summary = network_name
6043 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
6044
6045 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
6046 nic.device.backing.deviceName = network_name
6047 nic.device.backing.useAutoDetect = False
6048 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
6049 nic.device.connectable.startConnected = True
6050 nic.device.connectable.allowGuestControl = True
6051
6052 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
6053 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
6054 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
6055
6056 devices.append(nic)
6057 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
6058 task = vm_obj.ReconfigVM_Task(vmconf)
6059 return task
6060 except Exception as exp:
6061 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
6062 return None
6063
6064
6065 def create_dvPort_group(self, network_name):
6066 """
6067 Method to create a distributed virtual portgroup
6068
6069 Args:
6070 network_name - name of network/portgroup
6071
6072 Returns:
6073 portgroup key
6074 """
6075 try:
6076 new_network_name = [network_name, '-', str(uuid.uuid4())]
6077 network_name=''.join(new_network_name)
6078 vcenter_conect, content = self.get_vcenter_content()
6079
6080 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
6081 if dv_switch:
6082 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6083 dv_pg_spec.name = network_name
6084
6085 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
6086 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6087 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
6088 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
6089 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
6090 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
6091
6092 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
6093 self.wait_for_vcenter_task(task, vcenter_conect)
6094
6095 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
6096 if dvPort_group:
6097 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
6098 return dvPort_group.key
6099 else:
6100 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
6101
6102 except Exception as exp:
6103 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
6104 " : {}".format(network_name, exp))
6105 return None
6106
6107 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
6108 """
6109 Method to reconfigure a distributed virtual portgroup
6110
6111 Args:
6112 dvPort_group_name - name of the distributed virtual portgroup
6113 content - vCenter content object
6114 config_info - distributed virtual portgroup configuration
6115
6116 Returns:
6117 task object
6118 """
6119 try:
6120 dvPort_group = self.get_dvport_group(dvPort_group_name)
6121 if dvPort_group:
6122 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6123 dv_pg_spec.configVersion = dvPort_group.config.configVersion
6124 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6125 if "vlanID" in config_info:
6126 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
6127 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
6128
6129 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
6130 return task
6131 else:
6132 return None
6133 except Exception as exp:
6134 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
6135 " : {}".format(dvPort_group_name, exp))
6136 return None
6137
6138
6139 def destroy_dvport_group(self , dvPort_group_name):
6140 """
6141 Method to destroy a distributed virtual portgroup
6142
6143 Args:
6144 dvPort_group_name - name of the distributed virtual portgroup
6145
6146 Returns:
6147 True if the portgroup was deleted successfully, else None
6148 """
6149 vcenter_conect, content = self.get_vcenter_content()
6150 try:
6151 status = None
6152 dvPort_group = self.get_dvport_group(dvPort_group_name)
6153 if dvPort_group:
6154 task = dvPort_group.Destroy_Task()
6155 status = self.wait_for_vcenter_task(task, vcenter_conect)
6156 return status
6157 except vmodl.MethodFault as exp:
6158 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
6159 exp, dvPort_group_name))
6160 return None
6161
6162
6163 def get_dvport_group(self, dvPort_group_name):
6164 """
6165 Method to get a distributed virtual portgroup
6166
6167 Args:
6168 dvPort_group_name - name/key of the distributed virtual portgroup
6169
6170 Returns:
6171 portgroup object
6172 """
6173 vcenter_conect, content = self.get_vcenter_content()
6174 dvPort_group = None
6175 try:
6176 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
6177 for item in container.view:
6178 if item.key == dvPort_group_name:
6179 dvPort_group = item
6180 break
6181 return dvPort_group
6182 except vmodl.MethodFault as exp:
6183 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6184 exp, dvPort_group_name))
6185 return None
6186
6187 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
6188 """
6189 Method to get the VLAN ID of a distributed virtual portgroup
6190
6191 Args:
6192 dvPort_group_name - name of the distributed virtual portgroup
6193
6194 Returns:
6195 vlan ID
6196 """
6197 vlanId = None
6198 try:
6199 dvPort_group = self.get_dvport_group(dvPort_group_name)
6200 if dvPort_group:
6201 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
6202 except vmodl.MethodFault as exp:
6203 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6204 exp, dvPort_group_name))
6205 return vlanId
6206
6207
6208 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
6209 """
6210 Method to configure the VLAN ID of a distributed virtual portgroup
6211
6212 Args:
6213 dvPort_group_name - name of the distributed virtual portgroup
6214
6215 Returns:
6216 None
6217 """
6218 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
6219 if vlanID == 0:
6220 #configure vlanID
6221 vlanID = self.genrate_vlanID(dvPort_group_name)
6222 config = {"vlanID":vlanID}
6223 task = self.reconfig_portgroup(content, dvPort_group_name,
6224 config_info=config)
6225 if task:
6226 status= self.wait_for_vcenter_task(task, vcenter_conect)
6227 if status:
6228 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
6229 dvPort_group_name,vlanID))
6230 else:
6231 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
6232 dvPort_group_name, vlanID))
6233
6234
6235 def genrate_vlanID(self, network_name):
6236 """
6237 Method to get unused vlanID
6238 Args:
6239 network_name - name of network/portgroup
6240 Returns:
6241 vlanID
6242 """
6243 vlan_id = None
6244 used_ids = []
6245 if self.config.get('vlanID_range') is None:
6246 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
6247 "in the config before creating an SRIOV network with a VLAN tag")
6248 if "used_vlanIDs" not in self.persistent_info:
6249 self.persistent_info["used_vlanIDs"] = {}
6250 else:
6251 used_ids = list(self.persistent_info["used_vlanIDs"].values())
6252
6253 for vlanID_range in self.config.get('vlanID_range'):
6254 start_vlanid, end_vlanid = vlanID_range.split("-")
6255 if int(start_vlanid) > int(end_vlanid):
6256 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
6257 vlanID_range))
6258
6259 for id in range(int(start_vlanid), int(end_vlanid) + 1):
6260 if id not in used_ids:
6261 vlan_id = id
6262 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
6263 return vlan_id
6264 if vlan_id is None:
6265 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
6266
6267
6268 def get_obj(self, content, vimtype, name):
6269 """
6270 Get the vsphere object associated with a given text name
6271 """
6272 obj = None
6273 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
6274 for item in container.view:
6275 if item.name == name:
6276 obj = item
6277 break
6278 return obj
6279
6280
6281 def insert_media_to_vm(self, vapp, image_id):
6282 """
6283 Method to insert media CD-ROM (ISO image) from the catalog into a vm.
6284 vapp - vApp object used to get the vm id
6285 image_id - image id of the CD-ROM to be inserted into the vm
6286 """
6287 # create connection object
6288 vca = self.connect()
6289 try:
6290 # fetching catalog details
6291 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
6292 if vca._session:
6293 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6294 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6295 response = self.perform_request(req_type='GET',
6296 url=rest_url,
6297 headers=headers)
6298
6299 if response.status_code != 200:
6300 self.logger.error("REST call {} failed reason : {}"\
6301 "status code : {}".format(url_rest_call,
6302 response.content,
6303 response.status_code))
6304 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
6305 "catalog details")
6306 # searching iso name and id
6307 iso_name,media_id = self.get_media_details(vca, response.content)
6308
6309 if iso_name and media_id:
6310 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6311 <ns6:MediaInsertOrEjectParams
6312 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
6313 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6314 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
6315 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6316 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
6317 xmlns:ns7="http://www.vmware.com/schema/ovf"
6318 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
6319 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
6320 <ns6:Media
6321 type="application/vnd.vmware.vcloud.media+xml"
6322 name="{}"
6323 id="urn:vcloud:media:{}"
6324 href="https://{}/api/media/{}"/>
6325 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
6326 self.url,media_id)
6327
6328 for vms in vapp.get_all_vms():
6329 vm_id = vms.get('id').split(':')[-1]
6330
6331 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
6332 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
6333
6334 response = self.perform_request(req_type='POST',
6335 url=rest_url,
6336 data=data,
6337 headers=headers)
6338
6339 if response.status_code != 202:
6340 error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
6341 "Status code {}".format(response.text, response.status_code)
6342 self.logger.error(error_msg)
6343 raise vimconn.vimconnException(error_msg)
6344 else:
6345 task = self.get_task_from_response(response.content)
6346 result = self.client.get_task_monitor().wait_for_success(task=task)
6347 if result.get('status') == 'success':
6348 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
6349 " image to vm {}".format(vm_id))
6350
6351 except Exception as exp:
6352 self.logger.error("insert_media_to_vm() : exception occurred "\
6353 "while inserting media CD-ROM")
6354 raise vimconn.vimconnException(message=exp)
6355
6356
6357 def get_media_details(self, vca, content):
6358 """
6359 Method to get catalog item details
6360 vca - connection object
6361 content - Catalog details
6362 Return - Media name, media id
6363 """
6364 cataloghref_list = []
6365 try:
6366 if content:
6367 vm_list_xmlroot = XmlElementTree.fromstring(content)
6368 for child in vm_list_xmlroot.iter():
6369 if 'CatalogItem' in child.tag:
6370 cataloghref_list.append(child.attrib.get('href'))
6371 if cataloghref_list is not None:
6372 for href in cataloghref_list:
6373 if href:
6374 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6375 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6376 response = self.perform_request(req_type='GET',
6377 url=href,
6378 headers=headers)
6379 if response.status_code != 200:
6380 self.logger.error("REST call {} failed reason : {}"\
6381 "status code : {}".format(href,
6382 response.content,
6383 response.status_code))
6384 raise vimconn.vimconnException("get_media_details : Failed to get "\
6385 "catalogitem details")
6386 list_xmlroot = XmlElementTree.fromstring(response.content)
6387 for child in list_xmlroot.iter():
6388 if 'Entity' in child.tag:
6389 if 'media' in child.attrib.get('href'):
6390 name = child.attrib.get('name')
6391 media_id = child.attrib.get('href').split('/').pop()
6392 return name,media_id
6393 else:
6394 self.logger.debug("Media name and id not found")
6395 return False,False
6396 except Exception as exp:
6397 self.logger.error("get_media_details : exception occurred "\
6398 "getting media details")
6399 raise vimconn.vimconnException(message=exp)
6400
6401
6402 def retry_rest(self, method, url, add_headers=None, data=None):
6403 """ Method to get Token & retry respective REST request
6404 Args:
6405 method - REST method: one of 'GET', 'PUT', 'POST' or 'DELETE'
6406 url - request url to be used
6407 add_headers - Additional headers (optional)
6408 data - Request payload data to be passed in request
6409 Returns:
6410 response - Response of request
6411 """
6412 response = None
6413
6414 #Get token
6415 self.get_token()
6416
6417 if self.client._session:
6418 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6419 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6420
6421 if add_headers:
6422 headers.update(add_headers)
6423
6424 if method == 'GET':
6425 response = self.perform_request(req_type='GET',
6426 url=url,
6427 headers=headers)
6428 elif method == 'PUT':
6429 response = self.perform_request(req_type='PUT',
6430 url=url,
6431 headers=headers,
6432 data=data)
6433 elif method == 'POST':
6434 response = self.perform_request(req_type='POST',
6435 url=url,
6436 headers=headers,
6437 data=data)
6438 elif method == 'DELETE':
6439 response = self.perform_request(req_type='DELETE',
6440 url=url,
6441 headers=headers)
6442 return response
6443
6444
6445 def get_token(self):
6446 """ Generate a new token if expired
6447
6448 Returns:
6449 None; the refreshed client object is stored in self.client and can later be used to connect to vCloud Director as admin for the VDC
6450 """
6451 try:
6452 self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
6453 self.user,
6454 self.org_name))
6455 host = self.url
6456 client = Client(host, verify_ssl_certs=False)
6457 client.set_highest_supported_version()
6458 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
6459 # connection object
6460 self.client = client
6461
6462 except Exception:
6463 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
6464 "{} as user: {}".format(self.org_name, self.user))
6465
6466 if not client:
6467 raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
6468
6469
6470 def get_vdc_details(self):
6471 """ Get VDC details using pyVcloud Lib
6472
6473 Returns org and vdc object
6474 """
6475 vdc = None
6476 try:
6477 org = Org(self.client, resource=self.client.get_org())
6478 vdc = org.get_vdc(self.tenant_name)
6479 except Exception as e:
6480 # pyvcloud does not raise a specific exception; refresh the token nevertheless
6481 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
6482
6483 # Retry once by refreshing the token, if the first attempt failed
6484 if vdc is None:
6485 self.get_token()
6486 org = Org(self.client, resource=self.client.get_org())
6487 vdc = org.get_vdc(self.tenant_name)
6488
6489 return org, vdc
6490
6491
6492 def perform_request(self, req_type, url, headers=None, data=None):
6493 """Perform the POST/PUT/GET/DELETE request."""
6494
6495 #Log REST request details
6496 self.log_request(req_type, url=url, headers=headers, data=data)
6497 # perform request and return its result
6498 if req_type == 'GET':
6499 response = requests.get(url=url,
6500 headers=headers,
6501 verify=False)
6502 elif req_type == 'PUT':
6503 response = requests.put(url=url,
6504 headers=headers,
6505 data=data,
6506 verify=False)
6507 elif req_type == 'POST':
6508 response = requests.post(url=url,
6509 headers=headers,
6510 data=data,
6511 verify=False)
6512 elif req_type == 'DELETE':
6513 response = requests.delete(url=url,
6514 headers=headers,
6515 verify=False)
6516 #Log the REST response
6517 self.log_response(response)
6518
6519 return response
6520
6521
6522 def log_request(self, req_type, url=None, headers=None, data=None):
6523 """Logs REST request details"""
6524
6525 if req_type is not None:
6526 self.logger.debug("Request type: {}".format(req_type))
6527
6528 if url is not None:
6529 self.logger.debug("Request url: {}".format(url))
6530
6531 if headers is not None:
6532 for header in headers:
6533 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
6534
6535 if data is not None:
6536 self.logger.debug("Request data: {}".format(data))
6537
6538
6539 def log_response(self, response):
6540 """Logs REST response details"""
6541
6542 self.logger.debug("Response status code: {} ".format(response.status_code))
6543
6544
6545 def get_task_from_response(self, content):
6546 """
6547 content - API response content(response.content)
6548 return task object
6549 """
6550 xmlroot = XmlElementTree.fromstring(content)
6551 if xmlroot.tag.split('}')[1] == "Task":
6552 return xmlroot
6553 else:
6554 for ele in xmlroot:
6555 if ele.tag.split("}")[1] == "Tasks":
6556 task = ele[0]
6557 break
6558 return task
6559
6560
6561 def power_on_vapp(self,vapp_id, vapp_name):
6562 """
6563 vapp_id - vApp uuid
6564 vapp_name - vApp name
6565 return - Task object
6566 """
6567 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6568 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6569
6570 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
6571 vapp_id)
6572 response = self.perform_request(req_type='POST',
6573 url=poweron_href,
6574 headers=headers)
6575
6576 if response.status_code != 202:
6577 self.logger.error("REST call {} failed reason : {}"\
6578 "status code : {} ".format(poweron_href,
6579 response.content,
6580 response.status_code))
6581 raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
6582 "vApp {}".format(vapp_name))
6583 else:
6584 poweron_task = self.get_task_from_response(response.content)
6585 return poweron_task
6586
6587