dd3bba5f250a5be6fcdcf567cddb5f0bf0f8db40
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud.vcd.client import BasicLoginCredentials,Client,VcdTaskException
46 from pyvcloud.vcd.vdc import VDC
47 from pyvcloud.vcd.org import Org
48 import re
49 from pyvcloud.vcd.vapp import VApp
50 from xml.sax.saxutils import escape
51 import logging
52 import json
53 import time
54 import uuid
55 import httplib
56 #For python3
57 #import http.client
58 import hashlib
59 import socket
60 import struct
61 import netaddr
62 import random
63
64 # global variable for vcd connector type
65 STANDALONE = 'standalone'
66
67 # key for flavor dicts
68 FLAVOR_RAM_KEY = 'ram'
69 FLAVOR_VCPUS_KEY = 'vcpus'
70 FLAVOR_DISK_KEY = 'disk'
71 DEFAULT_IP_PROFILE = {'dhcp_count':50,
72 'dhcp_enabled':True,
73 'ip_version':"IPv4"
74 }
75 # global variable for wait time
76 INTERVAL_TIME = 5
77 MAX_WAIT_TIME = 1800
78
79 API_VERSION = '5.9'
80
81 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
82 __date__ = "$09-Mar-2018 11:09:29$"
83 __version__ = '0.2'
84
85 # -1: "Could not be created",
86 # 0: "Unresolved",
87 # 1: "Resolved",
88 # 2: "Deployed",
89 # 3: "Suspended",
90 # 4: "Powered on",
91 # 5: "Waiting for user input",
92 # 6: "Unknown state",
93 # 7: "Unrecognized state",
94 # 8: "Powered off",
95 # 9: "Inconsistent state",
96 # 10: "Children do not all have the same status",
97 # 11: "Upload initiated, OVF descriptor pending",
98 # 12: "Upload initiated, copying contents",
99 # 13: "Upload initiated , disk contents pending",
100 # 14: "Upload has been quarantined",
101 # 15: "Upload quarantine period has expired"
102
103 # mapping vCD status to MANO
104 vcdStatusCode2manoFormat = {4: 'ACTIVE',
105 7: 'PAUSED',
106 3: 'SUSPENDED',
107 8: 'INACTIVE',
108 12: 'BUILD',
109 -1: 'ERROR',
110 14: 'DELETED'}
111
112 #
113 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
114 'ERROR': 'ERROR', 'DELETED': 'DELETED'
115 }
116
117 class vimconnector(vimconn.vimconnector):
118 # dict used to store flavor in memory
119 flavorlist = {}
120
121 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
122 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
123 """
124 Constructor create vmware connector to vCloud director.
125
126 By default construct doesn't validate connection state. So client can create object with None arguments.
127 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
128
129 a) It initialize organization UUID
130 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
131
132 Args:
133 uuid - is organization uuid.
134 name - is organization name that must be presented in vCloud director.
135 tenant_id - is VDC uuid it must be presented in vCloud director
136 tenant_name - is VDC name.
137 url - is hostname or ip address of vCloud director
138 url_admin - same as above.
139 user - is user that administrator for organization. Caller must make sure that
140 username has right privileges.
141
142 password - is password for a user.
143
144 VMware connector also requires PVDC administrative privileges and separate account.
145 This variables must be passed via config argument dict contains keys
146
147 dict['admin_username']
148 dict['admin_password']
149 config - Provide NSX and vCenter information
150
151 Returns:
152 Nothing.
153 """
154
155 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
156 url_admin, user, passwd, log_level, config)
157
158 self.logger = logging.getLogger('openmano.vim.vmware')
159 self.logger.setLevel(10)
160 self.persistent_info = persistent_info
161
162 self.name = name
163 self.id = uuid
164 self.url = url
165 self.url_admin = url_admin
166 self.tenant_id = tenant_id
167 self.tenant_name = tenant_name
168 self.user = user
169 self.passwd = passwd
170 self.config = config
171 self.admin_password = None
172 self.admin_user = None
173 self.org_name = ""
174 self.nsx_manager = None
175 self.nsx_user = None
176 self.nsx_password = None
177 self.availability_zone = None
178
179 # Disable warnings from self-signed certificates.
180 requests.packages.urllib3.disable_warnings()
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 #Set availability zone for Affinity rules
214 self.availability_zone = self.set_availability_zones()
215
216 # ############# Stub code for SRIOV #################
217 # try:
218 # self.dvs_name = config['dv_switch_name']
219 # except KeyError:
220 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
221 #
222 # self.vlanID_range = config.get("vlanID_range", None)
223
224 self.org_uuid = None
225 self.client = None
226
227 if not url:
228 raise vimconn.vimconnException('url param can not be NoneType')
229
230 if not self.url_admin: # try to use normal url
231 self.url_admin = self.url
232
233 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
234 self.tenant_id, self.tenant_name))
235 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
236 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
237
238 # initialize organization
239 if self.user is not None and self.passwd is not None and self.url:
240 self.init_organization()
241
242 def __getitem__(self, index):
243 if index == 'name':
244 return self.name
245 if index == 'tenant_id':
246 return self.tenant_id
247 if index == 'tenant_name':
248 return self.tenant_name
249 elif index == 'id':
250 return self.id
251 elif index == 'org_name':
252 return self.org_name
253 elif index == 'org_uuid':
254 return self.org_uuid
255 elif index == 'user':
256 return self.user
257 elif index == 'passwd':
258 return self.passwd
259 elif index == 'url':
260 return self.url
261 elif index == 'url_admin':
262 return self.url_admin
263 elif index == "config":
264 return self.config
265 else:
266 raise KeyError("Invalid key '%s'" % str(index))
267
268 def __setitem__(self, index, value):
269 if index == 'name':
270 self.name = value
271 if index == 'tenant_id':
272 self.tenant_id = value
273 if index == 'tenant_name':
274 self.tenant_name = value
275 elif index == 'id':
276 self.id = value
277 elif index == 'org_name':
278 self.org_name = value
279 elif index == 'org_uuid':
280 self.org_uuid = value
281 elif index == 'user':
282 self.user = value
283 elif index == 'passwd':
284 self.passwd = value
285 elif index == 'url':
286 self.url = value
287 elif index == 'url_admin':
288 self.url_admin = value
289 else:
290 raise KeyError("Invalid key '%s'" % str(index))
291
292 def connect_as_admin(self):
293 """ Method connect as pvdc admin user to vCloud director.
294 There are certain action that can be done only by provider vdc admin user.
295 Organization creation / provider network creation etc.
296
297 Returns:
298 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
299 """
300
301 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
302
303 try:
304 host = self.url
305 org = 'System'
306 client_as_admin = Client(host, verify_ssl_certs=False)
307 client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
308 except Exception as e:
309 raise vimconn.vimconnException(
310 "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
311
312 return client_as_admin
313
314 def connect(self):
315 """ Method connect as normal user to vCloud director.
316
317 Returns:
318 The return client object that latter can be used to connect to vCloud director as admin for VDC
319 """
320
321 try:
322 self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
323 self.user,
324 self.org_name))
325 host = self.url
326 client = Client(host, verify_ssl_certs=False)
327 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
328 except:
329 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
330 "{} as user: {}".format(self.org_name, self.user))
331
332 return client
333
334 def init_organization(self):
335 """ Method initialize organization UUID and VDC parameters.
336
337 At bare minimum client must provide organization name that present in vCloud director and VDC.
338
339 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
340 The Org - UUID will be initialized at the run time if data center present in vCloud director.
341
342 Returns:
343 The return vca object that letter can be used to connect to vcloud direct as admin
344 """
345 client = self.connect()
346 if not client:
347 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
348
349 self.client = client
350 try:
351 if self.org_uuid is None:
352 org_list = client.get_org_list()
353 for org in org_list.Org:
354 # we set org UUID at the init phase but we can do it only when we have valid credential.
355 if org.get('name') == self.org_name:
356 self.org_uuid = org.get('href').split('/')[-1]
357 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
358 break
359 else:
360 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
361
362 # if well good we require for org details
363 org_details_dict = self.get_org(org_uuid=self.org_uuid)
364
365 # we have two case if we want to initialize VDC ID or VDC name at run time
366 # tenant_name provided but no tenant id
367 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
368 vdcs_dict = org_details_dict['vdcs']
369 for vdc in vdcs_dict:
370 if vdcs_dict[vdc] == self.tenant_name:
371 self.tenant_id = vdc
372 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
373 self.org_name))
374 break
375 else:
376 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
377 # case two we have tenant_id but we don't have tenant name so we find and set it.
378 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
379 vdcs_dict = org_details_dict['vdcs']
380 for vdc in vdcs_dict:
381 if vdc == self.tenant_id:
382 self.tenant_name = vdcs_dict[vdc]
383 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
384 self.org_name))
385 break
386 else:
387 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
388 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
389 except:
390 self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
391 self.logger.debug(traceback.format_exc())
392 self.org_uuid = None
393
394 def new_tenant(self, tenant_name=None, tenant_description=None):
395 """ Method adds a new tenant to VIM with this name.
396 This action requires access to create VDC action in vCloud director.
397
398 Args:
399 tenant_name is tenant_name to be created.
400 tenant_description not used for this call
401
402 Return:
403 returns the tenant identifier in UUID format.
404 If action is failed method will throw vimconn.vimconnException method
405 """
406 vdc_task = self.create_vdc(vdc_name=tenant_name)
407 if vdc_task is not None:
408 vdc_uuid, value = vdc_task.popitem()
409 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
410 return vdc_uuid
411 else:
412 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
413
414 def delete_tenant(self, tenant_id=None):
415 """ Delete a tenant from VIM
416 Args:
417 tenant_id is tenant_id to be deleted.
418
419 Return:
420 returns the tenant identifier in UUID format.
421 If action is failed method will throw exception
422 """
423 vca = self.connect_as_admin()
424 if not vca:
425 raise vimconn.vimconnConnectionException("Failed to connect vCD")
426
427 if tenant_id is not None:
428 if vca._session:
429 #Get OrgVDC
430 url_list = [self.url, '/api/vdc/', tenant_id]
431 orgvdc_herf = ''.join(url_list)
432
433 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
434 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
435 response = self.perform_request(req_type='GET',
436 url=orgvdc_herf,
437 headers=headers)
438
439 if response.status_code != requests.codes.ok:
440 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
441 "Return status code {}".format(orgvdc_herf,
442 response.status_code))
443 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
444
445 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
446 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
447 #For python3
448 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
449 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
450 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
451 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
452
453 response = self.perform_request(req_type='DELETE',
454 url=vdc_remove_href,
455 headers=headers)
456
457 if response.status_code == 202:
458 time.sleep(5)
459 return tenant_id
460 else:
461 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
462 "Return status code {}".format(vdc_remove_href,
463 response.status_code))
464 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
465 else:
466 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
467 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
468
469
470 def get_tenant_list(self, filter_dict={}):
471 """Obtain tenants of VIM
472 filter_dict can contain the following keys:
473 name: filter by tenant name
474 id: filter by tenant uuid/id
475 <other VIM specific>
476 Returns the tenant list of dictionaries:
477 [{'name':'<name>, 'id':'<id>, ...}, ...]
478
479 """
480 org_dict = self.get_org(self.org_uuid)
481 vdcs_dict = org_dict['vdcs']
482
483 vdclist = []
484 try:
485 for k in vdcs_dict:
486 entry = {'name': vdcs_dict[k], 'id': k}
487 # if caller didn't specify dictionary we return all tenants.
488 if filter_dict is not None and filter_dict:
489 filtered_entry = entry.copy()
490 filtered_dict = set(entry.keys()) - set(filter_dict)
491 for unwanted_key in filtered_dict: del entry[unwanted_key]
492 if filter_dict == entry:
493 vdclist.append(filtered_entry)
494 else:
495 vdclist.append(entry)
496 except:
497 self.logger.debug("Error in get_tenant_list()")
498 self.logger.debug(traceback.format_exc())
499 raise vimconn.vimconnException("Incorrect state. {}")
500
501 return vdclist
502
503 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
504 """Adds a tenant network to VIM
505 net_name is the name
506 net_type can be 'bridge','data'.'ptp'.
507 ip_profile is a dict containing the IP parameters of the network
508 shared is a boolean
509 Returns the network identifier"""
510
511 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
512 .format(net_name, net_type, ip_profile, shared))
513
514 isshared = 'false'
515 if shared:
516 isshared = 'true'
517
518 # ############# Stub code for SRIOV #################
519 # if net_type == "data" or net_type == "ptp":
520 # if self.config.get('dv_switch_name') == None:
521 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
522 # network_uuid = self.create_dvPort_group(net_name)
523
524 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
525 ip_profile=ip_profile, isshared=isshared)
526 if network_uuid is not None:
527 return network_uuid
528 else:
529 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
530
531 def get_vcd_network_list(self):
532 """ Method available organization for a logged in tenant
533
534 Returns:
535 The return vca object that letter can be used to connect to vcloud direct as admin
536 """
537
538 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
539
540 if not self.tenant_name:
541 raise vimconn.vimconnConnectionException("Tenant name is empty.")
542
543 org, vdc = self.get_vdc_details()
544 if vdc is None:
545 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
546
547 vdc_uuid = vdc.get('id').split(":")[3]
548 if self.client._session:
549 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
550 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
551 response = self.perform_request(req_type='GET',
552 url=vdc.get('href'),
553 headers=headers)
554 if response.status_code != 200:
555 self.logger.error("Failed to get vdc content")
556 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
557 else:
558 content = XmlElementTree.fromstring(response.content)
559
560 network_list = []
561 try:
562 for item in content:
563 if item.tag.split('}')[-1] == 'AvailableNetworks':
564 for net in item:
565 response = self.perform_request(req_type='GET',
566 url=net.get('href'),
567 headers=headers)
568
569 if response.status_code != 200:
570 self.logger.error("Failed to get network content")
571 raise vimconn.vimconnNotFoundException("Failed to get network content")
572 else:
573 net_details = XmlElementTree.fromstring(response.content)
574
575 filter_dict = {}
576 net_uuid = net_details.get('id').split(":")
577 if len(net_uuid) != 4:
578 continue
579 else:
580 net_uuid = net_uuid[3]
581 # create dict entry
582 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
583 vdc_uuid,
584 net_details.get('name')))
585 filter_dict["name"] = net_details.get('name')
586 filter_dict["id"] = net_uuid
587 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
588 shared = True
589 else:
590 shared = False
591 filter_dict["shared"] = shared
592 filter_dict["tenant_id"] = vdc_uuid
593 if int(net_details.get('status')) == 1:
594 filter_dict["admin_state_up"] = True
595 else:
596 filter_dict["admin_state_up"] = False
597 filter_dict["status"] = "ACTIVE"
598 filter_dict["type"] = "bridge"
599 network_list.append(filter_dict)
600 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
601 except:
602 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
603 pass
604
605 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
606 return network_list
607
608 def get_network_list(self, filter_dict={}):
609 """Obtain tenant networks of VIM
610 Filter_dict can be:
611 name: network name OR/AND
612 id: network uuid OR/AND
613 shared: boolean OR/AND
614 tenant_id: tenant OR/AND
615 admin_state_up: boolean
616 status: 'ACTIVE'
617
618 [{key : value , key : value}]
619
620 Returns the network list of dictionaries:
621 [{<the fields at Filter_dict plus some VIM specific>}, ...]
622 List can be empty
623 """
624
625 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
626
627 if not self.tenant_name:
628 raise vimconn.vimconnConnectionException("Tenant name is empty.")
629
630 org, vdc = self.get_vdc_details()
631 if vdc is None:
632 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
633
634 try:
635 vdcid = vdc.get('id').split(":")[3]
636
637 if self.client._session:
638 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
639 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
640 response = self.perform_request(req_type='GET',
641 url=vdc.get('href'),
642 headers=headers)
643 if response.status_code != 200:
644 self.logger.error("Failed to get vdc content")
645 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
646 else:
647 content = XmlElementTree.fromstring(response.content)
648
649 network_list = []
650 for item in content:
651 if item.tag.split('}')[-1] == 'AvailableNetworks':
652 for net in item:
653 response = self.perform_request(req_type='GET',
654 url=net.get('href'),
655 headers=headers)
656
657 if response.status_code != 200:
658 self.logger.error("Failed to get network content")
659 raise vimconn.vimconnNotFoundException("Failed to get network content")
660 else:
661 net_details = XmlElementTree.fromstring(response.content)
662
663 filter_entry = {}
664 net_uuid = net_details.get('id').split(":")
665 if len(net_uuid) != 4:
666 continue
667 else:
668 net_uuid = net_uuid[3]
669 # create dict entry
670 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
671 vdcid,
672 net_details.get('name')))
673 filter_entry["name"] = net_details.get('name')
674 filter_entry["id"] = net_uuid
675 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
676 shared = True
677 else:
678 shared = False
679 filter_entry["shared"] = shared
680 filter_entry["tenant_id"] = vdcid
681 if int(net_details.get('status')) == 1:
682 filter_entry["admin_state_up"] = True
683 else:
684 filter_entry["admin_state_up"] = False
685 filter_entry["status"] = "ACTIVE"
686 filter_entry["type"] = "bridge"
687 filtered_entry = filter_entry.copy()
688
689 if filter_dict is not None and filter_dict:
690 # we remove all the key : value we don't care and match only
691 # respected field
692 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
693 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
694 if filter_dict == filter_entry:
695 network_list.append(filtered_entry)
696 else:
697 network_list.append(filtered_entry)
698 except Exception as e:
699 self.logger.debug("Error in get_network_list",exc_info=True)
700 if isinstance(e, vimconn.vimconnException):
701 raise
702 else:
703 raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
704
705 self.logger.debug("Returning {}".format(network_list))
706 return network_list
707
708 def get_network(self, net_id):
709 """Method obtains network details of net_id VIM network
710 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
711
712 try:
713 org, vdc = self.get_vdc_details()
714 vdc_id = vdc.get('id').split(":")[3]
715 if self.client._session:
716 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
717 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
718 response = self.perform_request(req_type='GET',
719 url=vdc.get('href'),
720 headers=headers)
721 if response.status_code != 200:
722 self.logger.error("Failed to get vdc content")
723 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
724 else:
725 content = XmlElementTree.fromstring(response.content)
726
727 filter_dict = {}
728
729 for item in content:
730 if item.tag.split('}')[-1] == 'AvailableNetworks':
731 for net in item:
732 response = self.perform_request(req_type='GET',
733 url=net.get('href'),
734 headers=headers)
735
736 if response.status_code != 200:
737 self.logger.error("Failed to get network content")
738 raise vimconn.vimconnNotFoundException("Failed to get network content")
739 else:
740 net_details = XmlElementTree.fromstring(response.content)
741
742 vdc_network_id = net_details.get('id').split(":")
743 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
744 filter_dict["name"] = net_details.get('name')
745 filter_dict["id"] = vdc_network_id[3]
746 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
747 shared = True
748 else:
749 shared = False
750 filter_dict["shared"] = shared
751 filter_dict["tenant_id"] = vdc_id
752 if int(net_details.get('status')) == 1:
753 filter_dict["admin_state_up"] = True
754 else:
755 filter_dict["admin_state_up"] = False
756 filter_dict["status"] = "ACTIVE"
757 filter_dict["type"] = "bridge"
758 self.logger.debug("Returning {}".format(filter_dict))
759 return filter_dict
760 else:
761 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
762 except Exception as e:
763 self.logger.debug("Error in get_network")
764 self.logger.debug(traceback.format_exc())
765 if isinstance(e, vimconn.vimconnException):
766 raise
767 else:
768 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
769
770 return filter_dict
771
772 def delete_network(self, net_id):
773 """
774 Method Deletes a tenant network from VIM, provide the network id.
775
776 Returns the network identifier or raise an exception
777 """
778
779 # ############# Stub code for SRIOV #################
780 # dvport_group = self.get_dvport_group(net_id)
781 # if dvport_group:
782 # #delete portgroup
783 # status = self.destroy_dvport_group(net_id)
784 # if status:
785 # # Remove vlanID from persistent info
786 # if net_id in self.persistent_info["used_vlanIDs"]:
787 # del self.persistent_info["used_vlanIDs"][net_id]
788 #
789 # return net_id
790
791 vcd_network = self.get_vcd_network(network_uuid=net_id)
792 if vcd_network is not None and vcd_network:
793 if self.delete_network_action(network_uuid=net_id):
794 return net_id
795 else:
796 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
797
798 def refresh_nets_status(self, net_list):
799 """Get the status of the networks
800 Params: the list of network identifiers
801 Returns a dictionary with:
802 net_id: #VIM id of this network
803 status: #Mandatory. Text with one of:
804 # DELETED (not found at vim)
805 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
806 # OTHER (Vim reported other status not understood)
807 # ERROR (VIM indicates an ERROR status)
808 # ACTIVE, INACTIVE, DOWN (admin down),
809 # BUILD (on building process)
810 #
811 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
812 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
813
814 """
815
816 dict_entry = {}
817 try:
818 for net in net_list:
819 errormsg = ''
820 vcd_network = self.get_vcd_network(network_uuid=net)
821 if vcd_network is not None and vcd_network:
822 if vcd_network['status'] == '1':
823 status = 'ACTIVE'
824 else:
825 status = 'DOWN'
826 else:
827 status = 'DELETED'
828 errormsg = 'Network not found.'
829
830 dict_entry[net] = {'status': status, 'error_msg': errormsg,
831 'vim_info': yaml.safe_dump(vcd_network)}
832 except:
833 self.logger.debug("Error in refresh_nets_status")
834 self.logger.debug(traceback.format_exc())
835
836 return dict_entry
837
838 def get_flavor(self, flavor_id):
839 """Obtain flavor details from the VIM
840 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
841 """
842 if flavor_id not in vimconnector.flavorlist:
843 raise vimconn.vimconnNotFoundException("Flavor not found.")
844 return vimconnector.flavorlist[flavor_id]
845
846 def new_flavor(self, flavor_data):
847 """Adds a tenant flavor to VIM
848 flavor_data contains a dictionary with information, keys:
849 name: flavor name
850 ram: memory (cloud type) in MBytes
851 vpcus: cpus (cloud type)
852 extended: EPA parameters
853 - numas: #items requested in same NUMA
854 memory: number of 1G huge pages memory
855 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
856 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
857 - name: interface name
858 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
859 bandwidth: X Gbps; requested guarantee bandwidth
860 vpci: requested virtual PCI address
861 disk: disk size
862 is_public:
863 #TODO to concrete
864 Returns the flavor identifier"""
865
866 # generate a new uuid put to internal dict and return it.
867 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
868 new_flavor=flavor_data
869 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
870 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
871 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
872
873 if not isinstance(ram, int):
874 raise vimconn.vimconnException("Non-integer value for ram")
875 elif not isinstance(cpu, int):
876 raise vimconn.vimconnException("Non-integer value for cpu")
877 elif not isinstance(disk, int):
878 raise vimconn.vimconnException("Non-integer value for disk")
879
880 extended_flv = flavor_data.get("extended")
881 if extended_flv:
882 numas=extended_flv.get("numas")
883 if numas:
884 for numa in numas:
885 #overwrite ram and vcpus
886 if 'memory' in numa:
887 ram = numa['memory']*1024
888 if 'paired-threads' in numa:
889 cpu = numa['paired-threads']*2
890 elif 'cores' in numa:
891 cpu = numa['cores']
892 elif 'threads' in numa:
893 cpu = numa['threads']
894
895 new_flavor[FLAVOR_RAM_KEY] = ram
896 new_flavor[FLAVOR_VCPUS_KEY] = cpu
897 new_flavor[FLAVOR_DISK_KEY] = disk
898 # generate a new uuid put to internal dict and return it.
899 flavor_id = uuid.uuid4()
900 vimconnector.flavorlist[str(flavor_id)] = new_flavor
901 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
902
903 return str(flavor_id)
904
905 def delete_flavor(self, flavor_id):
906 """Deletes a tenant flavor from VIM identify by its id
907
908 Returns the used id or raise an exception
909 """
910 if flavor_id not in vimconnector.flavorlist:
911 raise vimconn.vimconnNotFoundException("Flavor not found.")
912
913 vimconnector.flavorlist.pop(flavor_id, None)
914 return flavor_id
915
916 def new_image(self, image_dict):
917 """
918 Adds a tenant image to VIM
919 Returns:
920 200, image-id if the image is created
921 <0, message if there is an error
922 """
923
924 return self.get_image_id_from_path(image_dict['location'])
925
926 def delete_image(self, image_id):
927 """
928 Deletes a tenant image from VIM
929 Args:
930 image_id is ID of Image to be deleted
931 Return:
932 returns the image identifier in UUID format or raises an exception on error
933 """
934 conn = self.connect_as_admin()
935 if not conn:
936 raise vimconn.vimconnConnectionException("Failed to connect vCD")
937 # Get Catalog details
938 url_list = [self.url, '/api/catalog/', image_id]
939 catalog_herf = ''.join(url_list)
940
941 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
942 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
943
944 response = self.perform_request(req_type='GET',
945 url=catalog_herf,
946 headers=headers)
947
948 if response.status_code != requests.codes.ok:
949 self.logger.debug("delete_image():GET REST API call {} failed. "\
950 "Return status code {}".format(catalog_herf,
951 response.status_code))
952 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
953
954 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
955 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
956 #For python3
957 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
958 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
959
960 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
961 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
962 for catalogItem in catalogItems:
963 catalogItem_href = catalogItem.attrib['href']
964
965 response = self.perform_request(req_type='GET',
966 url=catalogItem_href,
967 headers=headers)
968
969 if response.status_code != requests.codes.ok:
970 self.logger.debug("delete_image():GET REST API call {} failed. "\
971 "Return status code {}".format(catalog_herf,
972 response.status_code))
973 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
974 catalogItem,
975 image_id))
976
977 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
978 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
979 #For python3
980 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
981 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
982 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
983
984 #Remove catalogItem
985 response = self.perform_request(req_type='DELETE',
986 url=catalogitem_remove_href,
987 headers=headers)
988 if response.status_code == requests.codes.no_content:
989 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
990 else:
991 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
992
993 #Remove catalog
994 url_list = [self.url, '/api/admin/catalog/', image_id]
995 catalog_remove_herf = ''.join(url_list)
996 response = self.perform_request(req_type='DELETE',
997 url=catalog_remove_herf,
998 headers=headers)
999
1000 if response.status_code == requests.codes.no_content:
1001 self.logger.debug("Deleted Catalog {}".format(image_id))
1002 return image_id
1003 else:
1004 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
1005
1006
1007 def catalog_exists(self, catalog_name, catalogs):
1008 """
1009
1010 :param catalog_name:
1011 :param catalogs:
1012 :return:
1013 """
1014 for catalog in catalogs:
1015 if catalog['name'] == catalog_name:
1016 return True
1017 return False
1018
1019 def create_vimcatalog(self, vca=None, catalog_name=None):
1020 """ Create new catalog entry in vCloud director.
1021
1022 Args
1023 vca: vCloud director.
1024 catalog_name catalog that client wish to create. Note no validation done for a name.
1025 Client must make sure that provide valid string representation.
1026
1027 Return (bool) True if catalog created.
1028
1029 """
1030 try:
1031 result = vca.create_catalog(catalog_name, catalog_name)
1032 if result is not None:
1033 return True
1034 catalogs = vca.list_catalogs()
1035 except:
1036 return False
1037 return self.catalog_exists(catalog_name, catalogs)
1038
1039 # noinspection PyIncorrectDocstring
1040 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
1041 description='', progress=False, chunk_bytes=128 * 1024):
1042 """
1043 Uploads a OVF file to a vCloud catalog
1044
1045 :param chunk_bytes:
1046 :param progress:
1047 :param description:
1048 :param image_name:
1049 :param vca:
1050 :param catalog_name: (str): The name of the catalog to upload the media.
1051 :param media_file_name: (str): The name of the local media file to upload.
1052 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1053 """
1054 os.path.isfile(media_file_name)
1055 statinfo = os.stat(media_file_name)
1056
1057 # find a catalog entry where we upload OVF.
1058 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
1059 # status change.
1060 # if VCD can parse OVF we upload VMDK file
1061 try:
1062 for catalog in vca.list_catalogs():
1063 if catalog_name != catalog['name']:
1064 continue
1065 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
1066 data = """
1067 <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
1068 """.format(catalog_name, description)
1069
1070 if self.client:
1071 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1072 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1073 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1074
1075 response = self.perform_request(req_type='POST',
1076 url=catalog_href,
1077 headers=headers,
1078 data=data)
1079
1080 if response.status_code == requests.codes.created:
1081 catalogItem = XmlElementTree.fromstring(response.content)
1082 entity = [child for child in catalogItem if
1083 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1084 href = entity.get('href')
1085 template = href
1086
1087 response = self.perform_request(req_type='GET',
1088 url=href,
1089 headers=headers)
1090
1091 if response.status_code == requests.codes.ok:
1092 headers['Content-Type'] = 'Content-Type text/xml'
1093 result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"',response.content)
1094 if result:
1095 transfer_href = result.group(1)
1096
1097 response = self.perform_request(req_type='PUT',
1098 url=transfer_href,
1099 headers=headers,
1100 data=open(media_file_name, 'rb'))
1101 if response.status_code != requests.codes.ok:
1102 self.logger.debug(
1103 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1104 media_file_name))
1105 return False
1106
1107 # TODO fix this with aync block
1108 time.sleep(5)
1109
1110 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1111
1112 # uploading VMDK file
1113 # check status of OVF upload and upload remaining files.
1114 response = self.perform_request(req_type='GET',
1115 url=template,
1116 headers=headers)
1117
1118 if response.status_code == requests.codes.ok:
1119 result = re.search('rel="upload:default"\s*href="(.*?vmdk)"',response.content)
1120 if result:
1121 link_href = result.group(1)
1122 # we skip ovf since it already uploaded.
1123 if 'ovf' in link_href:
1124 continue
1125 # The OVF file and VMDK must be in a same directory
1126 head, tail = os.path.split(media_file_name)
1127 file_vmdk = head + '/' + link_href.split("/")[-1]
1128 if not os.path.isfile(file_vmdk):
1129 return False
1130 statinfo = os.stat(file_vmdk)
1131 if statinfo.st_size == 0:
1132 return False
1133 hrefvmdk = link_href
1134
1135 if progress:
1136 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1137 FileTransferSpeed()]
1138 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1139
1140 bytes_transferred = 0
1141 f = open(file_vmdk, 'rb')
1142 while bytes_transferred < statinfo.st_size:
1143 my_bytes = f.read(chunk_bytes)
1144 if len(my_bytes) <= chunk_bytes:
1145 headers['Content-Range'] = 'bytes %s-%s/%s' % (
1146 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1147 headers['Content-Length'] = str(len(my_bytes))
1148 response = requests.put(url=hrefvmdk,
1149 headers=headers,
1150 data=my_bytes,
1151 verify=False)
1152 if response.status_code == requests.codes.ok:
1153 bytes_transferred += len(my_bytes)
1154 if progress:
1155 progress_bar.update(bytes_transferred)
1156 else:
1157 self.logger.debug(
1158 'file upload failed with error: [%s] %s' % (response.status_code,
1159 response.content))
1160
1161 f.close()
1162 return False
1163 f.close()
1164 if progress:
1165 progress_bar.finish()
1166 time.sleep(10)
1167 return True
1168 else:
1169 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1170 format(catalog_name, media_file_name))
1171 return False
1172 except Exception as exp:
1173 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1174 .format(catalog_name,media_file_name, exp))
1175 raise vimconn.vimconnException(
1176 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1177 .format(catalog_name,media_file_name, exp))
1178
1179 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1180 return False
1181
1182 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1183 """Upload media file"""
1184 # TODO add named parameters for readability
1185
1186 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1187 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1188
1189 def validate_uuid4(self, uuid_string=None):
1190 """ Method validate correct format of UUID.
1191
1192 Return: true if string represent valid uuid
1193 """
1194 try:
1195 val = uuid.UUID(uuid_string, version=4)
1196 except ValueError:
1197 return False
1198 return True
1199
1200 def get_catalogid(self, catalog_name=None, catalogs=None):
1201 """ Method check catalog and return catalog ID in UUID format.
1202
1203 Args
1204 catalog_name: catalog name as string
1205 catalogs: list of catalogs.
1206
1207 Return: catalogs uuid
1208 """
1209
1210 for catalog in catalogs:
1211 if catalog['name'] == catalog_name:
1212 catalog_id = catalog['id']
1213 return catalog_id
1214 return None
1215
1216 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1217 """ Method check catalog and return catalog name lookup done by catalog UUID.
1218
1219 Args
1220 catalog_name: catalog name as string
1221 catalogs: list of catalogs.
1222
1223 Return: catalogs name or None
1224 """
1225
1226 if not self.validate_uuid4(uuid_string=catalog_uuid):
1227 return None
1228
1229 for catalog in catalogs:
1230 catalog_id = catalog.get('id')
1231 if catalog_id == catalog_uuid:
1232 return catalog.get('name')
1233 return None
1234
1235 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1236 """ Method check catalog and return catalog name lookup done by catalog UUID.
1237
1238 Args
1239 catalog_name: catalog name as string
1240 catalogs: list of catalogs.
1241
1242 Return: catalogs name or None
1243 """
1244
1245 if not self.validate_uuid4(uuid_string=catalog_uuid):
1246 return None
1247
1248 for catalog in catalogs:
1249 catalog_id = catalog.get('id')
1250 if catalog_id == catalog_uuid:
1251 return catalog
1252 return None
1253
1254 def get_image_id_from_path(self, path=None, progress=False):
1255 """ Method upload OVF image to vCloud director.
1256
1257 Each OVF image represented as single catalog entry in vcloud director.
1258 The method check for existing catalog entry. The check done by file name without file extension.
1259
1260 if given catalog name already present method will respond with existing catalog uuid otherwise
1261 it will create new catalog entry and upload OVF file to newly created catalog.
1262
1263 If method can't create catalog entry or upload a file it will throw exception.
1264
1265 Method accept boolean flag progress that will output progress bar. It useful method
1266 for standalone upload use case. In case to test large file upload.
1267
1268 Args
1269 path: - valid path to OVF file.
1270 progress - boolean progress bar show progress bar.
1271
1272 Return: if image uploaded correct method will provide image catalog UUID.
1273 """
1274
1275 if not path:
1276 raise vimconn.vimconnException("Image path can't be None.")
1277
1278 if not os.path.isfile(path):
1279 raise vimconn.vimconnException("Can't read file. File not found.")
1280
1281 if not os.access(path, os.R_OK):
1282 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1283
1284 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1285
1286 dirpath, filename = os.path.split(path)
1287 flname, file_extension = os.path.splitext(path)
1288 if file_extension != '.ovf':
1289 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1290 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1291
1292 catalog_name = os.path.splitext(filename)[0]
1293 catalog_md5_name = hashlib.md5(path).hexdigest()
1294 self.logger.debug("File name {} Catalog Name {} file path {} "
1295 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1296
1297 try:
1298 org,vdc = self.get_vdc_details()
1299 catalogs = org.list_catalogs()
1300 except Exception as exp:
1301 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1302 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1303
1304 if len(catalogs) == 0:
1305 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1306 result = self.create_vimcatalog(org, catalog_md5_name)
1307 if not result:
1308 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1309
1310 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1311 media_name=filename, medial_file_name=path, progress=progress)
1312 if not result:
1313 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1314 return self.get_catalogid(catalog_name, catalogs)
1315 else:
1316 for catalog in catalogs:
1317 # search for existing catalog if we find same name we return ID
1318 # TODO optimize this
1319 if catalog['name'] == catalog_md5_name:
1320 self.logger.debug("Found existing catalog entry for {} "
1321 "catalog id {}".format(catalog_name,
1322 self.get_catalogid(catalog_md5_name, catalogs)))
1323 return self.get_catalogid(catalog_md5_name, catalogs)
1324
1325 # if we didn't find existing catalog we create a new one and upload image.
1326 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1327 result = self.create_vimcatalog(org, catalog_md5_name)
1328 if not result:
1329 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1330
1331 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1332 media_name=filename, medial_file_name=path, progress=progress)
1333 if not result:
1334 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1335
1336 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1337
1338 def get_image_list(self, filter_dict={}):
1339 '''Obtain tenant images from VIM
1340 Filter_dict can be:
1341 name: image name
1342 id: image uuid
1343 checksum: image checksum
1344 location: image path
1345 Returns the image list of dictionaries:
1346 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1347 List can be empty
1348 '''
1349
1350 try:
1351 org, vdc = self.get_vdc_details()
1352 image_list = []
1353 catalogs = org.list_catalogs()
1354 if len(catalogs) == 0:
1355 return image_list
1356 else:
1357 for catalog in catalogs:
1358 catalog_uuid = catalog.get('id')
1359 name = catalog.get('name')
1360 filtered_dict = {}
1361 if filter_dict.get("name") and filter_dict["name"] != name:
1362 continue
1363 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1364 continue
1365 filtered_dict ["name"] = name
1366 filtered_dict ["id"] = catalog_uuid
1367 image_list.append(filtered_dict)
1368
1369 self.logger.debug("List of already created catalog items: {}".format(image_list))
1370 return image_list
1371 except Exception as exp:
1372 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1373
1374 def get_vappid(self, vdc=None, vapp_name=None):
1375 """ Method takes vdc object and vApp name and returns vapp uuid or None
1376
1377 Args:
1378 vdc: The VDC object.
1379 vapp_name: is application vappp name identifier
1380
1381 Returns:
1382 The return vApp name otherwise None
1383 """
1384 if vdc is None or vapp_name is None:
1385 return None
1386 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1387 try:
1388 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1389 vdc.ResourceEntities.ResourceEntity)
1390 #For python3
1391 #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1392 # if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1393 if len(refs) == 1:
1394 return refs[0].href.split("vapp")[1][1:]
1395 except Exception as e:
1396 self.logger.exception(e)
1397 return False
1398 return None
1399
1400 def check_vapp(self, vdc=None, vapp_uuid=None):
1401 """ Method Method returns True or False if vapp deployed in vCloud director
1402
1403 Args:
1404 vca: Connector to VCA
1405 vdc: The VDC object.
1406 vappid: vappid is application identifier
1407
1408 Returns:
1409 The return True if vApp deployed
1410 :param vdc:
1411 :param vapp_uuid:
1412 """
1413 try:
1414 refs = filter(lambda ref:
1415 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1416 vdc.ResourceEntities.ResourceEntity)
1417 #For python3
1418 #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1419 # if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1420 for ref in refs:
1421 vappid = ref.href.split("vapp")[1][1:]
1422 # find vapp with respected vapp uuid
1423 if vappid == vapp_uuid:
1424 return True
1425 except Exception as e:
1426 self.logger.exception(e)
1427 return False
1428 return False
1429
1430 def get_namebyvappid(self, vapp_uuid=None):
1431 """Method returns vApp name from vCD and lookup done by vapp_id.
1432
1433 Args:
1434 vapp_uuid: vappid is application identifier
1435
1436 Returns:
1437 The return vApp name otherwise None
1438 """
1439 try:
1440 if self.client and vapp_uuid:
1441 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1442 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1443 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1444
1445 response = self.perform_request(req_type='GET',
1446 url=vapp_call,
1447 headers=headers)
1448 #Retry login if session expired & retry sending request
1449 if response.status_code == 403:
1450 response = self.retry_rest('GET', vapp_call)
1451
1452 tree = XmlElementTree.fromstring(response.content)
1453 return tree.attrib['name']
1454 except Exception as e:
1455 self.logger.exception(e)
1456 return None
1457 return None
1458
1459 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1460 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1461 """Adds a VM instance to VIM
1462 Params:
1463 'start': (boolean) indicates if VM must start or created in pause mode.
1464 'image_id','flavor_id': image and flavor VIM id to use for the VM
1465 'net_list': list of interfaces, each one is a dictionary with:
1466 'name': (optional) name for the interface.
1467 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1468 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1469 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1470 'mac_address': (optional) mac address to assign to this interface
1471 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1472 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1473 'type': (mandatory) can be one of:
1474 'virtual', in this case always connected to a network of type 'net_type=bridge'
1475 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
1476 can created unconnected
1477 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1478 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1479 are allocated on the same physical NIC
1480 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1481 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1482 or True, it must apply the default VIM behaviour
1483 After execution the method will add the key:
1484 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1485 interface. 'net_list' is modified
1486 'cloud_config': (optional) dictionary with:
1487 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1488 'users': (optional) list of users to be inserted, each item is a dict with:
1489 'name': (mandatory) user name,
1490 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1491 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1492 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1493 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1494 'dest': (mandatory) string with the destination absolute path
1495 'encoding': (optional, by default text). Can be one of:
1496 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1497 'content' (mandatory): string with the content of the file
1498 'permissions': (optional) string with file permissions, typically octal notation '0644'
1499 'owner': (optional) file owner, string with the format 'owner:group'
1500 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1501 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1502 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1503 'size': (mandatory) string with the size of the disk in GB
1504 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1505 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1506 availability_zone_index is None
1507 Returns a tuple with the instance identifier and created_items or raises an exception on error
1508 created_items can be None or a dictionary where this method can include key-values that will be passed to
1509 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1510 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1511 as not present.
1512 """
1513 self.logger.info("Creating new instance for entry {}".format(name))
1514 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
1515 "availability_zone_index {} availability_zone_list {}"\
1516 .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
1517 availability_zone_index, availability_zone_list))
1518
1519 #new vm name = vmname + tenant_id + uuid
1520 new_vm_name = [name, '-', str(uuid.uuid4())]
1521 vmname_andid = ''.join(new_vm_name)
1522
1523 for net in net_list:
1524 if net['type'] == "SR-IOV" or net['type'] == "PCI-PASSTHROUGH":
1525 raise vimconn.vimconnNotSupportedException(
1526 "Current vCD version does not support type : {}".format(net['type']))
1527
1528 if len(net_list) > 10:
1529 raise vimconn.vimconnNotSupportedException(
1530 "The VM hardware versions 7 and above support upto 10 NICs only")
1531
1532 # if vm already deployed we return existing uuid
1533 # we check for presence of VDC, Catalog entry and Flavor.
1534 org, vdc = self.get_vdc_details()
1535 if vdc is None:
1536 raise vimconn.vimconnNotFoundException(
1537 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1538 catalogs = org.list_catalogs()
1539 if catalogs is None:
1540 #Retry once, if failed by refreshing token
1541 self.get_token()
1542 org = Org(self.client, resource=self.client.get_org())
1543 catalogs = org.list_catalogs()
1544 if catalogs is None:
1545 raise vimconn.vimconnNotFoundException(
1546 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1547
1548 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1549 if catalog_hash_name:
1550 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1551 else:
1552 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1553 "(Failed retrieve catalog information {})".format(name, image_id))
1554
1555
1556 # Set vCPU and Memory based on flavor.
1557 vm_cpus = None
1558 vm_memory = None
1559 vm_disk = None
1560 numas = None
1561
1562 if flavor_id is not None:
1563 if flavor_id not in vimconnector.flavorlist:
1564 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1565 "Failed retrieve flavor information "
1566 "flavor id {}".format(name, flavor_id))
1567 else:
1568 try:
1569 flavor = vimconnector.flavorlist[flavor_id]
1570 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1571 vm_memory = flavor[FLAVOR_RAM_KEY]
1572 vm_disk = flavor[FLAVOR_DISK_KEY]
1573 extended = flavor.get("extended", None)
1574 if extended:
1575 numas=extended.get("numas", None)
1576
1577 except Exception as exp:
1578 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1579
1580 # image upload creates template name as catalog name space Template.
1581 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1582 power_on = 'false'
1583 if start:
1584 power_on = 'true'
1585
1586 # client must provide at least one entry in net_list if not we report error
1587 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1588 #If no mgmt, then the 1st NN in netlist is considered as primary net.
1589 primary_net = None
1590 primary_netname = None
1591 primary_net_href = None
1592 network_mode = 'bridged'
1593 if net_list is not None and len(net_list) > 0:
1594 for net in net_list:
1595 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1596 primary_net = net
1597 if primary_net is None:
1598 primary_net = net_list[0]
1599
1600 try:
1601 primary_net_id = primary_net['net_id']
1602 url_list = [self.url, '/api/network/', primary_net_id]
1603 primary_net_href = ''.join(url_list)
1604 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1605 if 'name' in network_dict:
1606 primary_netname = network_dict['name']
1607
1608 except KeyError:
1609 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1610 else:
1611 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
1612
1613 # use: 'data', 'bridge', 'mgmt'
1614 # create vApp. Set vcpu and ram based on flavor id.
1615 try:
1616 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1617 if not vdc_obj:
1618 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
1619
1620 for retry in (1,2):
1621 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1622 catalog_items = [items.attrib]
1623
1624 if len(catalog_items) == 1:
1625 if self.client:
1626 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1627 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1628
1629 response = self.perform_request(req_type='GET',
1630 url=catalog_items[0].get('href'),
1631 headers=headers)
1632 catalogItem = XmlElementTree.fromstring(response.content)
1633 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1634 vapp_tempalte_href = entity.get("href")
1635
1636 response = self.perform_request(req_type='GET',
1637 url=vapp_tempalte_href,
1638 headers=headers)
1639 if response.status_code != requests.codes.ok:
1640 self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
1641 response.status_code))
1642 else:
1643 result = (response.content).replace("\n"," ")
1644
1645 src = re.search('<Vm goldMaster="false"\sstatus="\d+"\sname="(.*?)"\s'
1646 'id="(\w+:\w+:vm:.*?)"\shref="(.*?)"\s'
1647 'type="application/vnd\.vmware\.vcloud\.vm\+xml',result)
1648 if src:
1649 vm_name = src.group(1)
1650 vm_id = src.group(2)
1651 vm_href = src.group(3)
1652
1653 cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1654 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1655 cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
1656
1657 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
1658 vdc_id = vdc.get('id').split(':')[-1]
1659 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
1660 vdc_id)
1661 data = """<?xml version="1.0" encoding="UTF-8"?>
1662 <InstantiateVAppTemplateParams
1663 xmlns="http://www.vmware.com/vcloud/v1.5"
1664 name="{}"
1665 deploy="false"
1666 powerOn="false"
1667 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
1668 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1669 <Description>Vapp instantiation</Description>
1670 <InstantiationParams>
1671 <NetworkConfigSection>
1672 <ovf:Info>Configuration parameters for logical networks</ovf:Info>
1673 <NetworkConfig networkName="{}">
1674 <Configuration>
1675 <ParentNetwork href="{}" />
1676 <FenceMode>bridged</FenceMode>
1677 </Configuration>
1678 </NetworkConfig>
1679 </NetworkConfigSection>
1680 <LeaseSettingsSection
1681 type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
1682 <ovf:Info>Lease Settings</ovf:Info>
1683 <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
1684 <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
1685 </LeaseSettingsSection>
1686 </InstantiationParams>
1687 <Source href="{}"/>
1688 <SourcedItem>
1689 <Source href="{}" id="{}" name="{}"
1690 type="application/vnd.vmware.vcloud.vm+xml"/>
1691 <VmGeneralParams>
1692 <NeedsCustomization>false</NeedsCustomization>
1693 </VmGeneralParams>
1694 <InstantiationParams>
1695 <NetworkConnectionSection>
1696 <ovf:Info>Specifies the available VM network connections</ovf:Info>
1697 <NetworkConnection network="{}">
1698 <NetworkConnectionIndex>0</NetworkConnectionIndex>
1699 <IsConnected>true</IsConnected>
1700 <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
1701 </NetworkConnection>
1702 </NetworkConnectionSection><ovf:VirtualHardwareSection>
1703 <ovf:Info>Virtual hardware requirements</ovf:Info>
1704 <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
1705 xmlns:vmw="http://www.vmware.com/schema/ovf">
1706 <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
1707 <rasd:Description>Number of Virtual CPUs</rasd:Description>
1708 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
1709 <rasd:InstanceID>4</rasd:InstanceID>
1710 <rasd:Reservation>0</rasd:Reservation>
1711 <rasd:ResourceType>3</rasd:ResourceType>
1712 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
1713 <rasd:Weight>0</rasd:Weight>
1714 <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
1715 </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
1716 <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
1717 <rasd:Description>Memory Size</rasd:Description>
1718 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
1719 <rasd:InstanceID>5</rasd:InstanceID>
1720 <rasd:Reservation>0</rasd:Reservation>
1721 <rasd:ResourceType>4</rasd:ResourceType>
1722 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
1723 <rasd:Weight>0</rasd:Weight>
1724 </ovf:Item>
1725 </ovf:VirtualHardwareSection>
1726 </InstantiationParams>
1727 </SourcedItem>
1728 <AllEULAsAccepted>false</AllEULAsAccepted>
1729 </InstantiateVAppTemplateParams>""".format(vmname_andid,
1730 primary_netname,
1731 primary_net_href,
1732 vapp_tempalte_href,
1733 vm_href,
1734 vm_id,
1735 vm_name,
1736 primary_netname,
1737 cpu=cpus,
1738 core=cores,
1739 memory=memory_mb)
1740
1741 response = self.perform_request(req_type='POST',
1742 url=instantiate_vapp_href,
1743 headers=headers,
1744 data=data)
1745
1746 if response.status_code != 201:
1747 self.logger.error("REST call {} failed reason : {}"\
1748 "status code : {}".format(instantiate_vapp_href,
1749 response.content,
1750 response.status_code))
1751 raise vimconn.vimconnException("new_vminstance(): Failed to create"\
1752 "vAapp {}".format(vmname_andid))
1753 else:
1754 vapptask = self.get_task_from_response(response.content)
1755
1756 if vapptask is None and retry==1:
1757 self.get_token() # Retry getting token
1758 continue
1759 else:
1760 break
1761
1762 if vapptask is None or vapptask is False:
1763 raise vimconn.vimconnUnexpectedResponse(
1764 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1765
1766 # wait for task to complete
1767 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
1768
1769 if result.get('status') == 'success':
1770 self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
1771 else:
1772 raise vimconn.vimconnUnexpectedResponse(
1773 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1774
1775 except Exception as exp:
1776 raise vimconn.vimconnUnexpectedResponse(
1777 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1778
1779 # we should have now vapp in undeployed state.
1780 try:
1781 vdc_obj = VDC(self.client, href=vdc.get('href'))
1782 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1783 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1784 vapp = VApp(self.client, resource=vapp_resource)
1785
1786 except Exception as exp:
1787 raise vimconn.vimconnUnexpectedResponse(
1788 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1789 .format(vmname_andid, exp))
1790
1791 if vapp_uuid is None:
1792 raise vimconn.vimconnUnexpectedResponse(
1793 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1794 vmname_andid))
1795
1796 #Add PCI passthrough/SRIOV configrations
1797 vm_obj = None
1798 pci_devices_info = []
1799 sriov_net_info = []
1800 reserve_memory = False
1801
1802 for net in net_list:
1803 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1804 pci_devices_info.append(net)
1805 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
1806 sriov_net_info.append(net)
1807
1808 #Add PCI
1809 if len(pci_devices_info) > 0:
1810 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1811 vmname_andid ))
1812 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1813 pci_devices_info,
1814 vmname_andid)
1815 if PCI_devices_status:
1816 self.logger.info("Added PCI devives {} to VM {}".format(
1817 pci_devices_info,
1818 vmname_andid)
1819 )
1820 reserve_memory = True
1821 else:
1822 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1823 pci_devices_info,
1824 vmname_andid)
1825 )
1826
1827 # Modify vm disk
1828 if vm_disk:
1829 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1830 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1831 if result :
1832 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1833
1834 #Add new or existing disks to vApp
1835 if disk_list:
1836 added_existing_disk = False
1837 for disk in disk_list:
1838 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1839 image_id = disk['image_id']
1840 # Adding CD-ROM to VM
1841 # will revisit code once specification ready to support this feature
1842 self.insert_media_to_vm(vapp, image_id)
1843 elif "image_id" in disk and disk["image_id"] is not None:
1844 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1845 disk["image_id"] , vapp_uuid))
1846 self.add_existing_disk(catalogs=catalogs,
1847 image_id=disk["image_id"],
1848 size = disk["size"],
1849 template_name=templateName,
1850 vapp_uuid=vapp_uuid
1851 )
1852 added_existing_disk = True
1853 else:
1854 #Wait till added existing disk gets reflected into vCD database/API
1855 if added_existing_disk:
1856 time.sleep(5)
1857 added_existing_disk = False
1858 self.add_new_disk(vapp_uuid, disk['size'])
1859
1860 if numas:
1861 # Assigning numa affinity setting
1862 for numa in numas:
1863 if 'paired-threads-id' in numa:
1864 paired_threads_id = numa['paired-threads-id']
1865 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1866
1867 # add NICs & connect to networks in netlist
1868 try:
1869 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1870 primary_nic_index = 0
1871 nicIndex = 0
1872 for net in net_list:
1873 # openmano uses network id in UUID format.
1874 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
1875 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1876 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1877
1878 if 'net_id' not in net:
1879 continue
1880
1881 #Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
1882 #Same will be returned in refresh_vms_status() as vim_interface_id
1883 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1884
1885 interface_net_id = net['net_id']
1886 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1887 interface_network_mode = net['use']
1888
1889 if interface_net_name == primary_netname:
1890 nicIndex += 1
1891 continue
1892
1893 if interface_network_mode == 'mgmt':
1894 primary_nic_index = nicIndex
1895
1896 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1897 - DHCP (The IP address is obtained from a DHCP service.)
1898 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1899 - NONE (No IP addressing mode specified.)"""
1900
1901 if primary_netname is not None:
1902 nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
1903 #For python3
1904 #nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
1905 if len(nets) == 1:
1906 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
1907
1908 vdc_obj = VDC(self.client, href=vdc.get('href'))
1909 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1910 vapp = VApp(self.client, resource=vapp_resource)
1911 # connect network to VM - with all DHCP by default
1912 task = vapp.connect_org_vdc_network(nets[0].get('name'))
1913
1914 self.client.get_task_monitor().wait_for_success(task=task)
1915
1916 type_list = ('PF', 'PCI-PASSTHROUGH', 'VF', 'SR-IOV', 'VFnotShared')
1917 if 'type' in net and net['type'] not in type_list:
1918 # fetching nic type from vnf
1919 if 'model' in net:
1920 if net['model'] is not None:
1921 if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
1922 nic_type = 'VMXNET3'
1923 else:
1924 nic_type = net['model']
1925
1926 self.logger.info("new_vminstance(): adding network adapter "\
1927 "to a network {}".format(nets[0].get('name')))
1928 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1929 primary_nic_index,
1930 nicIndex,
1931 net,
1932 nic_type=nic_type)
1933 else:
1934 self.logger.info("new_vminstance(): adding network adapter "\
1935 "to a network {}".format(nets[0].get('name')))
1936 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1937 primary_nic_index,
1938 nicIndex,
1939 net)
1940 nicIndex += 1
1941
1942 # cloud-init for ssh-key injection
1943 if cloud_config:
1944 self.cloud_init(vapp,cloud_config)
1945
1946 # ############# Stub code for SRIOV #################
1947 #Add SRIOV
1948 # if len(sriov_net_info) > 0:
1949 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1950 # vmname_andid ))
1951 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1952 # sriov_net_info,
1953 # vmname_andid)
1954 # if sriov_status:
1955 # self.logger.info("Added SRIOV {} to VM {}".format(
1956 # sriov_net_info,
1957 # vmname_andid)
1958 # )
1959 # reserve_memory = True
1960 # else:
1961 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1962 # sriov_net_info,
1963 # vmname_andid)
1964 # )
1965
1966 # If VM has PCI devices or SRIOV reserve memory for VM
1967 if reserve_memory:
1968 memReserve = vm_obj.config.hardware.memoryMB
1969 spec = vim.vm.ConfigSpec()
1970 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1971 task = vm_obj.ReconfigVM_Task(spec=spec)
1972 if task:
1973 result = self.wait_for_vcenter_task(task, vcenter_conect)
1974 self.logger.info("Reserved memory {} MB for "
1975 "VM VM status: {}".format(str(memReserve), result))
1976 else:
1977 self.logger.info("Fail to reserved memory {} to VM {}".format(
1978 str(memReserve), str(vm_obj)))
1979
1980 self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
1981
1982 vapp_id = vapp_resource.get('id').split(':')[-1]
1983 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
1984 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
1985 if result.get('status') == 'success':
1986 self.logger.info("new_vminstance(): Successfully power on "\
1987 "vApp {}".format(vmname_andid))
1988 else:
1989 self.logger.error("new_vminstance(): failed to power on vApp "\
1990 "{}".format(vmname_andid))
1991
1992 except Exception as exp :
1993 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
1994 self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
1995 .format(name, exp))
1996 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
1997 .format(name, exp))
1998
1999 # check if vApp deployed and if that the case return vApp UUID otherwise -1
2000 wait_time = 0
2001 vapp_uuid = None
2002 while wait_time <= MAX_WAIT_TIME:
2003 try:
2004 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2005 vapp = VApp(self.client, resource=vapp_resource)
2006 except Exception as exp:
2007 raise vimconn.vimconnUnexpectedResponse(
2008 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
2009 .format(vmname_andid, exp))
2010
2011 #if vapp and vapp.me.deployed:
2012 if vapp and vapp_resource.get('deployed') == 'true':
2013 vapp_uuid = vapp_resource.get('id').split(':')[-1]
2014 break
2015 else:
2016 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
2017 time.sleep(INTERVAL_TIME)
2018
2019 wait_time +=INTERVAL_TIME
2020
2021 #SET Affinity Rule for VM
2022 #Pre-requisites: User has created Hosh Groups in vCenter with respective Hosts to be used
2023 #While creating VIM account user has to pass the Host Group names in availability_zone list
2024 #"availability_zone" is a part of VIM "config" parameters
2025 #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2026 #Host groups are referred as availability zones
2027 #With following procedure, deployed VM will be added into a VM group.
2028 #Then A VM to Host Affinity rule will be created using the VM group & Host group.
2029 if(availability_zone_list):
2030 self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
2031 #Admin access required for creating Affinity rules
2032 client = self.connect_as_admin()
2033 if not client:
2034 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
2035 else:
2036 self.client = client
2037 if self.client:
2038 headers = {'Accept':'application/*+xml;version=27.0',
2039 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2040 #Step1: Get provider vdc details from organization
2041 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2042 if pvdc_href is not None:
2043 #Step2: Found required pvdc, now get resource pool information
2044 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2045 if respool_href is None:
2046 #Raise error if respool_href not found
2047 msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
2048 .format(pvdc_href)
2049 self.log_message(msg)
2050
2051 #Step3: Verify requested availability zone(hostGroup) is present in vCD
2052 # get availability Zone
2053 vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
2054 # check if provided av zone(hostGroup) is present in vCD VIM
2055 status = self.check_availibility_zone(vm_az, respool_href, headers)
2056 if status is False:
2057 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
2058 "resource pool {} status: {}".format(vm_az,respool_href,status)
2059 self.log_message(msg)
2060 else:
2061 self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
2062
2063 #Step4: Find VM group references to create vm group
2064 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2065 if vmgrp_href == None:
2066 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2067 self.log_message(msg)
2068
2069 #Step5: Create a VmGroup with name az_VmGroup
2070 vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
2071 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2072 if status is not True:
2073 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
2074 self.log_message(msg)
2075
2076 #VM Group url to add vms to vm group
2077 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
2078
2079 #Step6: Add VM to VM Group
2080 #Find VM uuid from vapp_uuid
2081 vm_details = self.get_vapp_details_rest(vapp_uuid)
2082 vm_uuid = vm_details['vmuuid']
2083
2084 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2085 if status is not True:
2086 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
2087 self.log_message(msg)
2088
2089 #Step7: Create VM to Host affinity rule
2090 addrule_href = self.get_add_rule_reference (respool_href, headers)
2091 if addrule_href is None:
2092 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
2093 .format(respool_href)
2094 self.log_message(msg)
2095
2096 status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
2097 if status is False:
2098 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
2099 .format(name, vm_az)
2100 self.log_message(msg)
2101 else:
2102 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
2103 .format(name, vm_az))
2104 #Reset token to a normal user to perform other operations
2105 self.get_token()
2106
2107 if vapp_uuid is not None:
2108 return vapp_uuid, None
2109 else:
2110 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
2111
2112
2113 def get_vcd_availibility_zones(self,respool_href, headers):
2114 """ Method to find presence of av zone is VIM resource pool
2115
2116 Args:
2117 respool_href - resource pool href
2118 headers - header information
2119
2120 Returns:
2121 vcd_az - list of azone present in vCD
2122 """
2123 vcd_az = []
2124 url=respool_href
2125 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2126
2127 if resp.status_code != requests.codes.ok:
2128 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2129 else:
2130 #Get the href to hostGroups and find provided hostGroup is present in it
2131 resp_xml = XmlElementTree.fromstring(resp.content)
2132 for child in resp_xml:
2133 if 'VMWProviderVdcResourcePool' in child.tag:
2134 for schild in child:
2135 if 'Link' in schild.tag:
2136 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2137 hostGroup = schild.attrib.get('href')
2138 hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
2139 if hg_resp.status_code != requests.codes.ok:
2140 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
2141 else:
2142 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2143 for hostGroup in hg_resp_xml:
2144 if 'HostGroup' in hostGroup.tag:
2145 #append host group name to the list
2146 vcd_az.append(hostGroup.attrib.get("name"))
2147 return vcd_az
2148
2149
2150 def set_availability_zones(self):
2151 """
2152 Set vim availability zone
2153 """
2154
2155 vim_availability_zones = None
2156 availability_zone = None
2157 if 'availability_zone' in self.config:
2158 vim_availability_zones = self.config.get('availability_zone')
2159 if isinstance(vim_availability_zones, str):
2160 availability_zone = [vim_availability_zones]
2161 elif isinstance(vim_availability_zones, list):
2162 availability_zone = vim_availability_zones
2163 else:
2164 return availability_zone
2165
2166 return availability_zone
2167
2168
2169 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2170 """
2171 Return the availability zone to be used by the created VM.
2172 returns: The VIM availability zone to be used or None
2173 """
2174 if availability_zone_index is None:
2175 if not self.config.get('availability_zone'):
2176 return None
2177 elif isinstance(self.config.get('availability_zone'), str):
2178 return self.config['availability_zone']
2179 else:
2180 return self.config['availability_zone'][0]
2181
2182 vim_availability_zones = self.availability_zone
2183
2184 # check if VIM offer enough availability zones describe in the VNFD
2185 if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
2186 # check if all the names of NFV AV match VIM AV names
2187 match_by_index = False
2188 for av in availability_zone_list:
2189 if av not in vim_availability_zones:
2190 match_by_index = True
2191 break
2192 if match_by_index:
2193 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
2194 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
2195 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
2196 self.logger.debug("VIM Availability zones will be used by index")
2197 return vim_availability_zones[availability_zone_index]
2198 else:
2199 return availability_zone_list[availability_zone_index]
2200 else:
2201 raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
2202
2203
2204 def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
2205 """ Method to create VM to Host Affinity rule in vCD
2206
2207 Args:
2208 addrule_href - href to make a POST request
2209 vmgrpname - name of the VM group created
2210 hostgrpnmae - name of the host group created earlier
2211 polarity - Affinity or Anti-affinity (default: Affinity)
2212 headers - headers to make REST call
2213
2214 Returns:
2215 True- if rule is created
2216 False- Failed to create rule due to some error
2217
2218 """
2219 task_status = False
2220 rule_name = polarity + "_" + vmgrpname
2221 payload = """<?xml version="1.0" encoding="UTF-8"?>
2222 <vmext:VMWVmHostAffinityRule
2223 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2224 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2225 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2226 <vcloud:Name>{}</vcloud:Name>
2227 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2228 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2229 <vcloud:Polarity>{}</vcloud:Polarity>
2230 <vmext:HostGroupName>{}</vmext:HostGroupName>
2231 <vmext:VmGroupName>{}</vmext:VmGroupName>
2232 </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
2233
2234 resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
2235
2236 if resp.status_code != requests.codes.accepted:
2237 self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
2238 task_status = False
2239 return task_status
2240 else:
2241 affinity_task = self.get_task_from_response(resp.content)
2242 self.logger.debug ("affinity_task: {}".format(affinity_task))
2243 if affinity_task is None or affinity_task is False:
2244 raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
2245 # wait for task to complete
2246 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2247 if result.get('status') == 'success':
2248 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
2249 return True
2250 else:
2251 raise vimconn.vimconnUnexpectedResponse(
2252 "failed to create affinity rule {}".format(rule_name))
2253
2254
2255 def get_add_rule_reference (self, respool_href, headers):
2256 """ This method finds href to add vm to host affinity rule to vCD
2257
2258 Args:
2259 respool_href- href to resource pool
2260 headers- header information to make REST call
2261
2262 Returns:
2263 None - if no valid href to add rule found or
2264 addrule_href - href to add vm to host affinity rule of resource pool
2265 """
2266 addrule_href = None
2267 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2268
2269 if resp.status_code != requests.codes.ok:
2270 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2271 else:
2272
2273 resp_xml = XmlElementTree.fromstring(resp.content)
2274 for child in resp_xml:
2275 if 'VMWProviderVdcResourcePool' in child.tag:
2276 for schild in child:
2277 if 'Link' in schild.tag:
2278 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
2279 schild.attrib.get('rel') == "add":
2280 addrule_href = schild.attrib.get('href')
2281 break
2282
2283 return addrule_href
2284
2285
2286 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2287 """ Method to add deployed VM to newly created VM Group.
2288 This is required to create VM to Host affinity in vCD
2289
2290 Args:
2291 vm_uuid- newly created vm uuid
2292 vmGroupNameURL- URL to VM Group name
2293 vmGroup_name- Name of VM group created
2294 headers- Headers for REST request
2295
2296 Returns:
2297 True- if VM added to VM group successfully
2298 False- if any error encounter
2299 """
2300
2301 addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
2302
2303 if addvm_resp.status_code != requests.codes.ok:
2304 self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
2305 .format(vmGroupNameURL, addvm_resp.status_code))
2306 return False
2307 else:
2308 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2309 for child in resp_xml:
2310 if child.tag.split('}')[1] == 'Link':
2311 if child.attrib.get("rel") == "addVms":
2312 addvmtogrpURL = child.attrib.get("href")
2313
2314 #Get vm details
2315 url_list = [self.url, '/api/vApp/vm-',vm_uuid]
2316 vmdetailsURL = ''.join(url_list)
2317
2318 resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
2319
2320 if resp.status_code != requests.codes.ok:
2321 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
2322 return False
2323
2324 #Parse VM details
2325 resp_xml = XmlElementTree.fromstring(resp.content)
2326 if resp_xml.tag.split('}')[1] == "Vm":
2327 vm_id = resp_xml.attrib.get("id")
2328 vm_name = resp_xml.attrib.get("name")
2329 vm_href = resp_xml.attrib.get("href")
2330 #print vm_id, vm_name, vm_href
2331 #Add VM into VMgroup
2332 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2333 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2334 xmlns="http://www.vmware.com/vcloud/versions" \
2335 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2336 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2337 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2338 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2339 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2340 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2341 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2342 <ns2:VmReference href="{}" id="{}" name="{}" \
2343 type="application/vnd.vmware.vcloud.vm+xml" />\
2344 </ns2:Vms>""".format(vm_href, vm_id, vm_name)
2345
2346 addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
2347
2348 if addvmtogrp_resp.status_code != requests.codes.accepted:
2349 self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
2350 return False
2351 else:
2352 self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
2353 return True
2354
2355
2356 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2357 """Method to create a VM group in vCD
2358
2359 Args:
2360 vmgroup_name : Name of VM group to be created
2361 vmgroup_href : href for vmgroup
2362 headers- Headers for REST request
2363 """
2364 #POST to add URL with required data
2365 vmgroup_status = False
2366 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2367 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2368 <vmCount>1</vmCount>\
2369 </VMWVmGroup>""".format(vmgroup_name)
2370 resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
2371
2372 if resp.status_code != requests.codes.accepted:
2373 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
2374 return vmgroup_status
2375 else:
2376 vmgroup_task = self.get_task_from_response(resp.content)
2377 if vmgroup_task is None or vmgroup_task is False:
2378 raise vimconn.vimconnUnexpectedResponse(
2379 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2380
2381 # wait for task to complete
2382 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2383
2384 if result.get('status') == 'success':
2385 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
2386 #time.sleep(10)
2387 vmgroup_status = True
2388 return vmgroup_status
2389 else:
2390 raise vimconn.vimconnUnexpectedResponse(\
2391 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2392
2393
2394 def find_vmgroup_reference(self, url, headers):
2395 """ Method to create a new VMGroup which is required to add created VM
2396 Args:
2397 url- resource pool href
2398 headers- header information
2399
2400 Returns:
2401 returns href to VM group to create VM group
2402 """
2403 #Perform GET on resource pool to find 'add' link to create VMGroup
2404 #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2405 vmgrp_href = None
2406 resp = self.perform_request(req_type='GET',url=url, headers=headers)
2407
2408 if resp.status_code != requests.codes.ok:
2409 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2410 else:
2411 #Get the href to add vmGroup to vCD
2412 resp_xml = XmlElementTree.fromstring(resp.content)
2413 for child in resp_xml:
2414 if 'VMWProviderVdcResourcePool' in child.tag:
2415 for schild in child:
2416 if 'Link' in schild.tag:
2417 #Find href with type VMGroup and rel with add
2418 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
2419 and schild.attrib.get('rel') == "add":
2420 vmgrp_href = schild.attrib.get('href')
2421 return vmgrp_href
2422
2423
2424 def check_availibility_zone(self, az, respool_href, headers):
2425 """ Method to verify requested av zone is present or not in provided
2426 resource pool
2427
2428 Args:
2429 az - name of hostgroup (availibility_zone)
2430 respool_href - Resource Pool href
2431 headers - Headers to make REST call
2432 Returns:
2433 az_found - True if availibility_zone is found else False
2434 """
2435 az_found = False
2436 headers['Accept']='application/*+xml;version=27.0'
2437 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2438
2439 if resp.status_code != requests.codes.ok:
2440 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2441 else:
2442 #Get the href to hostGroups and find provided hostGroup is present in it
2443 resp_xml = XmlElementTree.fromstring(resp.content)
2444
2445 for child in resp_xml:
2446 if 'VMWProviderVdcResourcePool' in child.tag:
2447 for schild in child:
2448 if 'Link' in schild.tag:
2449 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2450 hostGroup_href = schild.attrib.get('href')
2451 hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
2452 if hg_resp.status_code != requests.codes.ok:
2453 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
2454 else:
2455 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2456 for hostGroup in hg_resp_xml:
2457 if 'HostGroup' in hostGroup.tag:
2458 if hostGroup.attrib.get("name") == az:
2459 az_found = True
2460 break
2461 return az_found
2462
2463
2464 def get_pvdc_for_org(self, org_vdc, headers):
2465 """ This method gets provider vdc references from organisation
2466
2467 Args:
2468 org_vdc - name of the organisation VDC to find pvdc
2469 headers - headers to make REST call
2470
2471 Returns:
2472 None - if no pvdc href found else
2473 pvdc_href - href to pvdc
2474 """
2475
2476 #Get provider VDC references from vCD
2477 pvdc_href = None
2478 #url = '<vcd url>/api/admin/extension/providerVdcReferences'
2479 url_list = [self.url, '/api/admin/extension/providerVdcReferences']
2480 url = ''.join(url_list)
2481
2482 response = self.perform_request(req_type='GET',url=url, headers=headers)
2483 if response.status_code != requests.codes.ok:
2484 self.logger.debug ("REST API call {} failed. Return status code {}"\
2485 .format(url, response.status_code))
2486 else:
2487 xmlroot_response = XmlElementTree.fromstring(response.content)
2488 for child in xmlroot_response:
2489 if 'ProviderVdcReference' in child.tag:
2490 pvdc_href = child.attrib.get('href')
2491 #Get vdcReferences to find org
2492 pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2493 if pvdc_resp.status_code != requests.codes.ok:
2494 raise vimconn.vimconnException("REST API call {} failed. "\
2495 "Return status code {}"\
2496 .format(url, pvdc_resp.status_code))
2497
2498 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
2499 for child in pvdc_resp_xml:
2500 if 'Link' in child.tag:
2501 if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
2502 vdc_href = child.attrib.get('href')
2503
2504 #Check if provided org is present in vdc
2505 vdc_resp = self.perform_request(req_type='GET',
2506 url=vdc_href,
2507 headers=headers)
2508 if vdc_resp.status_code != requests.codes.ok:
2509 raise vimconn.vimconnException("REST API call {} failed. "\
2510 "Return status code {}"\
2511 .format(url, vdc_resp.status_code))
2512 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
2513 for child in vdc_resp_xml:
2514 if 'VdcReference' in child.tag:
2515 if child.attrib.get('name') == org_vdc:
2516 return pvdc_href
2517
2518
2519 def get_resource_pool_details(self, pvdc_href, headers):
2520 """ Method to get resource pool information.
2521 Host groups are property of resource group.
2522 To get host groups, we need to GET details of resource pool.
2523
2524 Args:
2525 pvdc_href: href to pvdc details
2526 headers: headers
2527
2528 Returns:
2529 respool_href - Returns href link reference to resource pool
2530 """
2531 respool_href = None
2532 resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2533
2534 if resp.status_code != requests.codes.ok:
2535 self.logger.debug ("REST API call {} failed. Return status code {}"\
2536 .format(pvdc_href, resp.status_code))
2537 else:
2538 respool_resp_xml = XmlElementTree.fromstring(resp.content)
2539 for child in respool_resp_xml:
2540 if 'Link' in child.tag:
2541 if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
2542 respool_href = child.attrib.get("href")
2543 break
2544 return respool_href
2545
2546
2547 def log_message(self, msg):
2548 """
2549 Method to log error messages related to Affinity rule creation
2550 in new_vminstance & raise Exception
2551 Args :
2552 msg - Error message to be logged
2553
2554 """
2555 #get token to connect vCD as a normal user
2556 self.get_token()
2557 self.logger.debug(msg)
2558 raise vimconn.vimconnException(msg)
2559
2560
2561 ##
2562 ##
2563 ## based on current discussion
2564 ##
2565 ##
2566 ## server:
2567 # created: '2016-09-08T11:51:58'
2568 # description: simple-instance.linux1.1
2569 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
2570 # hostId: e836c036-74e7-11e6-b249-0800273e724c
2571 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
2572 # status: ACTIVE
2573 # error_msg:
2574 # interfaces: …
2575 #
2576 def get_vminstance(self, vim_vm_uuid=None):
2577 """Returns the VM instance information from VIM"""
2578
2579 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
2580
2581 org, vdc = self.get_vdc_details()
2582 if vdc is None:
2583 raise vimconn.vimconnConnectionException(
2584 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2585
2586 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
2587 if not vm_info_dict:
2588 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2589 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2590
2591 status_key = vm_info_dict['status']
2592 error = ''
2593 try:
2594 vm_dict = {'created': vm_info_dict['created'],
2595 'description': vm_info_dict['name'],
2596 'status': vcdStatusCode2manoFormat[int(status_key)],
2597 'hostId': vm_info_dict['vmuuid'],
2598 'error_msg': error,
2599 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2600
2601 if 'interfaces' in vm_info_dict:
2602 vm_dict['interfaces'] = vm_info_dict['interfaces']
2603 else:
2604 vm_dict['interfaces'] = []
2605 except KeyError:
2606 vm_dict = {'created': '',
2607 'description': '',
2608 'status': vcdStatusCode2manoFormat[int(-1)],
2609 'hostId': vm_info_dict['vmuuid'],
2610 'error_msg': "Inconsistency state",
2611 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2612
2613 return vm_dict
2614
2615 def delete_vminstance(self, vm__vim_uuid, created_items=None):
2616 """Method poweroff and remove VM instance from vcloud director network.
2617
2618 Args:
2619 vm__vim_uuid: VM UUID
2620
2621 Returns:
2622 Returns the instance identifier
2623 """
2624
2625 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
2626
2627 org, vdc = self.get_vdc_details()
2628 vdc_obj = VDC(self.client, href=vdc.get('href'))
2629 if vdc_obj is None:
2630 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
2631 self.tenant_name))
2632 raise vimconn.vimconnException(
2633 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2634
2635 try:
2636 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2637 vapp_resource = vdc_obj.get_vapp(vapp_name)
2638 vapp = VApp(self.client, resource=vapp_resource)
2639 if vapp_name is None:
2640 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2641 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2642 else:
2643 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2644
2645 # Delete vApp and wait for status change if task executed and vApp is None.
2646
2647 if vapp:
2648 if vapp_resource.get('deployed') == 'true':
2649 self.logger.info("Powering off vApp {}".format(vapp_name))
2650 #Power off vApp
2651 powered_off = False
2652 wait_time = 0
2653 while wait_time <= MAX_WAIT_TIME:
2654 power_off_task = vapp.power_off()
2655 result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
2656
2657 if result.get('status') == 'success':
2658 powered_off = True
2659 break
2660 else:
2661 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
2662 time.sleep(INTERVAL_TIME)
2663
2664 wait_time +=INTERVAL_TIME
2665 if not powered_off:
2666 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
2667 else:
2668 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
2669
2670 #Undeploy vApp
2671 self.logger.info("Undeploy vApp {}".format(vapp_name))
2672 wait_time = 0
2673 undeployed = False
2674 while wait_time <= MAX_WAIT_TIME:
2675 vapp = VApp(self.client, resource=vapp_resource)
2676 if not vapp:
2677 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2678 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2679 undeploy_task = vapp.undeploy()
2680
2681 result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
2682 if result.get('status') == 'success':
2683 undeployed = True
2684 break
2685 else:
2686 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
2687 time.sleep(INTERVAL_TIME)
2688
2689 wait_time +=INTERVAL_TIME
2690
2691 if not undeployed:
2692 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
2693
2694 # delete vapp
2695 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
2696
2697 if vapp is not None:
2698 wait_time = 0
2699 result = False
2700
2701 while wait_time <= MAX_WAIT_TIME:
2702 vapp = VApp(self.client, resource=vapp_resource)
2703 if not vapp:
2704 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2705 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2706
2707 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
2708
2709 result = self.client.get_task_monitor().wait_for_success(task=delete_task)
2710 if result.get('status') == 'success':
2711 break
2712 else:
2713 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
2714 time.sleep(INTERVAL_TIME)
2715
2716 wait_time +=INTERVAL_TIME
2717
2718 if result is None:
2719 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
2720 else:
2721 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
2722 return vm__vim_uuid
2723 except:
2724 self.logger.debug(traceback.format_exc())
2725 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
2726
2727
2728 def refresh_vms_status(self, vm_list):
2729 """Get the status of the virtual machines and their interfaces/ports
2730 Params: the list of VM identifiers
2731 Returns a dictionary with:
2732 vm_id: #VIM id of this Virtual Machine
2733 status: #Mandatory. Text with one of:
2734 # DELETED (not found at vim)
2735 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2736 # OTHER (Vim reported other status not understood)
2737 # ERROR (VIM indicates an ERROR status)
2738 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2739 # CREATING (on building process), ERROR
2740 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
2741 #
2742 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2743 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2744 interfaces:
2745 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2746 mac_address: #Text format XX:XX:XX:XX:XX:XX
2747 vim_net_id: #network id where this interface is connected
2748 vim_interface_id: #interface/port VIM id
2749 ip_address: #null, or text with IPv4, IPv6 address
2750 """
2751
2752 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
2753
2754 org,vdc = self.get_vdc_details()
2755 if vdc is None:
2756 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2757
2758 vms_dict = {}
2759 nsx_edge_list = []
2760 for vmuuid in vm_list:
2761 vapp_name = self.get_namebyvappid(vmuuid)
2762 if vapp_name is not None:
2763
2764 try:
2765 vm_pci_details = self.get_vm_pci_details(vmuuid)
2766 vdc_obj = VDC(self.client, href=vdc.get('href'))
2767 vapp_resource = vdc_obj.get_vapp(vapp_name)
2768 the_vapp = VApp(self.client, resource=vapp_resource)
2769
2770 vm_details = {}
2771 for vm in the_vapp.get_all_vms():
2772 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2773 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2774 response = self.perform_request(req_type='GET',
2775 url=vm.get('href'),
2776 headers=headers)
2777
2778 if response.status_code != 200:
2779 self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
2780 "status code : {}".format(vm.get('href'),
2781 response.content,
2782 response.status_code))
2783 raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
2784 "VM details")
2785 xmlroot = XmlElementTree.fromstring(response.content)
2786
2787
2788 result = response.content.replace("\n"," ")
2789 hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
2790 if hdd_match:
2791 hdd_mb = hdd_match.group(1)
2792 vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
2793 cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
2794 if cpus_match:
2795 cpus = cpus_match.group(1)
2796 vm_details['cpus'] = int(cpus) if cpus else None
2797 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
2798 vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
2799 vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
2800 vm_details['id'] = xmlroot.get('id')
2801 vm_details['name'] = xmlroot.get('name')
2802 vm_info = [vm_details]
2803 if vm_pci_details:
2804 vm_info[0].update(vm_pci_details)
2805
2806 vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2807 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2808 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2809
2810 # get networks
2811 vm_ip = None
2812 vm_mac = None
2813 networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
2814 for network in networks:
2815 mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
2816 vm_mac = mac_s.group(1) if mac_s else None
2817 ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
2818 vm_ip = ip_s.group(1) if ip_s else None
2819
2820 if vm_ip is None:
2821 if not nsx_edge_list:
2822 nsx_edge_list = self.get_edge_details()
2823 if nsx_edge_list is None:
2824 raise vimconn.vimconnException("refresh_vms_status:"\
2825 "Failed to get edge details from NSX Manager")
2826 if vm_mac is not None:
2827 vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
2828
2829 net_s = re.search('network="(.*?)"',network)
2830 network_name = net_s.group(1) if net_s else None
2831
2832 vm_net_id = self.get_network_id_by_name(network_name)
2833 interface = {"mac_address": vm_mac,
2834 "vim_net_id": vm_net_id,
2835 "vim_interface_id": vm_net_id,
2836 "ip_address": vm_ip}
2837
2838 vm_dict["interfaces"].append(interface)
2839
2840 # add a vm to vm dict
2841 vms_dict.setdefault(vmuuid, vm_dict)
2842 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
2843 except Exception as exp:
2844 self.logger.debug("Error in response {}".format(exp))
2845 self.logger.debug(traceback.format_exc())
2846
2847 return vms_dict
2848
2849
2850 def get_edge_details(self):
2851 """Get the NSX edge list from NSX Manager
2852 Returns list of NSX edges
2853 """
2854 edge_list = []
2855 rheaders = {'Content-Type': 'application/xml'}
2856 nsx_api_url = '/api/4.0/edges'
2857
2858 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2859
2860 try:
2861 resp = requests.get(self.nsx_manager + nsx_api_url,
2862 auth = (self.nsx_user, self.nsx_password),
2863 verify = False, headers = rheaders)
2864 if resp.status_code == requests.codes.ok:
2865 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2866 for edge_pages in paged_Edge_List:
2867 if edge_pages.tag == 'edgePage':
2868 for edge_summary in edge_pages:
2869 if edge_summary.tag == 'pagingInfo':
2870 for element in edge_summary:
2871 if element.tag == 'totalCount' and element.text == '0':
2872 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2873 .format(self.nsx_manager))
2874
2875 if edge_summary.tag == 'edgeSummary':
2876 for element in edge_summary:
2877 if element.tag == 'id':
2878 edge_list.append(element.text)
2879 else:
2880 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2881 .format(self.nsx_manager))
2882
2883 if not edge_list:
2884 raise vimconn.vimconnException("get_edge_details: "\
2885 "No NSX edge details found: {}"
2886 .format(self.nsx_manager))
2887 else:
2888 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2889 return edge_list
2890 else:
2891 self.logger.debug("get_edge_details: "
2892 "Failed to get NSX edge details from NSX Manager: {}"
2893 .format(resp.content))
2894 return None
2895
2896 except Exception as exp:
2897 self.logger.debug("get_edge_details: "\
2898 "Failed to get NSX edge details from NSX Manager: {}"
2899 .format(exp))
2900 raise vimconn.vimconnException("get_edge_details: "\
2901 "Failed to get NSX edge details from NSX Manager: {}"
2902 .format(exp))
2903
2904
2905 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2906 """Get IP address details from NSX edges, using the MAC address
2907 PARAMS: nsx_edges : List of NSX edges
2908 mac_address : Find IP address corresponding to this MAC address
2909 Returns: IP address corrresponding to the provided MAC address
2910 """
2911
2912 ip_addr = None
2913 rheaders = {'Content-Type': 'application/xml'}
2914
2915 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2916
2917 try:
2918 for edge in nsx_edges:
2919 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2920
2921 resp = requests.get(self.nsx_manager + nsx_api_url,
2922 auth = (self.nsx_user, self.nsx_password),
2923 verify = False, headers = rheaders)
2924
2925 if resp.status_code == requests.codes.ok:
2926 dhcp_leases = XmlElementTree.fromstring(resp.text)
2927 for child in dhcp_leases:
2928 if child.tag == 'dhcpLeaseInfo':
2929 dhcpLeaseInfo = child
2930 for leaseInfo in dhcpLeaseInfo:
2931 for elem in leaseInfo:
2932 if (elem.tag)=='macAddress':
2933 edge_mac_addr = elem.text
2934 if (elem.tag)=='ipAddress':
2935 ip_addr = elem.text
2936 if edge_mac_addr is not None:
2937 if edge_mac_addr == mac_address:
2938 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2939 .format(ip_addr, mac_address,edge))
2940 return ip_addr
2941 else:
2942 self.logger.debug("get_ipaddr_from_NSXedge: "\
2943 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2944 .format(resp.content))
2945
2946 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2947 return None
2948
2949 except XmlElementTree.ParseError as Err:
2950 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2951
2952
2953 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
2954 """Send and action over a VM instance from VIM
2955 Returns the vm_id if the action was successfully sent to the VIM"""
2956
2957 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2958 if vm__vim_uuid is None or action_dict is None:
2959 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2960
2961 org, vdc = self.get_vdc_details()
2962 if vdc is None:
2963 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2964
2965 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2966 if vapp_name is None:
2967 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2968 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2969 else:
2970 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2971
2972 try:
2973 vdc_obj = VDC(self.client, href=vdc.get('href'))
2974 vapp_resource = vdc_obj.get_vapp(vapp_name)
2975 vapp = VApp(self.client, resource=vapp_resource)
2976 if "start" in action_dict:
2977 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2978 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
2979 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2980 self.instance_actions_result("start", result, vapp_name)
2981 elif "rebuild" in action_dict:
2982 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2983 rebuild_task = vapp.deploy(power_on=True)
2984 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
2985 self.instance_actions_result("rebuild", result, vapp_name)
2986 elif "pause" in action_dict:
2987 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2988 pause_task = vapp.undeploy(action='suspend')
2989 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
2990 self.instance_actions_result("pause", result, vapp_name)
2991 elif "resume" in action_dict:
2992 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2993 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
2994 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2995 self.instance_actions_result("resume", result, vapp_name)
2996 elif "shutoff" in action_dict or "shutdown" in action_dict:
2997 action_name , value = action_dict.items()[0]
2998 #For python3
2999 #action_name , value = list(action_dict.items())[0]
3000 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
3001 shutdown_task = vapp.shutdown()
3002 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
3003 if action_name == "shutdown":
3004 self.instance_actions_result("shutdown", result, vapp_name)
3005 else:
3006 self.instance_actions_result("shutoff", result, vapp_name)
3007 elif "forceOff" in action_dict:
3008 result = vapp.undeploy(action='powerOff')
3009 self.instance_actions_result("forceOff", result, vapp_name)
3010 elif "reboot" in action_dict:
3011 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
3012 reboot_task = vapp.reboot()
3013 self.client.get_task_monitor().wait_for_success(task=reboot_task)
3014 else:
3015 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
3016 return vm__vim_uuid
3017 except Exception as exp :
3018 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
3019 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
3020
3021 def instance_actions_result(self, action, result, vapp_name):
3022 if result.get('status') == 'success':
3023 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
3024 else:
3025 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
3026
3027 def get_vminstance_console(self, vm_id, console_type="vnc"):
3028 """
3029 Get a console for the virtual machine
3030 Params:
3031 vm_id: uuid of the VM
3032 console_type, can be:
3033 "novnc" (by default), "xvpvnc" for VNC types,
3034 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3035 Returns dict with the console parameters:
3036 protocol: ssh, ftp, http, https, ...
3037 server: usually ip address
3038 port: the http, ssh, ... port
3039 suffix: extra text, e.g. the http path and query string
3040 """
3041 raise vimconn.vimconnNotImplemented("Should have implemented this")
3042
3043 # NOT USED METHODS in current version
3044
3045 def host_vim2gui(self, host, server_dict):
3046 """Transform host dictionary from VIM format to GUI format,
3047 and append to the server_dict
3048 """
3049 raise vimconn.vimconnNotImplemented("Should have implemented this")
3050
3051 def get_hosts_info(self):
3052 """Get the information of deployed hosts
3053 Returns the hosts content"""
3054 raise vimconn.vimconnNotImplemented("Should have implemented this")
3055
3056 def get_hosts(self, vim_tenant):
3057 """Get the hosts and deployed instances
3058 Returns the hosts content"""
3059 raise vimconn.vimconnNotImplemented("Should have implemented this")
3060
3061 def get_processor_rankings(self):
3062 """Get the processor rankings in the VIM database"""
3063 raise vimconn.vimconnNotImplemented("Should have implemented this")
3064
3065 def new_host(self, host_data):
3066 """Adds a new host to VIM"""
3067 '''Returns status code of the VIM response'''
3068 raise vimconn.vimconnNotImplemented("Should have implemented this")
3069
3070 def new_external_port(self, port_data):
3071 """Adds a external port to VIM"""
3072 '''Returns the port identifier'''
3073 raise vimconn.vimconnNotImplemented("Should have implemented this")
3074
3075 def new_external_network(self, net_name, net_type):
3076 """Adds a external network to VIM (shared)"""
3077 '''Returns the network identifier'''
3078 raise vimconn.vimconnNotImplemented("Should have implemented this")
3079
3080 def connect_port_network(self, port_id, network_id, admin=False):
3081 """Connects a external port to a network"""
3082 '''Returns status code of the VIM response'''
3083 raise vimconn.vimconnNotImplemented("Should have implemented this")
3084
3085 def new_vminstancefromJSON(self, vm_data):
3086 """Adds a VM instance to VIM"""
3087 '''Returns the instance identifier'''
3088 raise vimconn.vimconnNotImplemented("Should have implemented this")
3089
3090 def get_network_name_by_id(self, network_uuid=None):
3091 """Method gets vcloud director network named based on supplied uuid.
3092
3093 Args:
3094 network_uuid: network_id
3095
3096 Returns:
3097 The return network name.
3098 """
3099
3100 if not network_uuid:
3101 return None
3102
3103 try:
3104 org_dict = self.get_org(self.org_uuid)
3105 if 'networks' in org_dict:
3106 org_network_dict = org_dict['networks']
3107 for net_uuid in org_network_dict:
3108 if net_uuid == network_uuid:
3109 return org_network_dict[net_uuid]
3110 except:
3111 self.logger.debug("Exception in get_network_name_by_id")
3112 self.logger.debug(traceback.format_exc())
3113
3114 return None
3115
3116 def get_network_id_by_name(self, network_name=None):
3117 """Method gets vcloud director network uuid based on supplied name.
3118
3119 Args:
3120 network_name: network_name
3121 Returns:
3122 The return network uuid.
3123 network_uuid: network_id
3124 """
3125
3126 if not network_name:
3127 self.logger.debug("get_network_id_by_name() : Network name is empty")
3128 return None
3129
3130 try:
3131 org_dict = self.get_org(self.org_uuid)
3132 if org_dict and 'networks' in org_dict:
3133 org_network_dict = org_dict['networks']
3134 for net_uuid,net_name in org_network_dict.iteritems():
3135 #For python3
3136 #for net_uuid,net_name in org_network_dict.items():
3137 if net_name == network_name:
3138 return net_uuid
3139
3140 except KeyError as exp:
3141 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
3142
3143 return None
3144
3145 def list_org_action(self):
3146 """
3147 Method leverages vCloud director and query for available organization for particular user
3148
3149 Args:
3150 vca - is active VCA connection.
3151 vdc_name - is a vdc name that will be used to query vms action
3152
3153 Returns:
3154 The return XML respond
3155 """
3156 url_list = [self.url, '/api/org']
3157 vm_list_rest_call = ''.join(url_list)
3158
3159 if self.client._session:
3160 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3161 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3162
3163 response = self.perform_request(req_type='GET',
3164 url=vm_list_rest_call,
3165 headers=headers)
3166
3167 if response.status_code == 403:
3168 response = self.retry_rest('GET', vm_list_rest_call)
3169
3170 if response.status_code == requests.codes.ok:
3171 return response.content
3172
3173 return None
3174
3175 def get_org_action(self, org_uuid=None):
3176 """
3177 Method leverages vCloud director and retrieve available object for organization.
3178
3179 Args:
3180 org_uuid - vCD organization uuid
3181 self.client - is active connection.
3182
3183 Returns:
3184 The return XML respond
3185 """
3186
3187 if org_uuid is None:
3188 return None
3189
3190 url_list = [self.url, '/api/org/', org_uuid]
3191 vm_list_rest_call = ''.join(url_list)
3192
3193 if self.client._session:
3194 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3195 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3196
3197 #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
3198 response = self.perform_request(req_type='GET',
3199 url=vm_list_rest_call,
3200 headers=headers)
3201 if response.status_code == 403:
3202 response = self.retry_rest('GET', vm_list_rest_call)
3203
3204 if response.status_code == requests.codes.ok:
3205 return response.content
3206 return None
3207
3208 def get_org(self, org_uuid=None):
3209 """
3210 Method retrieves available organization in vCloud Director
3211
3212 Args:
3213 org_uuid - is a organization uuid.
3214
3215 Returns:
3216 The return dictionary with following key
3217 "network" - for network list under the org
3218 "catalogs" - for network list under the org
3219 "vdcs" - for vdc list under org
3220 """
3221
3222 org_dict = {}
3223
3224 if org_uuid is None:
3225 return org_dict
3226
3227 content = self.get_org_action(org_uuid=org_uuid)
3228 try:
3229 vdc_list = {}
3230 network_list = {}
3231 catalog_list = {}
3232 vm_list_xmlroot = XmlElementTree.fromstring(content)
3233 for child in vm_list_xmlroot:
3234 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
3235 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3236 org_dict['vdcs'] = vdc_list
3237 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
3238 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3239 org_dict['networks'] = network_list
3240 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
3241 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3242 org_dict['catalogs'] = catalog_list
3243 except:
3244 pass
3245
3246 return org_dict
3247
3248 def get_org_list(self):
3249 """
3250 Method retrieves available organization in vCloud Director
3251
3252 Args:
3253 vca - is active VCA connection.
3254
3255 Returns:
3256 The return dictionary and key for each entry VDC UUID
3257 """
3258
3259 org_dict = {}
3260
3261 content = self.list_org_action()
3262 try:
3263 vm_list_xmlroot = XmlElementTree.fromstring(content)
3264 for vm_xml in vm_list_xmlroot:
3265 if vm_xml.tag.split("}")[1] == 'Org':
3266 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
3267 org_dict[org_uuid[0]] = vm_xml.attrib['name']
3268 except:
3269 pass
3270
3271 return org_dict
3272
3273 def vms_view_action(self, vdc_name=None):
3274 """ Method leverages vCloud director vms query call
3275
3276 Args:
3277 vca - is active VCA connection.
3278 vdc_name - is a vdc name that will be used to query vms action
3279
3280 Returns:
3281 The return XML respond
3282 """
3283 vca = self.connect()
3284 if vdc_name is None:
3285 return None
3286
3287 url_list = [vca.host, '/api/vms/query']
3288 vm_list_rest_call = ''.join(url_list)
3289
3290 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
3291 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
3292 vca.vcloud_session.organization.Link)
3293 #For python3
3294 #refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and\
3295 # ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
3296 if len(refs) == 1:
3297 response = Http.get(url=vm_list_rest_call,
3298 headers=vca.vcloud_session.get_vcloud_headers(),
3299 verify=vca.verify,
3300 logger=vca.logger)
3301 if response.status_code == requests.codes.ok:
3302 return response.content
3303
3304 return None
3305
3306 def get_vapp_list(self, vdc_name=None):
3307 """
3308 Method retrieves vApp list deployed vCloud director and returns a dictionary
3309 contains a list of all vapp deployed for queried VDC.
3310 The key for a dictionary is vApp UUID
3311
3312
3313 Args:
3314 vca - is active VCA connection.
3315 vdc_name - is a vdc name that will be used to query vms action
3316
3317 Returns:
3318 The return dictionary and key for each entry vapp UUID
3319 """
3320
3321 vapp_dict = {}
3322 if vdc_name is None:
3323 return vapp_dict
3324
3325 content = self.vms_view_action(vdc_name=vdc_name)
3326 try:
3327 vm_list_xmlroot = XmlElementTree.fromstring(content)
3328 for vm_xml in vm_list_xmlroot:
3329 if vm_xml.tag.split("}")[1] == 'VMRecord':
3330 if vm_xml.attrib['isVAppTemplate'] == 'true':
3331 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
3332 if 'vappTemplate-' in rawuuid[0]:
3333 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3334 # vm and use raw UUID as key
3335 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
3336 except:
3337 pass
3338
3339 return vapp_dict
3340
3341 def get_vm_list(self, vdc_name=None):
3342 """
3343 Method retrieves VM's list deployed vCloud director. It returns a dictionary
3344 contains a list of all VM's deployed for queried VDC.
3345 The key for a dictionary is VM UUID
3346
3347
3348 Args:
3349 vca - is active VCA connection.
3350 vdc_name - is a vdc name that will be used to query vms action
3351
3352 Returns:
3353 The return dictionary and key for each entry vapp UUID
3354 """
3355 vm_dict = {}
3356
3357 if vdc_name is None:
3358 return vm_dict
3359
3360 content = self.vms_view_action(vdc_name=vdc_name)
3361 try:
3362 vm_list_xmlroot = XmlElementTree.fromstring(content)
3363 for vm_xml in vm_list_xmlroot:
3364 if vm_xml.tag.split("}")[1] == 'VMRecord':
3365 if vm_xml.attrib['isVAppTemplate'] == 'false':
3366 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3367 if 'vm-' in rawuuid[0]:
3368 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3369 # vm and use raw UUID as key
3370 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3371 except:
3372 pass
3373
3374 return vm_dict
3375
3376 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
3377 """
3378 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
3379 contains a list of all VM's deployed for queried VDC.
3380 The key for a dictionary is VM UUID
3381
3382
3383 Args:
3384 vca - is active VCA connection.
3385 vdc_name - is a vdc name that will be used to query vms action
3386
3387 Returns:
3388 The return dictionary and key for each entry vapp UUID
3389 """
3390 vm_dict = {}
3391 vca = self.connect()
3392 if not vca:
3393 raise vimconn.vimconnConnectionException("self.connect() is failed")
3394
3395 if vdc_name is None:
3396 return vm_dict
3397
3398 content = self.vms_view_action(vdc_name=vdc_name)
3399 try:
3400 vm_list_xmlroot = XmlElementTree.fromstring(content)
3401 for vm_xml in vm_list_xmlroot:
3402 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
3403 # lookup done by UUID
3404 if isuuid:
3405 if vapp_name in vm_xml.attrib['container']:
3406 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3407 if 'vm-' in rawuuid[0]:
3408 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3409 break
3410 # lookup done by Name
3411 else:
3412 if vapp_name in vm_xml.attrib['name']:
3413 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3414 if 'vm-' in rawuuid[0]:
3415 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3416 break
3417 except:
3418 pass
3419
3420 return vm_dict
3421
3422 def get_network_action(self, network_uuid=None):
3423 """
3424 Method leverages vCloud director and query network based on network uuid
3425
3426 Args:
3427 vca - is active VCA connection.
3428 network_uuid - is a network uuid
3429
3430 Returns:
3431 The return XML respond
3432 """
3433
3434 if network_uuid is None:
3435 return None
3436
3437 url_list = [self.url, '/api/network/', network_uuid]
3438 vm_list_rest_call = ''.join(url_list)
3439
3440 if self.client._session:
3441 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3442 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3443
3444 response = self.perform_request(req_type='GET',
3445 url=vm_list_rest_call,
3446 headers=headers)
3447 #Retry login if session expired & retry sending request
3448 if response.status_code == 403:
3449 response = self.retry_rest('GET', vm_list_rest_call)
3450
3451 if response.status_code == requests.codes.ok:
3452 return response.content
3453
3454 return None
3455
3456 def get_vcd_network(self, network_uuid=None):
3457 """
3458 Method retrieves available network from vCloud Director
3459
3460 Args:
3461 network_uuid - is VCD network UUID
3462
3463 Each element serialized as key : value pair
3464
3465 Following keys available for access. network_configuration['Gateway'}
3466 <Configuration>
3467 <IpScopes>
3468 <IpScope>
3469 <IsInherited>true</IsInherited>
3470 <Gateway>172.16.252.100</Gateway>
3471 <Netmask>255.255.255.0</Netmask>
3472 <Dns1>172.16.254.201</Dns1>
3473 <Dns2>172.16.254.202</Dns2>
3474 <DnsSuffix>vmwarelab.edu</DnsSuffix>
3475 <IsEnabled>true</IsEnabled>
3476 <IpRanges>
3477 <IpRange>
3478 <StartAddress>172.16.252.1</StartAddress>
3479 <EndAddress>172.16.252.99</EndAddress>
3480 </IpRange>
3481 </IpRanges>
3482 </IpScope>
3483 </IpScopes>
3484 <FenceMode>bridged</FenceMode>
3485
3486 Returns:
3487 The return dictionary and key for each entry vapp UUID
3488 """
3489
3490 network_configuration = {}
3491 if network_uuid is None:
3492 return network_uuid
3493
3494 try:
3495 content = self.get_network_action(network_uuid=network_uuid)
3496 vm_list_xmlroot = XmlElementTree.fromstring(content)
3497
3498 network_configuration['status'] = vm_list_xmlroot.get("status")
3499 network_configuration['name'] = vm_list_xmlroot.get("name")
3500 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
3501
3502 for child in vm_list_xmlroot:
3503 if child.tag.split("}")[1] == 'IsShared':
3504 network_configuration['isShared'] = child.text.strip()
3505 if child.tag.split("}")[1] == 'Configuration':
3506 for configuration in child.iter():
3507 tagKey = configuration.tag.split("}")[1].strip()
3508 if tagKey != "":
3509 network_configuration[tagKey] = configuration.text.strip()
3510 return network_configuration
3511 except Exception as exp :
3512 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
3513 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
3514
3515 return network_configuration
3516
3517 def delete_network_action(self, network_uuid=None):
3518 """
3519 Method delete given network from vCloud director
3520
3521 Args:
3522 network_uuid - is a network uuid that client wish to delete
3523
3524 Returns:
3525 The return None or XML respond or false
3526 """
3527 client = self.connect_as_admin()
3528 if not client:
3529 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
3530 if network_uuid is None:
3531 return False
3532
3533 url_list = [self.url, '/api/admin/network/', network_uuid]
3534 vm_list_rest_call = ''.join(url_list)
3535
3536 if client._session:
3537 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3538 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
3539 response = self.perform_request(req_type='DELETE',
3540 url=vm_list_rest_call,
3541 headers=headers)
3542 if response.status_code == 202:
3543 return True
3544
3545 return False
3546
3547 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3548 ip_profile=None, isshared='true'):
3549 """
3550 Method create network in vCloud director
3551
3552 Args:
3553 network_name - is network name to be created.
3554 net_type - can be 'bridge','data','ptp','mgmt'.
3555 ip_profile is a dict containing the IP parameters of the network
3556 isshared - is a boolean
3557 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3558 It optional attribute. by default if no parent network indicate the first available will be used.
3559
3560 Returns:
3561 The return network uuid or return None
3562 """
3563
3564 new_network_name = [network_name, '-', str(uuid.uuid4())]
3565 content = self.create_network_rest(network_name=''.join(new_network_name),
3566 ip_profile=ip_profile,
3567 net_type=net_type,
3568 parent_network_uuid=parent_network_uuid,
3569 isshared=isshared)
3570 if content is None:
3571 self.logger.debug("Failed create network {}.".format(network_name))
3572 return None
3573
3574 try:
3575 vm_list_xmlroot = XmlElementTree.fromstring(content)
3576 vcd_uuid = vm_list_xmlroot.get('id').split(":")
3577 if len(vcd_uuid) == 4:
3578 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
3579 return vcd_uuid[3]
3580 except:
3581 self.logger.debug("Failed create network {}".format(network_name))
3582 return None
3583
3584 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3585 ip_profile=None, isshared='true'):
3586 """
3587 Method create network in vCloud director
3588
3589 Args:
3590 network_name - is network name to be created.
3591 net_type - can be 'bridge','data','ptp','mgmt'.
3592 ip_profile is a dict containing the IP parameters of the network
3593 isshared - is a boolean
3594 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3595 It optional attribute. by default if no parent network indicate the first available will be used.
3596
3597 Returns:
3598 The return network uuid or return None
3599 """
3600 client_as_admin = self.connect_as_admin()
3601 if not client_as_admin:
3602 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3603 if network_name is None:
3604 return None
3605
3606 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3607 vm_list_rest_call = ''.join(url_list)
3608
3609 if client_as_admin._session:
3610 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3611 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3612
3613 response = self.perform_request(req_type='GET',
3614 url=vm_list_rest_call,
3615 headers=headers)
3616
3617 provider_network = None
3618 available_networks = None
3619 add_vdc_rest_url = None
3620
3621 if response.status_code != requests.codes.ok:
3622 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3623 response.status_code))
3624 return None
3625 else:
3626 try:
3627 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3628 for child in vm_list_xmlroot:
3629 if child.tag.split("}")[1] == 'ProviderVdcReference':
3630 provider_network = child.attrib.get('href')
3631 # application/vnd.vmware.admin.providervdc+xml
3632 if child.tag.split("}")[1] == 'Link':
3633 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3634 and child.attrib.get('rel') == 'add':
3635 add_vdc_rest_url = child.attrib.get('href')
3636 except:
3637 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3638 self.logger.debug("Respond body {}".format(response.content))
3639 return None
3640
3641 # find pvdc provided available network
3642 response = self.perform_request(req_type='GET',
3643 url=provider_network,
3644 headers=headers)
3645 if response.status_code != requests.codes.ok:
3646 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3647 response.status_code))
3648 return None
3649
3650 if parent_network_uuid is None:
3651 try:
3652 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3653 for child in vm_list_xmlroot.iter():
3654 if child.tag.split("}")[1] == 'AvailableNetworks':
3655 for networks in child.iter():
3656 # application/vnd.vmware.admin.network+xml
3657 if networks.attrib.get('href') is not None:
3658 available_networks = networks.attrib.get('href')
3659 break
3660 except:
3661 return None
3662
3663 try:
3664 #Configure IP profile of the network
3665 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
3666
3667 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
3668 subnet_rand = random.randint(0, 255)
3669 ip_base = "192.168.{}.".format(subnet_rand)
3670 ip_profile['subnet_address'] = ip_base + "0/24"
3671 else:
3672 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
3673
3674 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
3675 ip_profile['gateway_address']=ip_base + "1"
3676 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
3677 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
3678 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
3679 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
3680 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
3681 ip_profile['dhcp_start_address']=ip_base + "3"
3682 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
3683 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
3684 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
3685 ip_profile['dns_address']=ip_base + "2"
3686
3687 gateway_address=ip_profile['gateway_address']
3688 dhcp_count=int(ip_profile['dhcp_count'])
3689 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
3690
3691 if ip_profile['dhcp_enabled']==True:
3692 dhcp_enabled='true'
3693 else:
3694 dhcp_enabled='false'
3695 dhcp_start_address=ip_profile['dhcp_start_address']
3696
3697 #derive dhcp_end_address from dhcp_start_address & dhcp_count
3698 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
3699 end_ip_int += dhcp_count - 1
3700 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
3701
3702 ip_version=ip_profile['ip_version']
3703 dns_address=ip_profile['dns_address']
3704 except KeyError as exp:
3705 self.logger.debug("Create Network REST: Key error {}".format(exp))
3706 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
3707
3708 # either use client provided UUID or search for a first available
3709 # if both are not defined we return none
3710 if parent_network_uuid is not None:
3711 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
3712 add_vdc_rest_url = ''.join(url_list)
3713
3714 #Creating all networks as Direct Org VDC type networks.
3715 #Unused in case of Underlay (data/ptp) network interface.
3716 fence_mode="bridged"
3717 is_inherited='false'
3718 dns_list = dns_address.split(";")
3719 dns1 = dns_list[0]
3720 dns2_text = ""
3721 if len(dns_list) >= 2:
3722 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
3723 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3724 <Description>Openmano created</Description>
3725 <Configuration>
3726 <IpScopes>
3727 <IpScope>
3728 <IsInherited>{1:s}</IsInherited>
3729 <Gateway>{2:s}</Gateway>
3730 <Netmask>{3:s}</Netmask>
3731 <Dns1>{4:s}</Dns1>{5:s}
3732 <IsEnabled>{6:s}</IsEnabled>
3733 <IpRanges>
3734 <IpRange>
3735 <StartAddress>{7:s}</StartAddress>
3736 <EndAddress>{8:s}</EndAddress>
3737 </IpRange>
3738 </IpRanges>
3739 </IpScope>
3740 </IpScopes>
3741 <ParentNetwork href="{9:s}"/>
3742 <FenceMode>{10:s}</FenceMode>
3743 </Configuration>
3744 <IsShared>{11:s}</IsShared>
3745 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3746 subnet_address, dns1, dns2_text, dhcp_enabled,
3747 dhcp_start_address, dhcp_end_address, available_networks,
3748 fence_mode, isshared)
3749
3750 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
3751 try:
3752 response = self.perform_request(req_type='POST',
3753 url=add_vdc_rest_url,
3754 headers=headers,
3755 data=data)
3756
3757 if response.status_code != 201:
3758 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
3759 .format(response.status_code,response.content))
3760 else:
3761 network_task = self.get_task_from_response(response.content)
3762 self.logger.debug("Create Network REST : Waiting for Network creation complete")
3763 time.sleep(5)
3764 result = self.client.get_task_monitor().wait_for_success(task=network_task)
3765 if result.get('status') == 'success':
3766 return response.content
3767 else:
3768 self.logger.debug("create_network_rest task failed. Network Create response : {}"
3769 .format(response.content))
3770 except Exception as exp:
3771 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
3772
3773 return None
3774
3775 def convert_cidr_to_netmask(self, cidr_ip=None):
3776 """
3777 Method sets convert CIDR netmask address to normal IP format
3778 Args:
3779 cidr_ip : CIDR IP address
3780 Returns:
3781 netmask : Converted netmask
3782 """
3783 if cidr_ip is not None:
3784 if '/' in cidr_ip:
3785 network, net_bits = cidr_ip.split('/')
3786 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
3787 else:
3788 netmask = cidr_ip
3789 return netmask
3790 return None
3791
3792 def get_provider_rest(self, vca=None):
3793 """
3794 Method gets provider vdc view from vcloud director
3795
3796 Args:
3797 network_name - is network name to be created.
3798 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3799 It optional attribute. by default if no parent network indicate the first available will be used.
3800
3801 Returns:
3802 The return xml content of respond or None
3803 """
3804
3805 url_list = [self.url, '/api/admin']
3806 if vca:
3807 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3808 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3809 response = self.perform_request(req_type='GET',
3810 url=''.join(url_list),
3811 headers=headers)
3812
3813 if response.status_code == requests.codes.ok:
3814 return response.content
3815 return None
3816
3817 def create_vdc(self, vdc_name=None):
3818
3819 vdc_dict = {}
3820
3821 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
3822 if xml_content is not None:
3823 try:
3824 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
3825 for child in task_resp_xmlroot:
3826 if child.tag.split("}")[1] == 'Owner':
3827 vdc_id = child.attrib.get('href').split("/")[-1]
3828 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
3829 return vdc_dict
3830 except:
3831 self.logger.debug("Respond body {}".format(xml_content))
3832
3833 return None
3834
3835 def create_vdc_from_tmpl_rest(self, vdc_name=None):
3836 """
3837 Method create vdc in vCloud director based on VDC template.
3838 it uses pre-defined template.
3839
3840 Args:
3841 vdc_name - name of a new vdc.
3842
3843 Returns:
3844 The return xml content of respond or None
3845 """
3846 # pre-requesite atleast one vdc template should be available in vCD
3847 self.logger.info("Creating new vdc {}".format(vdc_name))
3848 vca = self.connect_as_admin()
3849 if not vca:
3850 raise vimconn.vimconnConnectionException("Failed to connect vCD")
3851 if vdc_name is None:
3852 return None
3853
3854 url_list = [self.url, '/api/vdcTemplates']
3855 vm_list_rest_call = ''.join(url_list)
3856
3857 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3858 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
3859 response = self.perform_request(req_type='GET',
3860 url=vm_list_rest_call,
3861 headers=headers)
3862
3863 # container url to a template
3864 vdc_template_ref = None
3865 try:
3866 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3867 for child in vm_list_xmlroot:
3868 # application/vnd.vmware.admin.providervdc+xml
3869 # we need find a template from witch we instantiate VDC
3870 if child.tag.split("}")[1] == 'VdcTemplate':
3871 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
3872 vdc_template_ref = child.attrib.get('href')
3873 except:
3874 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3875 self.logger.debug("Respond body {}".format(response.content))
3876 return None
3877
3878 # if we didn't found required pre defined template we return None
3879 if vdc_template_ref is None:
3880 return None
3881
3882 try:
3883 # instantiate vdc
3884 url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
3885 vm_list_rest_call = ''.join(url_list)
3886 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3887 <Source href="{1:s}"></Source>
3888 <Description>opnemano</Description>
3889 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
3890
3891 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
3892
3893 response = self.perform_request(req_type='POST',
3894 url=vm_list_rest_call,
3895 headers=headers,
3896 data=data)
3897
3898 vdc_task = self.get_task_from_response(response.content)
3899 self.client.get_task_monitor().wait_for_success(task=vdc_task)
3900
3901 # if we all ok we respond with content otherwise by default None
3902 if response.status_code >= 200 and response.status_code < 300:
3903 return response.content
3904 return None
3905 except:
3906 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3907 self.logger.debug("Respond body {}".format(response.content))
3908
3909 return None
3910
3911 def create_vdc_rest(self, vdc_name=None):
3912 """
3913 Method create network in vCloud director
3914
3915 Args:
3916 vdc_name - vdc name to be created
3917 Returns:
3918 The return response
3919 """
3920
3921 self.logger.info("Creating new vdc {}".format(vdc_name))
3922
3923 vca = self.connect_as_admin()
3924 if not vca:
3925 raise vimconn.vimconnConnectionException("Failed to connect vCD")
3926 if vdc_name is None:
3927 return None
3928
3929 url_list = [self.url, '/api/admin/org/', self.org_uuid]
3930 vm_list_rest_call = ''.join(url_list)
3931
3932 if vca._session:
3933 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3934 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3935 response = self.perform_request(req_type='GET',
3936 url=vm_list_rest_call,
3937 headers=headers)
3938
3939 provider_vdc_ref = None
3940 add_vdc_rest_url = None
3941 available_networks = None
3942
3943 if response.status_code != requests.codes.ok:
3944 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3945 response.status_code))
3946 return None
3947 else:
3948 try:
3949 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3950 for child in vm_list_xmlroot:
3951 # application/vnd.vmware.admin.providervdc+xml
3952 if child.tag.split("}")[1] == 'Link':
3953 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
3954 and child.attrib.get('rel') == 'add':
3955 add_vdc_rest_url = child.attrib.get('href')
3956 except:
3957 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3958 self.logger.debug("Respond body {}".format(response.content))
3959 return None
3960
3961 response = self.get_provider_rest(vca=vca)
3962 try:
3963 vm_list_xmlroot = XmlElementTree.fromstring(response)
3964 for child in vm_list_xmlroot:
3965 if child.tag.split("}")[1] == 'ProviderVdcReferences':
3966 for sub_child in child:
3967 provider_vdc_ref = sub_child.attrib.get('href')
3968 except:
3969 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3970 self.logger.debug("Respond body {}".format(response))
3971 return None
3972
3973 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
3974 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
3975 <AllocationModel>ReservationPool</AllocationModel>
3976 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
3977 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
3978 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
3979 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
3980 <ProviderVdcReference
3981 name="Main Provider"
3982 href="{2:s}" />
3983 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
3984 escape(vdc_name),
3985 provider_vdc_ref)
3986
3987 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
3988
3989 response = self.perform_request(req_type='POST',
3990 url=add_vdc_rest_url,
3991 headers=headers,
3992 data=data)
3993
3994 # if we all ok we respond with content otherwise by default None
3995 if response.status_code == 201:
3996 return response.content
3997 return None
3998
3999 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
4000 """
4001 Method retrieve vapp detail from vCloud director
4002
4003 Args:
4004 vapp_uuid - is vapp identifier.
4005
4006 Returns:
4007 The return network uuid or return None
4008 """
4009
4010 parsed_respond = {}
4011 vca = None
4012
4013 if need_admin_access:
4014 vca = self.connect_as_admin()
4015 else:
4016 vca = self.client
4017
4018 if not vca:
4019 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4020 if vapp_uuid is None:
4021 return None
4022
4023 url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
4024 get_vapp_restcall = ''.join(url_list)
4025
4026 if vca._session:
4027 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4028 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4029 response = self.perform_request(req_type='GET',
4030 url=get_vapp_restcall,
4031 headers=headers)
4032
4033 if response.status_code == 403:
4034 if need_admin_access == False:
4035 response = self.retry_rest('GET', get_vapp_restcall)
4036
4037 if response.status_code != requests.codes.ok:
4038 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
4039 response.status_code))
4040 return parsed_respond
4041
4042 try:
4043 xmlroot_respond = XmlElementTree.fromstring(response.content)
4044 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
4045
4046 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4047 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4048 'vmw': 'http://www.vmware.com/schema/ovf',
4049 'vm': 'http://www.vmware.com/vcloud/v1.5',
4050 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4051 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
4052 "xmlns":"http://www.vmware.com/vcloud/v1.5"
4053 }
4054
4055 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
4056 if created_section is not None:
4057 parsed_respond['created'] = created_section.text
4058
4059 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
4060 if network_section is not None and 'networkName' in network_section.attrib:
4061 parsed_respond['networkname'] = network_section.attrib['networkName']
4062
4063 ipscopes_section = \
4064 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
4065 namespaces)
4066 if ipscopes_section is not None:
4067 for ipscope in ipscopes_section:
4068 for scope in ipscope:
4069 tag_key = scope.tag.split("}")[1]
4070 if tag_key == 'IpRanges':
4071 ip_ranges = scope.getchildren()
4072 for ipblock in ip_ranges:
4073 for block in ipblock:
4074 parsed_respond[block.tag.split("}")[1]] = block.text
4075 else:
4076 parsed_respond[tag_key] = scope.text
4077
4078 # parse children section for other attrib
4079 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4080 if children_section is not None:
4081 parsed_respond['name'] = children_section.attrib['name']
4082 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
4083 if "nestedHypervisorEnabled" in children_section.attrib else None
4084 parsed_respond['deployed'] = children_section.attrib['deployed']
4085 parsed_respond['status'] = children_section.attrib['status']
4086 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
4087 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
4088 nic_list = []
4089 for adapters in network_adapter:
4090 adapter_key = adapters.tag.split("}")[1]
4091 if adapter_key == 'PrimaryNetworkConnectionIndex':
4092 parsed_respond['primarynetwork'] = adapters.text
4093 if adapter_key == 'NetworkConnection':
4094 vnic = {}
4095 if 'network' in adapters.attrib:
4096 vnic['network'] = adapters.attrib['network']
4097 for adapter in adapters:
4098 setting_key = adapter.tag.split("}")[1]
4099 vnic[setting_key] = adapter.text
4100 nic_list.append(vnic)
4101
4102 for link in children_section:
4103 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4104 if link.attrib['rel'] == 'screen:acquireTicket':
4105 parsed_respond['acquireTicket'] = link.attrib
4106 if link.attrib['rel'] == 'screen:acquireMksTicket':
4107 parsed_respond['acquireMksTicket'] = link.attrib
4108
4109 parsed_respond['interfaces'] = nic_list
4110 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4111 if vCloud_extension_section is not None:
4112 vm_vcenter_info = {}
4113 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4114 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4115 if vmext is not None:
4116 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4117 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
4118
4119 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
4120 vm_virtual_hardware_info = {}
4121 if virtual_hardware_section is not None:
4122 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
4123 if item.find("rasd:Description",namespaces).text == "Hard disk":
4124 disk_size = item.find("rasd:HostResource" ,namespaces
4125 ).attrib["{"+namespaces['vm']+"}capacity"]
4126
4127 vm_virtual_hardware_info["disk_size"]= disk_size
4128 break
4129
4130 for link in virtual_hardware_section:
4131 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4132 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
4133 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
4134 break
4135
4136 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
4137 except Exception as exp :
4138 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4139 return parsed_respond
4140
4141 def acquire_console(self, vm_uuid=None):
4142
4143 if vm_uuid is None:
4144 return None
4145 if self.client._session:
4146 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4147 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4148 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
4149 console_dict = vm_dict['acquireTicket']
4150 console_rest_call = console_dict['href']
4151
4152 response = self.perform_request(req_type='POST',
4153 url=console_rest_call,
4154 headers=headers)
4155
4156 if response.status_code == 403:
4157 response = self.retry_rest('POST', console_rest_call)
4158
4159 if response.status_code == requests.codes.ok:
4160 return response.content
4161
4162 return None
4163
4164 def modify_vm_disk(self, vapp_uuid, flavor_disk):
4165 """
4166 Method retrieve vm disk details
4167
4168 Args:
4169 vapp_uuid - is vapp identifier.
4170 flavor_disk - disk size as specified in VNFD (flavor)
4171
4172 Returns:
4173 The return network uuid or return None
4174 """
4175 status = None
4176 try:
4177 #Flavor disk is in GB convert it into MB
4178 flavor_disk = int(flavor_disk) * 1024
4179 vm_details = self.get_vapp_details_rest(vapp_uuid)
4180 if vm_details:
4181 vm_name = vm_details["name"]
4182 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
4183
4184 if vm_details and "vm_virtual_hardware" in vm_details:
4185 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
4186 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4187
4188 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
4189
4190 if flavor_disk > vm_disk:
4191 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
4192 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
4193 vm_disk, flavor_disk ))
4194 else:
4195 status = True
4196 self.logger.info("No need to modify disk of VM {}".format(vm_name))
4197
4198 return status
4199 except Exception as exp:
4200 self.logger.info("Error occurred while modifing disk size {}".format(exp))
4201
4202
4203 def modify_vm_disk_rest(self, disk_href , disk_size):
4204 """
4205 Method retrieve modify vm disk size
4206
4207 Args:
4208 disk_href - vCD API URL to GET and PUT disk data
4209 disk_size - disk size as specified in VNFD (flavor)
4210
4211 Returns:
4212 The return network uuid or return None
4213 """
4214 if disk_href is None or disk_size is None:
4215 return None
4216
4217 if self.client._session:
4218 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4219 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4220 response = self.perform_request(req_type='GET',
4221 url=disk_href,
4222 headers=headers)
4223
4224 if response.status_code == 403:
4225 response = self.retry_rest('GET', disk_href)
4226
4227 if response.status_code != requests.codes.ok:
4228 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
4229 response.status_code))
4230 return None
4231 try:
4232 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4233 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4234 #For python3
4235 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
4236 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4237
4238 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4239 if item.find("rasd:Description",namespaces).text == "Hard disk":
4240 disk_item = item.find("rasd:HostResource" ,namespaces )
4241 if disk_item is not None:
4242 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
4243 break
4244
4245 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
4246 xml_declaration=True)
4247
4248 #Send PUT request to modify disk size
4249 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4250
4251 response = self.perform_request(req_type='PUT',
4252 url=disk_href,
4253 headers=headers,
4254 data=data)
4255 if response.status_code == 403:
4256 add_headers = {'Content-Type': headers['Content-Type']}
4257 response = self.retry_rest('PUT', disk_href, add_headers, data)
4258
4259 if response.status_code != 202:
4260 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
4261 response.status_code))
4262 else:
4263 modify_disk_task = self.get_task_from_response(response.content)
4264 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
4265 if result.get('status') == 'success':
4266 return True
4267 else:
4268 return False
4269 return None
4270
4271 except Exception as exp :
4272 self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
4273 return None
4274
4275 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
4276 """
4277 Method to attach pci devices to VM
4278
4279 Args:
4280 vapp_uuid - uuid of vApp/VM
4281 pci_devices - pci devices infromation as specified in VNFD (flavor)
4282
4283 Returns:
4284 The status of add pci device task , vm object and
4285 vcenter_conect object
4286 """
4287 vm_obj = None
4288 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
4289 vcenter_conect, content = self.get_vcenter_content()
4290 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4291
4292 if vm_moref_id:
4293 try:
4294 no_of_pci_devices = len(pci_devices)
4295 if no_of_pci_devices > 0:
4296 #Get VM and its host
4297 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4298 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4299 if host_obj and vm_obj:
4300 #get PCI devies from host on which vapp is currently installed
4301 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
4302
4303 if avilable_pci_devices is None:
4304 #find other hosts with active pci devices
4305 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
4306 content,
4307 no_of_pci_devices
4308 )
4309
4310 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4311 #Migrate vm to the host where PCI devices are availble
4312 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
4313 task = self.relocate_vm(new_host_obj, vm_obj)
4314 if task is not None:
4315 result = self.wait_for_vcenter_task(task, vcenter_conect)
4316 self.logger.info("Migrate VM status: {}".format(result))
4317 host_obj = new_host_obj
4318 else:
4319 self.logger.info("Fail to migrate VM : {}".format(result))
4320 raise vimconn.vimconnNotFoundException(
4321 "Fail to migrate VM : {} to host {}".format(
4322 vmname_andid,
4323 new_host_obj)
4324 )
4325
4326 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4327 #Add PCI devices one by one
4328 for pci_device in avilable_pci_devices:
4329 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
4330 if task:
4331 status= self.wait_for_vcenter_task(task, vcenter_conect)
4332 if status:
4333 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4334 else:
4335 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4336 return True, vm_obj, vcenter_conect
4337 else:
4338 self.logger.error("Currently there is no host with"\
4339 " {} number of avaialble PCI devices required for VM {}".format(
4340 no_of_pci_devices,
4341 vmname_andid)
4342 )
4343 raise vimconn.vimconnNotFoundException(
4344 "Currently there is no host with {} "\
4345 "number of avaialble PCI devices required for VM {}".format(
4346 no_of_pci_devices,
4347 vmname_andid))
4348 else:
4349 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
4350
4351 except vmodl.MethodFault as error:
4352 self.logger.error("Error occurred while adding PCI devices {} ",error)
4353 return None, vm_obj, vcenter_conect
4354
4355 def get_vm_obj(self, content, mob_id):
4356 """
4357 Method to get the vsphere VM object associated with a given morf ID
4358 Args:
4359 vapp_uuid - uuid of vApp/VM
4360 content - vCenter content object
4361 mob_id - mob_id of VM
4362
4363 Returns:
4364 VM and host object
4365 """
4366 vm_obj = None
4367 host_obj = None
4368 try :
4369 container = content.viewManager.CreateContainerView(content.rootFolder,
4370 [vim.VirtualMachine], True
4371 )
4372 for vm in container.view:
4373 mobID = vm._GetMoId()
4374 if mobID == mob_id:
4375 vm_obj = vm
4376 host_obj = vm_obj.runtime.host
4377 break
4378 except Exception as exp:
4379 self.logger.error("Error occurred while finding VM object : {}".format(exp))
4380 return host_obj, vm_obj
4381
4382 def get_pci_devices(self, host, need_devices):
4383 """
4384 Method to get the details of pci devices on given host
4385 Args:
4386 host - vSphere host object
4387 need_devices - number of pci devices needed on host
4388
4389 Returns:
4390 array of pci devices
4391 """
4392 all_devices = []
4393 all_device_ids = []
4394 used_devices_ids = []
4395
4396 try:
4397 if host:
4398 pciPassthruInfo = host.config.pciPassthruInfo
4399 pciDevies = host.hardware.pciDevice
4400
4401 for pci_status in pciPassthruInfo:
4402 if pci_status.passthruActive:
4403 for device in pciDevies:
4404 if device.id == pci_status.id:
4405 all_device_ids.append(device.id)
4406 all_devices.append(device)
4407
4408 #check if devices are in use
4409 avalible_devices = all_devices
4410 for vm in host.vm:
4411 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
4412 vm_devices = vm.config.hardware.device
4413 for device in vm_devices:
4414 if type(device) is vim.vm.device.VirtualPCIPassthrough:
4415 if device.backing.id in all_device_ids:
4416 for use_device in avalible_devices:
4417 if use_device.id == device.backing.id:
4418 avalible_devices.remove(use_device)
4419 used_devices_ids.append(device.backing.id)
4420 self.logger.debug("Device {} from devices {}"\
4421 "is in use".format(device.backing.id,
4422 device)
4423 )
4424 if len(avalible_devices) < need_devices:
4425 self.logger.debug("Host {} don't have {} number of active devices".format(host,
4426 need_devices))
4427 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
4428 avalible_devices))
4429 return None
4430 else:
4431 required_devices = avalible_devices[:need_devices]
4432 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
4433 len(avalible_devices),
4434 host,
4435 need_devices))
4436 self.logger.info("Retruning {} devices as {}".format(need_devices,
4437 required_devices ))
4438 return required_devices
4439
4440 except Exception as exp:
4441 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
4442
4443 return None
4444
4445 def get_host_and_PCIdevices(self, content, need_devices):
4446 """
4447 Method to get the details of pci devices infromation on all hosts
4448
4449 Args:
4450 content - vSphere host object
4451 need_devices - number of pci devices needed on host
4452
4453 Returns:
4454 array of pci devices and host object
4455 """
4456 host_obj = None
4457 pci_device_objs = None
4458 try:
4459 if content:
4460 container = content.viewManager.CreateContainerView(content.rootFolder,
4461 [vim.HostSystem], True)
4462 for host in container.view:
4463 devices = self.get_pci_devices(host, need_devices)
4464 if devices:
4465 host_obj = host
4466 pci_device_objs = devices
4467 break
4468 except Exception as exp:
4469 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
4470
4471 return host_obj,pci_device_objs
4472
4473 def relocate_vm(self, dest_host, vm) :
4474 """
4475 Method to get the relocate VM to new host
4476
4477 Args:
4478 dest_host - vSphere host object
4479 vm - vSphere VM object
4480
4481 Returns:
4482 task object
4483 """
4484 task = None
4485 try:
4486 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
4487 task = vm.Relocate(relocate_spec)
4488 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
4489 except Exception as exp:
4490 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
4491 dest_host, vm, exp))
4492 return task
4493
4494 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
4495 """
4496 Waits and provides updates on a vSphere task
4497 """
4498 while task.info.state == vim.TaskInfo.State.running:
4499 time.sleep(2)
4500
4501 if task.info.state == vim.TaskInfo.State.success:
4502 if task.info.result is not None and not hideResult:
4503 self.logger.info('{} completed successfully, result: {}'.format(
4504 actionName,
4505 task.info.result))
4506 else:
4507 self.logger.info('Task {} completed successfully.'.format(actionName))
4508 else:
4509 self.logger.error('{} did not complete successfully: {} '.format(
4510 actionName,
4511 task.info.error)
4512 )
4513
4514 return task.info.result
4515
4516 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
4517 """
4518 Method to add pci device in given VM
4519
4520 Args:
4521 host_object - vSphere host object
4522 vm_object - vSphere VM object
4523 host_pci_dev - host_pci_dev must be one of the devices from the
4524 host_object.hardware.pciDevice list
4525 which is configured as a PCI passthrough device
4526
4527 Returns:
4528 task object
4529 """
4530 task = None
4531 if vm_object and host_object and host_pci_dev:
4532 try :
4533 #Add PCI device to VM
4534 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
4535 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
4536
4537 if host_pci_dev.id not in systemid_by_pciid:
4538 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
4539 return None
4540
4541 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
4542 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
4543 id=host_pci_dev.id,
4544 systemId=systemid_by_pciid[host_pci_dev.id],
4545 vendorId=host_pci_dev.vendorId,
4546 deviceName=host_pci_dev.deviceName)
4547
4548 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
4549
4550 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
4551 new_device_config.operation = "add"
4552 vmConfigSpec = vim.vm.ConfigSpec()
4553 vmConfigSpec.deviceChange = [new_device_config]
4554
4555 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
4556 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
4557 host_pci_dev, vm_object, host_object)
4558 )
4559 except Exception as exp:
4560 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
4561 host_pci_dev,
4562 vm_object,
4563 exp))
4564 return task
4565
4566 def get_vm_vcenter_info(self):
4567 """
4568 Method to get details of vCenter and vm
4569
4570 Args:
4571 vapp_uuid - uuid of vApp or VM
4572
4573 Returns:
4574 Moref Id of VM and deails of vCenter
4575 """
4576 vm_vcenter_info = {}
4577
4578 if self.vcenter_ip is not None:
4579 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
4580 else:
4581 raise vimconn.vimconnException(message="vCenter IP is not provided."\
4582 " Please provide vCenter IP while attaching datacenter to tenant in --config")
4583 if self.vcenter_port is not None:
4584 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
4585 else:
4586 raise vimconn.vimconnException(message="vCenter port is not provided."\
4587 " Please provide vCenter port while attaching datacenter to tenant in --config")
4588 if self.vcenter_user is not None:
4589 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
4590 else:
4591 raise vimconn.vimconnException(message="vCenter user is not provided."\
4592 " Please provide vCenter user while attaching datacenter to tenant in --config")
4593
4594 if self.vcenter_password is not None:
4595 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
4596 else:
4597 raise vimconn.vimconnException(message="vCenter user password is not provided."\
4598 " Please provide vCenter user password while attaching datacenter to tenant in --config")
4599
4600 return vm_vcenter_info
4601
4602
4603 def get_vm_pci_details(self, vmuuid):
4604 """
4605 Method to get VM PCI device details from vCenter
4606
4607 Args:
4608 vm_obj - vSphere VM object
4609
4610 Returns:
4611 dict of PCI devives attached to VM
4612
4613 """
4614 vm_pci_devices_info = {}
4615 try:
4616 vcenter_conect, content = self.get_vcenter_content()
4617 vm_moref_id = self.get_vm_moref_id(vmuuid)
4618 if vm_moref_id:
4619 #Get VM and its host
4620 if content:
4621 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4622 if host_obj and vm_obj:
4623 vm_pci_devices_info["host_name"]= host_obj.name
4624 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
4625 for device in vm_obj.config.hardware.device:
4626 if type(device) == vim.vm.device.VirtualPCIPassthrough:
4627 device_details={'devide_id':device.backing.id,
4628 'pciSlotNumber':device.slotInfo.pciSlotNumber,
4629 }
4630 vm_pci_devices_info[device.deviceInfo.label] = device_details
4631 else:
4632 self.logger.error("Can not connect to vCenter while getting "\
4633 "PCI devices infromationn")
4634 return vm_pci_devices_info
4635 except Exception as exp:
4636 self.logger.error("Error occurred while getting VM infromationn"\
4637 " for VM : {}".format(exp))
4638 raise vimconn.vimconnException(message=exp)
4639
4640 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
4641 """
4642 Method to add network adapter type to vm
4643 Args :
4644 network_name - name of network
4645 primary_nic_index - int value for primary nic index
4646 nicIndex - int value for nic index
4647 nic_type - specify model name to which add to vm
4648 Returns:
4649 None
4650 """
4651
4652 self.logger.info("Add network adapter to VM: network_name {} nicIndex {}".\
4653 format(network_name, nicIndex))
4654 try:
4655 ip_address = None
4656 floating_ip = False
4657 mac_address = None
4658 if 'floating_ip' in net: floating_ip = net['floating_ip']
4659
4660 # Stub for ip_address feature
4661 if 'ip_address' in net: ip_address = net['ip_address']
4662
4663 if 'mac_address' in net: mac_address = net['mac_address']
4664
4665 if floating_ip:
4666 allocation_mode = "POOL"
4667 elif ip_address:
4668 allocation_mode = "MANUAL"
4669 else:
4670 allocation_mode = "DHCP"
4671
4672 if not nic_type:
4673 for vms in vapp.get_all_vms():
4674 vm_id = vms.get('id').split(':')[-1]
4675
4676 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
4677
4678 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4679 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4680 response = self.perform_request(req_type='GET',
4681 url=url_rest_call,
4682 headers=headers)
4683
4684 if response.status_code == 403:
4685 response = self.retry_rest('GET', url_rest_call)
4686
4687 if response.status_code != 200:
4688 self.logger.error("REST call {} failed reason : {}"\
4689 "status code : {}".format(url_rest_call,
4690 response.content,
4691 response.status_code))
4692 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
4693 "network connection section")
4694
4695 data = response.content
4696 data = data.split('<Link rel="edit"')[0]
4697 if '<PrimaryNetworkConnectionIndex>' not in data:
4698 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
4699 <NetworkConnection network="{}">
4700 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
4701 <IsConnected>true</IsConnected>
4702 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
4703 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
4704 allocation_mode)
4705 # Stub for ip_address feature
4706 if ip_address:
4707 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
4708 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
4709
4710 if mac_address:
4711 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
4712 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
4713
4714 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
4715 else:
4716 new_item = """<NetworkConnection network="{}">
4717 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
4718 <IsConnected>true</IsConnected>
4719 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
4720 </NetworkConnection>""".format(network_name, nicIndex,
4721 allocation_mode)
4722 # Stub for ip_address feature
4723 if ip_address:
4724 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
4725 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
4726
4727 if mac_address:
4728 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
4729 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
4730
4731 data = data + new_item + '</NetworkConnectionSection>'
4732
4733 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
4734
4735 response = self.perform_request(req_type='PUT',
4736 url=url_rest_call,
4737 headers=headers,
4738 data=data)
4739
4740 if response.status_code == 403:
4741 add_headers = {'Content-Type': headers['Content-Type']}
4742 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
4743
4744 if response.status_code != 202:
4745 self.logger.error("REST call {} failed reason : {}"\
4746 "status code : {} ".format(url_rest_call,
4747 response.content,
4748 response.status_code))
4749 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
4750 "network connection section")
4751 else:
4752 nic_task = self.get_task_from_response(response.content)
4753 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
4754 if result.get('status') == 'success':
4755 self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
4756 "default NIC type".format(vm_id))
4757 else:
4758 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
4759 "connect NIC type".format(vm_id))
4760 else:
4761 for vms in vapp.get_all_vms():
4762 vm_id = vms.get('id').split(':')[-1]
4763
4764
4765 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
4766
4767 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4768 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4769 response = self.perform_request(req_type='GET',
4770 url=url_rest_call,
4771 headers=headers)
4772
4773 if response.status_code == 403:
4774 response = self.retry_rest('GET', url_rest_call)
4775
4776 if response.status_code != 200:
4777 self.logger.error("REST call {} failed reason : {}"\
4778 "status code : {}".format(url_rest_call,
4779 response.content,
4780 response.status_code))
4781 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
4782 "network connection section")
4783 data = response.content
4784 data = data.split('<Link rel="edit"')[0]
4785 if '<PrimaryNetworkConnectionIndex>' not in data:
4786 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
4787 <NetworkConnection network="{}">
4788 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
4789 <IsConnected>true</IsConnected>
4790 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
4791 <NetworkAdapterType>{}</NetworkAdapterType>
4792 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
4793 allocation_mode, nic_type)
4794 # Stub for ip_address feature
4795 if ip_address:
4796 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
4797 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
4798
4799 if mac_address:
4800 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
4801 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
4802
4803 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
4804 else:
4805 new_item = """<NetworkConnection network="{}">
4806 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
4807 <IsConnected>true</IsConnected>
4808 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
4809 <NetworkAdapterType>{}</NetworkAdapterType>
4810 </NetworkConnection>""".format(network_name, nicIndex,
4811 allocation_mode, nic_type)
4812 # Stub for ip_address feature
4813 if ip_address:
4814 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
4815 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
4816
4817 if mac_address:
4818 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
4819 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
4820
4821 data = data + new_item + '</NetworkConnectionSection>'
4822
4823 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
4824
4825 response = self.perform_request(req_type='PUT',
4826 url=url_rest_call,
4827 headers=headers,
4828 data=data)
4829
4830 if response.status_code == 403:
4831 add_headers = {'Content-Type': headers['Content-Type']}
4832 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
4833
4834 if response.status_code != 202:
4835 self.logger.error("REST call {} failed reason : {}"\
4836 "status code : {}".format(url_rest_call,
4837 response.content,
4838 response.status_code))
4839 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
4840 "network connection section")
4841 else:
4842 nic_task = self.get_task_from_response(response.content)
4843 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
4844 if result.get('status') == 'success':
4845 self.logger.info("add_network_adapter_to_vms(): VM {} "\
4846 "conneced to NIC type {}".format(vm_id, nic_type))
4847 else:
4848 self.logger.error("add_network_adapter_to_vms(): VM {} "\
4849 "failed to connect NIC type {}".format(vm_id, nic_type))
4850 except Exception as exp:
4851 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
4852 "while adding Network adapter")
4853 raise vimconn.vimconnException(message=exp)
4854
4855
4856 def set_numa_affinity(self, vmuuid, paired_threads_id):
4857 """
4858 Method to assign numa affinity in vm configuration parammeters
4859 Args :
4860 vmuuid - vm uuid
4861 paired_threads_id - one or more virtual processor
4862 numbers
4863 Returns:
4864 return if True
4865 """
4866 try:
4867 vcenter_conect, content = self.get_vcenter_content()
4868 vm_moref_id = self.get_vm_moref_id(vmuuid)
4869
4870 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
4871 if vm_obj:
4872 config_spec = vim.vm.ConfigSpec()
4873 config_spec.extraConfig = []
4874 opt = vim.option.OptionValue()
4875 opt.key = 'numa.nodeAffinity'
4876 opt.value = str(paired_threads_id)
4877 config_spec.extraConfig.append(opt)
4878 task = vm_obj.ReconfigVM_Task(config_spec)
4879 if task:
4880 result = self.wait_for_vcenter_task(task, vcenter_conect)
4881 extra_config = vm_obj.config.extraConfig
4882 flag = False
4883 for opts in extra_config:
4884 if 'numa.nodeAffinity' in opts.key:
4885 flag = True
4886 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
4887 "value {} for vm {}".format(opt.value, vm_obj))
4888 if flag:
4889 return
4890 else:
4891 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
4892 except Exception as exp:
4893 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
4894 "for VM {} : {}".format(vm_obj, vm_moref_id))
4895 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
4896 "affinity".format(exp))
4897
4898
4899 def cloud_init(self, vapp, cloud_config):
4900 """
4901 Method to inject ssh-key
4902 vapp - vapp object
4903 cloud_config a dictionary with:
4904 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
4905 'users': (optional) list of users to be inserted, each item is a dict with:
4906 'name': (mandatory) user name,
4907 'key-pairs': (optional) list of strings with the public key to be inserted to the user
4908 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
4909 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
4910 'config-files': (optional). List of files to be transferred. Each item is a dict with:
4911 'dest': (mandatory) string with the destination absolute path
4912 'encoding': (optional, by default text). Can be one of:
4913 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
4914 'content' (mandatory): string with the content of the file
4915 'permissions': (optional) string with file permissions, typically octal notation '0644'
4916 'owner': (optional) file owner, string with the format 'owner:group'
4917 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
4918 """
4919 try:
4920 if not isinstance(cloud_config, dict):
4921 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
4922 else:
4923 key_pairs = []
4924 userdata = []
4925 if "key-pairs" in cloud_config:
4926 key_pairs = cloud_config["key-pairs"]
4927
4928 if "users" in cloud_config:
4929 userdata = cloud_config["users"]
4930
4931 self.logger.debug("cloud_init : Guest os customization started..")
4932 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
4933 customize_script = customize_script.replace("&","&amp;")
4934 self.guest_customization(vapp, customize_script)
4935
4936 except Exception as exp:
4937 self.logger.error("cloud_init : exception occurred while injecting "\
4938 "ssh-key")
4939 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
4940 "ssh-key".format(exp))
4941
4942 def format_script(self, key_pairs=[], users_list=[]):
4943 bash_script = """#!/bin/sh
4944 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4945 if [ "$1" = "precustomization" ];then
4946 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4947 """
4948
4949 keys = "\n".join(key_pairs)
4950 if keys:
4951 keys_data = """
4952 if [ ! -d /root/.ssh ];then
4953 mkdir /root/.ssh
4954 chown root:root /root/.ssh
4955 chmod 700 /root/.ssh
4956 touch /root/.ssh/authorized_keys
4957 chown root:root /root/.ssh/authorized_keys
4958 chmod 600 /root/.ssh/authorized_keys
4959 # make centos with selinux happy
4960 which restorecon && restorecon -Rv /root/.ssh
4961 else
4962 touch /root/.ssh/authorized_keys
4963 chown root:root /root/.ssh/authorized_keys
4964 chmod 600 /root/.ssh/authorized_keys
4965 fi
4966 echo '{key}' >> /root/.ssh/authorized_keys
4967 """.format(key=keys)
4968
4969 bash_script+= keys_data
4970
4971 for user in users_list:
4972 if 'name' in user: user_name = user['name']
4973 if 'key-pairs' in user:
4974 user_keys = "\n".join(user['key-pairs'])
4975 else:
4976 user_keys = None
4977
4978 add_user_name = """
4979 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
4980 """.format(user_name=user_name)
4981
4982 bash_script+= add_user_name
4983
4984 if user_keys:
4985 user_keys_data = """
4986 mkdir /home/{user_name}/.ssh
4987 chown {user_name}:{user_name} /home/{user_name}/.ssh
4988 chmod 700 /home/{user_name}/.ssh
4989 touch /home/{user_name}/.ssh/authorized_keys
4990 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
4991 chmod 600 /home/{user_name}/.ssh/authorized_keys
4992 # make centos with selinux happy
4993 which restorecon && restorecon -Rv /home/{user_name}/.ssh
4994 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
4995 """.format(user_name=user_name,user_key=user_keys)
4996
4997 bash_script+= user_keys_data
4998
4999 return bash_script+"\n\tfi"
5000
5001 def guest_customization(self, vapp, customize_script):
5002 """
5003 Method to customize guest os
5004 vapp - Vapp object
5005 customize_script - Customize script to be run at first boot of VM.
5006 """
5007 for vm in vapp.get_all_vms():
5008 vm_id = vm.get('id').split(':')[-1]
5009 vm_name = vm.get('name')
5010 vm_name = vm_name.replace('_','-')
5011
5012 vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
5013 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5014 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5015
5016 headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
5017
5018 data = """<GuestCustomizationSection
5019 xmlns="http://www.vmware.com/vcloud/v1.5"
5020 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5021 ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
5022 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
5023 <Enabled>true</Enabled>
5024 <ChangeSid>false</ChangeSid>
5025 <VirtualMachineId>{}</VirtualMachineId>
5026 <JoinDomainEnabled>false</JoinDomainEnabled>
5027 <UseOrgSettings>false</UseOrgSettings>
5028 <AdminPasswordEnabled>false</AdminPasswordEnabled>
5029 <AdminPasswordAuto>true</AdminPasswordAuto>
5030 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
5031 <AdminAutoLogonCount>0</AdminAutoLogonCount>
5032 <ResetPasswordRequired>false</ResetPasswordRequired>
5033 <CustomizationScript>{}</CustomizationScript>
5034 <ComputerName>{}</ComputerName>
5035 <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
5036 </GuestCustomizationSection>
5037 """.format(vm_customization_url,
5038 vm_id,
5039 customize_script,
5040 vm_name,
5041 vm_customization_url)
5042
5043 response = self.perform_request(req_type='PUT',
5044 url=vm_customization_url,
5045 headers=headers,
5046 data=data)
5047 if response.status_code == 202:
5048 guest_task = self.get_task_from_response(response.content)
5049 self.client.get_task_monitor().wait_for_success(task=guest_task)
5050 self.logger.info("guest_customization : customized guest os task "\
5051 "completed for VM {}".format(vm_name))
5052 else:
5053 self.logger.error("guest_customization : task for customized guest os"\
5054 "failed for VM {}".format(vm_name))
5055 raise vimconn.vimconnException("guest_customization : failed to perform"\
5056 "guest os customization on VM {}".format(vm_name))
5057
5058 def add_new_disk(self, vapp_uuid, disk_size):
5059 """
5060 Method to create an empty vm disk
5061
5062 Args:
5063 vapp_uuid - is vapp identifier.
5064 disk_size - size of disk to be created in GB
5065
5066 Returns:
5067 None
5068 """
5069 status = False
5070 vm_details = None
5071 try:
5072 #Disk size in GB, convert it into MB
5073 if disk_size is not None:
5074 disk_size_mb = int(disk_size) * 1024
5075 vm_details = self.get_vapp_details_rest(vapp_uuid)
5076
5077 if vm_details and "vm_virtual_hardware" in vm_details:
5078 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5079 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5080 status = self.add_new_disk_rest(disk_href, disk_size_mb)
5081
5082 except Exception as exp:
5083 msg = "Error occurred while creating new disk {}.".format(exp)
5084 self.rollback_newvm(vapp_uuid, msg)
5085
5086 if status:
5087 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5088 else:
5089 #If failed to add disk, delete VM
5090 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
5091 self.rollback_newvm(vapp_uuid, msg)
5092
5093
5094 def add_new_disk_rest(self, disk_href, disk_size_mb):
5095 """
5096 Retrives vApp Disks section & add new empty disk
5097
5098 Args:
5099 disk_href: Disk section href to addd disk
5100 disk_size_mb: Disk size in MB
5101
5102 Returns: Status of add new disk task
5103 """
5104 status = False
5105 if self.client._session:
5106 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5107 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5108 response = self.perform_request(req_type='GET',
5109 url=disk_href,
5110 headers=headers)
5111
5112 if response.status_code == 403:
5113 response = self.retry_rest('GET', disk_href)
5114
5115 if response.status_code != requests.codes.ok:
5116 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
5117 .format(disk_href, response.status_code))
5118 return status
5119 try:
5120 #Find but type & max of instance IDs assigned to disks
5121 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5122 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
5123 #For python3
5124 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
5125 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
5126 instance_id = 0
5127 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
5128 if item.find("rasd:Description",namespaces).text == "Hard disk":
5129 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
5130 if inst_id > instance_id:
5131 instance_id = inst_id
5132 disk_item = item.find("rasd:HostResource" ,namespaces)
5133 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
5134 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
5135
5136 instance_id = instance_id + 1
5137 new_item = """<Item>
5138 <rasd:Description>Hard disk</rasd:Description>
5139 <rasd:ElementName>New disk</rasd:ElementName>
5140 <rasd:HostResource
5141 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
5142 vcloud:capacity="{}"
5143 vcloud:busSubType="{}"
5144 vcloud:busType="{}"></rasd:HostResource>
5145 <rasd:InstanceID>{}</rasd:InstanceID>
5146 <rasd:ResourceType>17</rasd:ResourceType>
5147 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
5148
5149 new_data = response.content
5150 #Add new item at the bottom
5151 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
5152
5153 # Send PUT request to modify virtual hardware section with new disk
5154 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
5155
5156 response = self.perform_request(req_type='PUT',
5157 url=disk_href,
5158 data=new_data,
5159 headers=headers)
5160
5161 if response.status_code == 403:
5162 add_headers = {'Content-Type': headers['Content-Type']}
5163 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
5164
5165 if response.status_code != 202:
5166 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
5167 .format(disk_href, response.status_code, response.content))
5168 else:
5169 add_disk_task = self.get_task_from_response(response.content)
5170 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
5171 if result.get('status') == 'success':
5172 status = True
5173 else:
5174 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
5175
5176 except Exception as exp:
5177 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
5178
5179 return status
5180
5181
5182 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
5183 """
5184 Method to add existing disk to vm
5185 Args :
5186 catalogs - List of VDC catalogs
5187 image_id - Catalog ID
5188 template_name - Name of template in catalog
5189 vapp_uuid - UUID of vApp
5190 Returns:
5191 None
5192 """
5193 disk_info = None
5194 vcenter_conect, content = self.get_vcenter_content()
5195 #find moref-id of vm in image
5196 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
5197 image_id=image_id,
5198 )
5199
5200 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
5201 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
5202 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
5203 if catalog_vm_moref_id:
5204 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
5205 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
5206 if catalog_vm_obj:
5207 #find existing disk
5208 disk_info = self.find_disk(catalog_vm_obj)
5209 else:
5210 exp_msg = "No VM with image id {} found".format(image_id)
5211 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5212 else:
5213 exp_msg = "No Image found with image ID {} ".format(image_id)
5214 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5215
5216 if disk_info:
5217 self.logger.info("Existing disk_info : {}".format(disk_info))
5218 #get VM
5219 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5220 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
5221 if vm_obj:
5222 status = self.add_disk(vcenter_conect=vcenter_conect,
5223 vm=vm_obj,
5224 disk_info=disk_info,
5225 size=size,
5226 vapp_uuid=vapp_uuid
5227 )
5228 if status:
5229 self.logger.info("Disk from image id {} added to {}".format(image_id,
5230 vm_obj.config.name)
5231 )
5232 else:
5233 msg = "No disk found with image id {} to add in VM {}".format(
5234 image_id,
5235 vm_obj.config.name)
5236 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
5237
5238
5239 def find_disk(self, vm_obj):
5240 """
5241 Method to find details of existing disk in VM
5242 Args :
5243 vm_obj - vCenter object of VM
5244 image_id - Catalog ID
5245 Returns:
5246 disk_info : dict of disk details
5247 """
5248 disk_info = {}
5249 if vm_obj:
5250 try:
5251 devices = vm_obj.config.hardware.device
5252 for device in devices:
5253 if type(device) is vim.vm.device.VirtualDisk:
5254 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
5255 disk_info["full_path"] = device.backing.fileName
5256 disk_info["datastore"] = device.backing.datastore
5257 disk_info["capacityKB"] = device.capacityInKB
5258 break
5259 except Exception as exp:
5260 self.logger.error("find_disk() : exception occurred while "\
5261 "getting existing disk details :{}".format(exp))
5262 return disk_info
5263
5264
5265 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
5266 """
5267 Method to add existing disk in VM
5268 Args :
5269 vcenter_conect - vCenter content object
5270 vm - vCenter vm object
5271 disk_info : dict of disk details
5272 Returns:
5273 status : status of add disk task
5274 """
5275 datastore = disk_info["datastore"] if "datastore" in disk_info else None
5276 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
5277 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
5278 if size is not None:
5279 #Convert size from GB to KB
5280 sizeKB = int(size) * 1024 * 1024
5281 #compare size of existing disk and user given size.Assign whicherver is greater
5282 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
5283 sizeKB, capacityKB))
5284 if sizeKB > capacityKB:
5285 capacityKB = sizeKB
5286
5287 if datastore and fullpath and capacityKB:
5288 try:
5289 spec = vim.vm.ConfigSpec()
5290 # get all disks on a VM, set unit_number to the next available
5291 unit_number = 0
5292 for dev in vm.config.hardware.device:
5293 if hasattr(dev.backing, 'fileName'):
5294 unit_number = int(dev.unitNumber) + 1
5295 # unit_number 7 reserved for scsi controller
5296 if unit_number == 7:
5297 unit_number += 1
5298 if isinstance(dev, vim.vm.device.VirtualDisk):
5299 #vim.vm.device.VirtualSCSIController
5300 controller_key = dev.controllerKey
5301
5302 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
5303 unit_number, controller_key))
5304 # add disk here
5305 dev_changes = []
5306 disk_spec = vim.vm.device.VirtualDeviceSpec()
5307 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5308 disk_spec.device = vim.vm.device.VirtualDisk()
5309 disk_spec.device.backing = \
5310 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
5311 disk_spec.device.backing.thinProvisioned = True
5312 disk_spec.device.backing.diskMode = 'persistent'
5313 disk_spec.device.backing.datastore = datastore
5314 disk_spec.device.backing.fileName = fullpath
5315
5316 disk_spec.device.unitNumber = unit_number
5317 disk_spec.device.capacityInKB = capacityKB
5318 disk_spec.device.controllerKey = controller_key
5319 dev_changes.append(disk_spec)
5320 spec.deviceChange = dev_changes
5321 task = vm.ReconfigVM_Task(spec=spec)
5322 status = self.wait_for_vcenter_task(task, vcenter_conect)
5323 return status
5324 except Exception as exp:
5325 exp_msg = "add_disk() : exception {} occurred while adding disk "\
5326 "{} to vm {}".format(exp,
5327 fullpath,
5328 vm.config.name)
5329 self.rollback_newvm(vapp_uuid, exp_msg)
5330 else:
5331 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
5332 self.rollback_newvm(vapp_uuid, msg)
5333
5334
5335 def get_vcenter_content(self):
5336 """
5337 Get the vsphere content object
5338 """
5339 try:
5340 vm_vcenter_info = self.get_vm_vcenter_info()
5341 except Exception as exp:
5342 self.logger.error("Error occurred while getting vCenter infromationn"\
5343 " for VM : {}".format(exp))
5344 raise vimconn.vimconnException(message=exp)
5345
5346 context = None
5347 if hasattr(ssl, '_create_unverified_context'):
5348 context = ssl._create_unverified_context()
5349
5350 vcenter_conect = SmartConnect(
5351 host=vm_vcenter_info["vm_vcenter_ip"],
5352 user=vm_vcenter_info["vm_vcenter_user"],
5353 pwd=vm_vcenter_info["vm_vcenter_password"],
5354 port=int(vm_vcenter_info["vm_vcenter_port"]),
5355 sslContext=context
5356 )
5357 atexit.register(Disconnect, vcenter_conect)
5358 content = vcenter_conect.RetrieveContent()
5359 return vcenter_conect, content
5360
5361
5362 def get_vm_moref_id(self, vapp_uuid):
5363 """
5364 Get the moref_id of given VM
5365 """
5366 try:
5367 if vapp_uuid:
5368 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
5369 if vm_details and "vm_vcenter_info" in vm_details:
5370 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
5371 return vm_moref_id
5372
5373 except Exception as exp:
5374 self.logger.error("Error occurred while getting VM moref ID "\
5375 " for VM : {}".format(exp))
5376 return None
5377
5378
5379 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
5380 """
5381 Method to get vApp template details
5382 Args :
5383 catalogs - list of VDC catalogs
5384 image_id - Catalog ID to find
5385 template_name : template name in catalog
5386 Returns:
5387 parsed_respond : dict of vApp tempalte details
5388 """
5389 parsed_response = {}
5390
5391 vca = self.connect_as_admin()
5392 if not vca:
5393 raise vimconn.vimconnConnectionException("Failed to connect vCD")
5394
5395 try:
5396 org, vdc = self.get_vdc_details()
5397 catalog = self.get_catalog_obj(image_id, catalogs)
5398 if catalog:
5399 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
5400 catalog_items = [items.attrib]
5401
5402 if len(catalog_items) == 1:
5403 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5404 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5405
5406 response = self.perform_request(req_type='GET',
5407 url=catalog_items[0].get('href'),
5408 headers=headers)
5409 catalogItem = XmlElementTree.fromstring(response.content)
5410 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
5411 vapp_tempalte_href = entity.get("href")
5412 #get vapp details and parse moref id
5413
5414 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
5415 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
5416 'vmw': 'http://www.vmware.com/schema/ovf',
5417 'vm': 'http://www.vmware.com/vcloud/v1.5',
5418 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5419 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
5420 'xmlns':"http://www.vmware.com/vcloud/v1.5"
5421 }
5422
5423 if vca._session:
5424 response = self.perform_request(req_type='GET',
5425 url=vapp_tempalte_href,
5426 headers=headers)
5427
5428 if response.status_code != requests.codes.ok:
5429 self.logger.debug("REST API call {} failed. Return status code {}".format(
5430 vapp_tempalte_href, response.status_code))
5431
5432 else:
5433 xmlroot_respond = XmlElementTree.fromstring(response.content)
5434 children_section = xmlroot_respond.find('vm:Children/', namespaces)
5435 if children_section is not None:
5436 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
5437 if vCloud_extension_section is not None:
5438 vm_vcenter_info = {}
5439 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
5440 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
5441 if vmext is not None:
5442 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
5443 parsed_response["vm_vcenter_info"]= vm_vcenter_info
5444
5445 except Exception as exp :
5446 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
5447
5448 return parsed_response
5449
5450
5451 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
5452 """
5453 Method to delete vApp
5454 Args :
5455 vapp_uuid - vApp UUID
5456 msg - Error message to be logged
5457 exp_type : Exception type
5458 Returns:
5459 None
5460 """
5461 if vapp_uuid:
5462 status = self.delete_vminstance(vapp_uuid)
5463 else:
5464 msg = "No vApp ID"
5465 self.logger.error(msg)
5466 if exp_type == "Genric":
5467 raise vimconn.vimconnException(msg)
5468 elif exp_type == "NotFound":
5469 raise vimconn.vimconnNotFoundException(message=msg)
5470
5471 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
5472 """
5473 Method to attach SRIOV adapters to VM
5474
5475 Args:
5476 vapp_uuid - uuid of vApp/VM
5477 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
5478 vmname_andid - vmname
5479
5480 Returns:
5481 The status of add SRIOV adapter task , vm object and
5482 vcenter_conect object
5483 """
5484 vm_obj = None
5485 vcenter_conect, content = self.get_vcenter_content()
5486 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5487
5488 if vm_moref_id:
5489 try:
5490 no_of_sriov_devices = len(sriov_nets)
5491 if no_of_sriov_devices > 0:
5492 #Get VM and its host
5493 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5494 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
5495 if host_obj and vm_obj:
5496 #get SRIOV devies from host on which vapp is currently installed
5497 avilable_sriov_devices = self.get_sriov_devices(host_obj,
5498 no_of_sriov_devices,
5499 )
5500
5501 if len(avilable_sriov_devices) == 0:
5502 #find other hosts with active pci devices
5503 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
5504 content,
5505 no_of_sriov_devices,
5506 )
5507
5508 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
5509 #Migrate vm to the host where SRIOV devices are available
5510 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
5511 new_host_obj))
5512 task = self.relocate_vm(new_host_obj, vm_obj)
5513 if task is not None:
5514 result = self.wait_for_vcenter_task(task, vcenter_conect)
5515 self.logger.info("Migrate VM status: {}".format(result))
5516 host_obj = new_host_obj
5517 else:
5518 self.logger.info("Fail to migrate VM : {}".format(result))
5519 raise vimconn.vimconnNotFoundException(
5520 "Fail to migrate VM : {} to host {}".format(
5521 vmname_andid,
5522 new_host_obj)
5523 )
5524
5525 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
5526 #Add SRIOV devices one by one
5527 for sriov_net in sriov_nets:
5528 network_name = sriov_net.get('net_id')
5529 dvs_portgr_name = self.create_dvPort_group(network_name)
5530 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
5531 #add vlan ID ,Modify portgroup for vlan ID
5532 self.configure_vlanID(content, vcenter_conect, network_name)
5533
5534 task = self.add_sriov_to_vm(content,
5535 vm_obj,
5536 host_obj,
5537 network_name,
5538 avilable_sriov_devices[0]
5539 )
5540 if task:
5541 status= self.wait_for_vcenter_task(task, vcenter_conect)
5542 if status:
5543 self.logger.info("Added SRIOV {} to VM {}".format(
5544 no_of_sriov_devices,
5545 str(vm_obj)))
5546 else:
5547 self.logger.error("Fail to add SRIOV {} to VM {}".format(
5548 no_of_sriov_devices,
5549 str(vm_obj)))
5550 raise vimconn.vimconnUnexpectedResponse(
5551 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
5552 )
5553 return True, vm_obj, vcenter_conect
5554 else:
5555 self.logger.error("Currently there is no host with"\
5556 " {} number of avaialble SRIOV "\
5557 "VFs required for VM {}".format(
5558 no_of_sriov_devices,
5559 vmname_andid)
5560 )
5561 raise vimconn.vimconnNotFoundException(
5562 "Currently there is no host with {} "\
5563 "number of avaialble SRIOV devices required for VM {}".format(
5564 no_of_sriov_devices,
5565 vmname_andid))
5566 else:
5567 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
5568
5569 except vmodl.MethodFault as error:
5570 self.logger.error("Error occurred while adding SRIOV {} ",error)
5571 return None, vm_obj, vcenter_conect
5572
5573
5574 def get_sriov_devices(self,host, no_of_vfs):
5575 """
5576 Method to get the details of SRIOV devices on given host
5577 Args:
5578 host - vSphere host object
5579 no_of_vfs - number of VFs needed on host
5580
5581 Returns:
5582 array of SRIOV devices
5583 """
5584 sriovInfo=[]
5585 if host:
5586 for device in host.config.pciPassthruInfo:
5587 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
5588 if device.numVirtualFunction >= no_of_vfs:
5589 sriovInfo.append(device)
5590 break
5591 return sriovInfo
5592
5593
5594 def get_host_and_sriov_devices(self, content, no_of_vfs):
5595 """
5596 Method to get the details of SRIOV devices infromation on all hosts
5597
5598 Args:
5599 content - vSphere host object
5600 no_of_vfs - number of pci VFs needed on host
5601
5602 Returns:
5603 array of SRIOV devices and host object
5604 """
5605 host_obj = None
5606 sriov_device_objs = None
5607 try:
5608 if content:
5609 container = content.viewManager.CreateContainerView(content.rootFolder,
5610 [vim.HostSystem], True)
5611 for host in container.view:
5612 devices = self.get_sriov_devices(host, no_of_vfs)
5613 if devices:
5614 host_obj = host
5615 sriov_device_objs = devices
5616 break
5617 except Exception as exp:
5618 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
5619
5620 return host_obj,sriov_device_objs
5621
5622
5623 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
5624 """
5625 Method to add SRIOV adapter to vm
5626
5627 Args:
5628 host_obj - vSphere host object
5629 vm_obj - vSphere vm object
5630 content - vCenter content object
5631 network_name - name of distributed virtaul portgroup
5632 sriov_device - SRIOV device info
5633
5634 Returns:
5635 task object
5636 """
5637 devices = []
5638 vnic_label = "sriov nic"
5639 try:
5640 dvs_portgr = self.get_dvport_group(network_name)
5641 network_name = dvs_portgr.name
5642 nic = vim.vm.device.VirtualDeviceSpec()
5643 # VM device
5644 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5645 nic.device = vim.vm.device.VirtualSriovEthernetCard()
5646 nic.device.addressType = 'assigned'
5647 #nic.device.key = 13016
5648 nic.device.deviceInfo = vim.Description()
5649 nic.device.deviceInfo.label = vnic_label
5650 nic.device.deviceInfo.summary = network_name
5651 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
5652
5653 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
5654 nic.device.backing.deviceName = network_name
5655 nic.device.backing.useAutoDetect = False
5656 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
5657 nic.device.connectable.startConnected = True
5658 nic.device.connectable.allowGuestControl = True
5659
5660 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
5661 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
5662 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
5663
5664 devices.append(nic)
5665 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
5666 task = vm_obj.ReconfigVM_Task(vmconf)
5667 return task
5668 except Exception as exp:
5669 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
5670 return None
5671
5672
5673 def create_dvPort_group(self, network_name):
5674 """
5675 Method to create disributed virtual portgroup
5676
5677 Args:
5678 network_name - name of network/portgroup
5679
5680 Returns:
5681 portgroup key
5682 """
5683 try:
5684 new_network_name = [network_name, '-', str(uuid.uuid4())]
5685 network_name=''.join(new_network_name)
5686 vcenter_conect, content = self.get_vcenter_content()
5687
5688 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
5689 if dv_switch:
5690 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
5691 dv_pg_spec.name = network_name
5692
5693 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
5694 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
5695 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
5696 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
5697 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
5698 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
5699
5700 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
5701 self.wait_for_vcenter_task(task, vcenter_conect)
5702
5703 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
5704 if dvPort_group:
5705 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
5706 return dvPort_group.key
5707 else:
5708 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
5709
5710 except Exception as exp:
5711 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
5712 " : {}".format(network_name, exp))
5713 return None
5714
5715 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
5716 """
5717 Method to reconfigure disributed virtual portgroup
5718
5719 Args:
5720 dvPort_group_name - name of disributed virtual portgroup
5721 content - vCenter content object
5722 config_info - disributed virtual portgroup configuration
5723
5724 Returns:
5725 task object
5726 """
5727 try:
5728 dvPort_group = self.get_dvport_group(dvPort_group_name)
5729 if dvPort_group:
5730 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
5731 dv_pg_spec.configVersion = dvPort_group.config.configVersion
5732 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
5733 if "vlanID" in config_info:
5734 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
5735 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
5736
5737 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
5738 return task
5739 else:
5740 return None
5741 except Exception as exp:
5742 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
5743 " : {}".format(dvPort_group_name, exp))
5744 return None
5745
5746
5747 def destroy_dvport_group(self , dvPort_group_name):
5748 """
5749 Method to destroy disributed virtual portgroup
5750
5751 Args:
5752 network_name - name of network/portgroup
5753
5754 Returns:
5755 True if portgroup successfully got deleted else false
5756 """
5757 vcenter_conect, content = self.get_vcenter_content()
5758 try:
5759 status = None
5760 dvPort_group = self.get_dvport_group(dvPort_group_name)
5761 if dvPort_group:
5762 task = dvPort_group.Destroy_Task()
5763 status = self.wait_for_vcenter_task(task, vcenter_conect)
5764 return status
5765 except vmodl.MethodFault as exp:
5766 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
5767 exp, dvPort_group_name))
5768 return None
5769
5770
5771 def get_dvport_group(self, dvPort_group_name):
5772 """
5773 Method to get disributed virtual portgroup
5774
5775 Args:
5776 network_name - name of network/portgroup
5777
5778 Returns:
5779 portgroup object
5780 """
5781 vcenter_conect, content = self.get_vcenter_content()
5782 dvPort_group = None
5783 try:
5784 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
5785 for item in container.view:
5786 if item.key == dvPort_group_name:
5787 dvPort_group = item
5788 break
5789 return dvPort_group
5790 except vmodl.MethodFault as exp:
5791 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
5792 exp, dvPort_group_name))
5793 return None
5794
5795 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
5796 """
5797 Method to get disributed virtual portgroup vlanID
5798
5799 Args:
5800 network_name - name of network/portgroup
5801
5802 Returns:
5803 vlan ID
5804 """
5805 vlanId = None
5806 try:
5807 dvPort_group = self.get_dvport_group(dvPort_group_name)
5808 if dvPort_group:
5809 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
5810 except vmodl.MethodFault as exp:
5811 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
5812 exp, dvPort_group_name))
5813 return vlanId
5814
5815
5816 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
5817 """
5818 Method to configure vlanID in disributed virtual portgroup vlanID
5819
5820 Args:
5821 network_name - name of network/portgroup
5822
5823 Returns:
5824 None
5825 """
5826 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
5827 if vlanID == 0:
5828 #configure vlanID
5829 vlanID = self.genrate_vlanID(dvPort_group_name)
5830 config = {"vlanID":vlanID}
5831 task = self.reconfig_portgroup(content, dvPort_group_name,
5832 config_info=config)
5833 if task:
5834 status= self.wait_for_vcenter_task(task, vcenter_conect)
5835 if status:
5836 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
5837 dvPort_group_name,vlanID))
5838 else:
5839 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
5840 dvPort_group_name, vlanID))
5841
5842
5843 def genrate_vlanID(self, network_name):
5844 """
5845 Method to get unused vlanID
5846 Args:
5847 network_name - name of network/portgroup
5848 Returns:
5849 vlanID
5850 """
5851 vlan_id = None
5852 used_ids = []
5853 if self.config.get('vlanID_range') == None:
5854 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
5855 "at config value before creating sriov network with vlan tag")
5856 if "used_vlanIDs" not in self.persistent_info:
5857 self.persistent_info["used_vlanIDs"] = {}
5858 else:
5859 used_ids = self.persistent_info["used_vlanIDs"].values()
5860 #For python3
5861 #used_ids = list(self.persistent_info["used_vlanIDs"].values())
5862
5863 for vlanID_range in self.config.get('vlanID_range'):
5864 start_vlanid , end_vlanid = vlanID_range.split("-")
5865 if start_vlanid > end_vlanid:
5866 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
5867 vlanID_range))
5868
5869 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
5870 #For python3
5871 #for id in range(int(start_vlanid), int(end_vlanid) + 1):
5872 if id not in used_ids:
5873 vlan_id = id
5874 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
5875 return vlan_id
5876 if vlan_id is None:
5877 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
5878
5879
5880 def get_obj(self, content, vimtype, name):
5881 """
5882 Get the vsphere object associated with a given text name
5883 """
5884 obj = None
5885 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
5886 for item in container.view:
5887 if item.name == name:
5888 obj = item
5889 break
5890 return obj
5891
5892
5893 def insert_media_to_vm(self, vapp, image_id):
5894 """
5895 Method to insert media CD-ROM (ISO image) from catalog to vm.
5896 vapp - vapp object to get vm id
5897 Image_id - image id for cdrom to be inerted to vm
5898 """
5899 # create connection object
5900 vca = self.connect()
5901 try:
5902 # fetching catalog details
5903 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
5904 if vca._session:
5905 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5906 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5907 response = self.perform_request(req_type='GET',
5908 url=rest_url,
5909 headers=headers)
5910
5911 if response.status_code != 200:
5912 self.logger.error("REST call {} failed reason : {}"\
5913 "status code : {}".format(url_rest_call,
5914 response.content,
5915 response.status_code))
5916 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
5917 "catalog details")
5918 # searching iso name and id
5919 iso_name,media_id = self.get_media_details(vca, response.content)
5920
5921 if iso_name and media_id:
5922 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5923 <ns6:MediaInsertOrEjectParams
5924 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
5925 <ns6:Media
5926 type="application/vnd.vmware.vcloud.media+xml"
5927 name="{}.iso"
5928 id="urn:vcloud:media:{}"
5929 href="https://{}/api/media/{}"/>
5930 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
5931 self.url,media_id)
5932
5933 for vms in vapp.get_all_vms():
5934 vm_id = vms.get('id').split(':')[-1]
5935
5936 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
5937 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
5938
5939 response = self.perform_request(req_type='POST',
5940 url=rest_url,
5941 data=data,
5942 headers=headers)
5943
5944 if response.status_code != 202:
5945 self.logger.error("Failed to insert CD-ROM to vm")
5946 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
5947 "ISO image to vm")
5948 else:
5949 task = self.get_task_from_response(response.content)
5950 result = self.client.get_task_monitor().wait_for_success(task=task)
5951 if result.get('status') == 'success':
5952 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
5953 " image to vm {}".format(vm_id))
5954
5955 except Exception as exp:
5956 self.logger.error("insert_media_to_vm() : exception occurred "\
5957 "while inserting media CD-ROM")
5958 raise vimconn.vimconnException(message=exp)
5959
5960
5961 def get_media_details(self, vca, content):
5962 """
5963 Method to get catalog item details
5964 vca - connection object
5965 content - Catalog details
5966 Return - Media name, media id
5967 """
5968 cataloghref_list = []
5969 try:
5970 if content:
5971 vm_list_xmlroot = XmlElementTree.fromstring(content)
5972 for child in vm_list_xmlroot.iter():
5973 if 'CatalogItem' in child.tag:
5974 cataloghref_list.append(child.attrib.get('href'))
5975 if cataloghref_list is not None:
5976 for href in cataloghref_list:
5977 if href:
5978 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5979 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5980 response = self.perform_request(req_type='GET',
5981 url=href,
5982 headers=headers)
5983 if response.status_code != 200:
5984 self.logger.error("REST call {} failed reason : {}"\
5985 "status code : {}".format(href,
5986 response.content,
5987 response.status_code))
5988 raise vimconn.vimconnException("get_media_details : Failed to get "\
5989 "catalogitem details")
5990 list_xmlroot = XmlElementTree.fromstring(response.content)
5991 for child in list_xmlroot.iter():
5992 if 'Entity' in child.tag:
5993 if 'media' in child.attrib.get('href'):
5994 name = child.attrib.get('name')
5995 media_id = child.attrib.get('href').split('/').pop()
5996 return name,media_id
5997 else:
5998 self.logger.debug("Media name and id not found")
5999 return False,False
6000 except Exception as exp:
6001 self.logger.error("get_media_details : exception occurred "\
6002 "getting media details")
6003 raise vimconn.vimconnException(message=exp)
6004
6005
6006 def retry_rest(self, method, url, add_headers=None, data=None):
6007 """ Method to get Token & retry respective REST request
6008 Args:
6009 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
6010 url - request url to be used
6011 add_headers - Additional headers (optional)
6012 data - Request payload data to be passed in request
6013 Returns:
6014 response - Response of request
6015 """
6016 response = None
6017
6018 #Get token
6019 self.get_token()
6020
6021 if self.client._session:
6022 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6023 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6024
6025 if add_headers:
6026 headers.update(add_headers)
6027
6028 if method == 'GET':
6029 response = self.perform_request(req_type='GET',
6030 url=url,
6031 headers=headers)
6032 elif method == 'PUT':
6033 response = self.perform_request(req_type='PUT',
6034 url=url,
6035 headers=headers,
6036 data=data)
6037 elif method == 'POST':
6038 response = self.perform_request(req_type='POST',
6039 url=url,
6040 headers=headers,
6041 data=data)
6042 elif method == 'DELETE':
6043 response = self.perform_request(req_type='DELETE',
6044 url=url,
6045 headers=headers)
6046 return response
6047
6048
6049 def get_token(self):
6050 """ Generate a new token if expired
6051
6052 Returns:
6053 The return client object that letter can be used to connect to vCloud director as admin for VDC
6054 """
6055 try:
6056 self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
6057 self.user,
6058 self.org_name))
6059 host = self.url
6060 client = Client(host, verify_ssl_certs=False)
6061 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
6062 # connection object
6063 self.client = client
6064
6065 except:
6066 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
6067 "{} as user: {}".format(self.org_name, self.user))
6068
6069 if not client:
6070 raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
6071
6072
6073 def get_vdc_details(self):
6074 """ Get VDC details using pyVcloud Lib
6075
6076 Returns org and vdc object
6077 """
6078 org = Org(self.client, resource=self.client.get_org())
6079 vdc = org.get_vdc(self.tenant_name)
6080
6081 #Retry once, if failed by refreshing token
6082 if vdc is None:
6083 self.get_token()
6084 vdc = org.get_vdc(self.tenant_name)
6085
6086 return org, vdc
6087
6088
6089 def perform_request(self, req_type, url, headers=None, data=None):
6090 """Perform the POST/PUT/GET/DELETE request."""
6091
6092 #Log REST request details
6093 self.log_request(req_type, url=url, headers=headers, data=data)
6094 # perform request and return its result
6095 if req_type == 'GET':
6096 response = requests.get(url=url,
6097 headers=headers,
6098 verify=False)
6099 elif req_type == 'PUT':
6100 response = requests.put(url=url,
6101 headers=headers,
6102 data=data,
6103 verify=False)
6104 elif req_type == 'POST':
6105 response = requests.post(url=url,
6106 headers=headers,
6107 data=data,
6108 verify=False)
6109 elif req_type == 'DELETE':
6110 response = requests.delete(url=url,
6111 headers=headers,
6112 verify=False)
6113 #Log the REST response
6114 self.log_response(response)
6115
6116 return response
6117
6118
6119 def log_request(self, req_type, url=None, headers=None, data=None):
6120 """Logs REST request details"""
6121
6122 if req_type is not None:
6123 self.logger.debug("Request type: {}".format(req_type))
6124
6125 if url is not None:
6126 self.logger.debug("Request url: {}".format(url))
6127
6128 if headers is not None:
6129 for header in headers:
6130 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
6131
6132 if data is not None:
6133 self.logger.debug("Request data: {}".format(data))
6134
6135
6136 def log_response(self, response):
6137 """Logs REST response details"""
6138
6139 self.logger.debug("Response status code: {} ".format(response.status_code))
6140
6141
6142 def get_task_from_response(self, content):
6143 """
6144 content - API response content(response.content)
6145 return task object
6146 """
6147 xmlroot = XmlElementTree.fromstring(content)
6148 if xmlroot.tag.split('}')[1] == "Task":
6149 return xmlroot
6150 else:
6151 for ele in xmlroot:
6152 if ele.tag.split("}")[1] == "Tasks":
6153 task = ele[0]
6154 break
6155 return task
6156
6157
6158 def power_on_vapp(self,vapp_id, vapp_name):
6159 """
6160 vapp_id - vApp uuid
6161 vapp_name - vAapp name
6162 return - Task object
6163 """
6164 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6165 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6166
6167 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
6168 vapp_id)
6169 response = self.perform_request(req_type='POST',
6170 url=poweron_href,
6171 headers=headers)
6172
6173 if response.status_code != 202:
6174 self.logger.error("REST call {} failed reason : {}"\
6175 "status code : {} ".format(poweron_href,
6176 response.content,
6177 response.status_code))
6178 raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
6179 "vApp {}".format(vapp_name))
6180 else:
6181 poweron_task = self.get_task_from_response(response.content)
6182 return poweron_task
6183
6184