Minor vmware vcd vimconnector fixes
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implements an abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud.vcd.client import BasicLoginCredentials, Client, VcdTaskException
46 from pyvcloud.vcd.vdc import VDC
47 from pyvcloud.vcd.org import Org
48 import re
49 from pyvcloud.vcd.vapp import VApp
50 from xml.sax.saxutils import escape
51 import logging
52 import json
53 import time
54 import uuid
55 import httplib
56 #For python3
57 #import http.client
58 import hashlib
59 import socket
60 import struct
61 import netaddr
62 import random
63
64 # global variable for vcd connector type
65 STANDALONE = 'standalone'
66
67 # key for flavor dicts
68 FLAVOR_RAM_KEY = 'ram'
69 FLAVOR_VCPUS_KEY = 'vcpus'
70 FLAVOR_DISK_KEY = 'disk'
71 DEFAULT_IP_PROFILE = {'dhcp_count':50,
72 'dhcp_enabled':True,
73 'ip_version':"IPv4"
74 }
75 # global variable for wait time
76 INTERVAL_TIME = 5
77 MAX_WAIT_TIME = 1800
78
79 API_VERSION = '5.9'
80
81 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
82 __date__ = "$09-Mar-2018 11:09:29$"
83 __version__ = '0.2'
84
85 # -1: "Could not be created",
86 # 0: "Unresolved",
87 # 1: "Resolved",
88 # 2: "Deployed",
89 # 3: "Suspended",
90 # 4: "Powered on",
91 # 5: "Waiting for user input",
92 # 6: "Unknown state",
93 # 7: "Unrecognized state",
94 # 8: "Powered off",
95 # 9: "Inconsistent state",
96 # 10: "Children do not all have the same status",
97 # 11: "Upload initiated, OVF descriptor pending",
98 # 12: "Upload initiated, copying contents",
99 # 13: "Upload initiated , disk contents pending",
100 # 14: "Upload has been quarantined",
101 # 15: "Upload quarantine period has expired"
102
103 # mapping vCD status to MANO
104 vcdStatusCode2manoFormat = {4: 'ACTIVE',
105 7: 'PAUSED',
106 3: 'SUSPENDED',
107 8: 'INACTIVE',
108 12: 'BUILD',
109 -1: 'ERROR',
110 14: 'DELETED'}
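# Illustrative reading of the mapping above: a vApp that vCD reports with status 8 ("Powered off") is
# translated to the MANO status 'INACTIVE', and status 4 ("Powered on") to 'ACTIVE'.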
111
112 #
113 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
114 'ERROR': 'ERROR', 'DELETED': 'DELETED'
115 }
116
117 class vimconnector(vimconn.vimconnector):
118 # dict used to store flavor in memory
119 flavorlist = {}
120
121 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
122 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
123 """
124 Constructor creates a VMware connector to vCloud Director.
125
126 By default the constructor doesn't validate the connection state, so a client can create the object with None arguments.
127 If the client specifies username, password, host and VDC name, the connector initializes the other missing attributes.
128
129 a) It initializes the organization UUID.
130 b) It initializes the tenant_id / VDC ID (this information is derived from the tenant name).
131
132 Args:
133 uuid - is the organization uuid.
134 name - is the organization name; it must be present in vCloud Director.
135 tenant_id - is the VDC uuid; it must be present in vCloud Director.
136 tenant_name - is the VDC name.
137 url - is the hostname or IP address of vCloud Director.
138 url_admin - same as above.
139 user - is a user with administrator rights for the organization. The caller must make sure that
140 the username has the right privileges.
141
142 password - is the password for the user.
143
144 The VMware connector also requires PVDC administrative privileges and a separate account.
145 These credentials must be passed via the config argument, a dict containing the keys
146
147 dict['admin_username']
148 dict['admin_password']
149 config - Provide NSX and vCenter information
150
151 Returns:
152 Nothing.
153 """
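# Illustrative config sketch (hypothetical values, not defaults) showing the keys this constructor reads
# from the config dict; the admin_* and nsx_* keys are mandatory here, the rest are optional:
#   config = {'admin_username': 'pvdc-admin', 'admin_password': 'secret',
#             'nsx_manager': 'https://nsx.example.com', 'nsx_user': 'nsx-admin', 'nsx_password': 'secret',
#             'orgname': 'my-org',
#             'vcenter_ip': '10.0.0.5', 'vcenter_port': 443,
#             'vcenter_user': 'administrator@vsphere.local', 'vcenter_password': 'secret'}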
154
155 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
156 url_admin, user, passwd, log_level, config)
157
158 self.logger = logging.getLogger('openmano.vim.vmware')
159 self.logger.setLevel(10)
160 self.persistent_info = persistent_info
161
162 self.name = name
163 self.id = uuid
164 self.url = url
165 self.url_admin = url_admin
166 self.tenant_id = tenant_id
167 self.tenant_name = tenant_name
168 self.user = user
169 self.passwd = passwd
170 self.config = config
171 self.admin_password = None
172 self.admin_user = None
173 self.org_name = ""
174 self.nsx_manager = None
175 self.nsx_user = None
176 self.nsx_password = None
177 self.availability_zone = None
178
179 # Disable warnings from self-signed certificates.
180 requests.packages.urllib3.disable_warnings()
181
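# tenant_name may be a plain VDC name or the combined form "<org>:<vdc>"; e.g. (hypothetical)
# tenant_name = "my-org:my-vdc" sets org_name to "my-org" and tenant_name to "my-vdc".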
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error: admin username or admin password is missing in config.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 #Set availability zone for Affinity rules
214 self.availability_zone = self.set_availability_zones()
215
216 # ############# Stub code for SRIOV #################
217 # try:
218 # self.dvs_name = config['dv_switch_name']
219 # except KeyError:
220 # raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in Config")
221 #
222 # self.vlanID_range = config.get("vlanID_range", None)
223
224 self.org_uuid = None
225 self.client = None
226
227 if not url:
228 raise vimconn.vimconnException('url param can not be NoneType')
229
230 if not self.url_admin: # try to use normal url
231 self.url_admin = self.url
232
233 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
234 self.tenant_id, self.tenant_name))
235 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
236 logging.debug("vcd admin username {} vcd admin password {}".format(self.admin_user, self.admin_password))
237
238 # initialize organization
239 if self.user is not None and self.passwd is not None and self.url:
240 self.init_organization()
241
242 def __getitem__(self, index):
243 if index == 'name':
244 return self.name
245 if index == 'tenant_id':
246 return self.tenant_id
247 if index == 'tenant_name':
248 return self.tenant_name
249 elif index == 'id':
250 return self.id
251 elif index == 'org_name':
252 return self.org_name
253 elif index == 'org_uuid':
254 return self.org_uuid
255 elif index == 'user':
256 return self.user
257 elif index == 'passwd':
258 return self.passwd
259 elif index == 'url':
260 return self.url
261 elif index == 'url_admin':
262 return self.url_admin
263 elif index == "config":
264 return self.config
265 else:
266 raise KeyError("Invalid key '%s'" % str(index))
267
268 def __setitem__(self, index, value):
269 if index == 'name':
270 self.name = value
271 if index == 'tenant_id':
272 self.tenant_id = value
273 if index == 'tenant_name':
274 self.tenant_name = value
275 elif index == 'id':
276 self.id = value
277 elif index == 'org_name':
278 self.org_name = value
279 elif index == 'org_uuid':
280 self.org_uuid = value
281 elif index == 'user':
282 self.user = value
283 elif index == 'passwd':
284 self.passwd = value
285 elif index == 'url':
286 self.url = value
287 elif index == 'url_admin':
288 self.url_admin = value
289 else:
290 raise KeyError("Invalid key '%s'" % str(index))
291
292 def connect_as_admin(self):
293 """ Method connects as the PVDC admin user to vCloud Director.
294 There are certain actions that can be done only by the provider VDC admin user,
295 e.g. organization creation and provider network creation.
296
297 Returns:
298 The client object, which can later be used to connect to vCloud Director as admin for the provider VDC
299 """
300
301 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
302
303 try:
304 host = self.url
305 org = 'System'
306 client_as_admin = Client(host, verify_ssl_certs=False)
307 client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
308 except Exception as e:
309 raise vimconn.vimconnException(
310 "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
311
312 return client_as_admin
313
314 def connect(self):
315 """ Method connects as a normal user to vCloud Director.
316
317 Returns:
318 The client object, which can later be used to connect to vCloud Director as admin for the VDC
319 """
320
321 try:
322 self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
323 self.user,
324 self.org_name))
325 host = self.url
326 client = Client(host, verify_ssl_certs=False)
327 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
328 except:
329 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
330 "{} as user: {}".format(self.org_name, self.user))
331
332 return client
333
334 def init_organization(self):
335 """ Method initializes the organization UUID and the VDC parameters.
336
337 At a bare minimum the client must provide an organization name that is present in vCloud Director, and a VDC name.
338
339 The VDC UUID (tenant_id) will be initialized at run time if the client didn't pass it to the constructor.
340 The Org UUID will be initialized at run time if the data center is present in vCloud Director.
341
342 Returns:
343 Nothing. It sets self.org_uuid and, when possible, self.tenant_id / self.tenant_name.
344 """
345 client = self.connect()
346 if not client:
347 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
348
349 self.client = client
350 try:
351 if self.org_uuid is None:
352 org_list = client.get_org_list()
353 for org in org_list.Org:
354 # we set org UUID at the init phase but we can do it only when we have valid credential.
355 if org.get('name') == self.org_name:
356 self.org_uuid = org.get('href').split('/')[-1]
357 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
358 break
359 else:
360 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
361
362 # if all went well, request the org details
363 org_details_dict = self.get_org(org_uuid=self.org_uuid)
364
365 # there are two cases when initializing the VDC ID or VDC name at run time:
366 # case one: tenant_name provided but no tenant id
367 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
368 vdcs_dict = org_details_dict['vdcs']
369 for vdc in vdcs_dict:
370 if vdcs_dict[vdc] == self.tenant_name:
371 self.tenant_id = vdc
372 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
373 self.org_name))
374 break
375 else:
376 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
377 # case two we have tenant_id but we don't have tenant name so we find and set it.
378 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
379 vdcs_dict = org_details_dict['vdcs']
380 for vdc in vdcs_dict:
381 if vdc == self.tenant_id:
382 self.tenant_name = vdcs_dict[vdc]
383 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
384 self.org_name))
385 break
386 else:
387 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
388 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
389 except:
390 self.logger.debug("Failed to initialize organization UUID for org {}".format(self.org_name))
391 self.logger.debug(traceback.format_exc())
392 self.org_uuid = None
393
394 def new_tenant(self, tenant_name=None, tenant_description=None):
395 """ Method adds a new tenant to VIM with this name.
396 This action requires permission to create a VDC in vCloud Director.
397
398 Args:
399 tenant_name is the name of the tenant to be created.
400 tenant_description is not used for this call
401
402 Return:
403 returns the tenant identifier in UUID format.
404 If the action fails, the method raises vimconn.vimconnException.
405 """
406 vdc_task = self.create_vdc(vdc_name=tenant_name)
407 if vdc_task is not None:
408 vdc_uuid, value = vdc_task.popitem()
409 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
410 return vdc_uuid
411 else:
412 raise vimconn.vimconnException("Failed to create tenant {}".format(tenant_name))
413
414 def delete_tenant(self, tenant_id=None):
415 """ Delete a tenant from VIM
416 Args:
417 tenant_id is tenant_id to be deleted.
418
419 Return:
420 returns the tenant identifier in UUID format.
421 If the action fails, the method raises an exception.
422 """
423 vca = self.connect_as_admin()
424 if not vca:
425 raise vimconn.vimconnConnectionException("Failed to connect vCD")
426
427 if tenant_id is not None:
428 if vca._session:
429 #Get OrgVDC
430 url_list = [self.url, '/api/vdc/', tenant_id]
431 orgvdc_herf = ''.join(url_list)
432
433 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
434 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
435 response = self.perform_request(req_type='GET',
436 url=orgvdc_herf,
437 headers=headers)
438
439 if response.status_code != requests.codes.ok:
440 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
441 "Return status code {}".format(orgvdc_herf,
442 response.status_code))
443 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
444
445 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
446 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
447 #For python3
448 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
449 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
450 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
451 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
452
453 response = self.perform_request(req_type='DELETE',
454 url=vdc_remove_href,
455 headers=headers)
456
457 if response.status_code == 202:
458 time.sleep(5)
459 return tenant_id
460 else:
461 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
462 "Return status code {}".format(vdc_remove_href,
463 response.status_code))
464 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
465 else:
466 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
467 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
468
469
470 def get_tenant_list(self, filter_dict={}):
471 """Obtain tenants of VIM
472 filter_dict can contain the following keys:
473 name: filter by tenant name
474 id: filter by tenant uuid/id
475 <other VIM specific>
476 Returns the tenant list of dictionaries:
477 [{'name': '<name>', 'id': '<id>', ...}, ...]
478
479 """
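# Illustrative usage (hypothetical names): get_tenant_list({'name': 'my-vdc'}) keeps only the VDC entries
# whose values match every key given in filter_dict, e.g. [{'name': 'my-vdc', 'id': '<vdc-uuid>'}].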
480 org_dict = self.get_org(self.org_uuid)
481 vdcs_dict = org_dict['vdcs']
482
483 vdclist = []
484 try:
485 for k in vdcs_dict:
486 entry = {'name': vdcs_dict[k], 'id': k}
487 # if caller didn't specify dictionary we return all tenants.
488 if filter_dict is not None and filter_dict:
489 filtered_entry = entry.copy()
490 filtered_dict = set(entry.keys()) - set(filter_dict)
491 for unwanted_key in filtered_dict: del entry[unwanted_key]
492 if filter_dict == entry:
493 vdclist.append(filtered_entry)
494 else:
495 vdclist.append(entry)
496 except:
497 self.logger.debug("Error in get_tenant_list()")
498 self.logger.debug(traceback.format_exc())
499 raise vimconn.vimconnException("Incorrect state.")
500
501 return vdclist
502
503 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
504 """Adds a tenant network to VIM
505 net_name is the name
506 net_type can be 'bridge', 'data' or 'ptp'.
507 ip_profile is a dict containing the IP parameters of the network
508 shared is a boolean
509 Returns the network identifier"""
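# Minimal illustrative ip_profile sketch (hypothetical values) built from the DEFAULT_IP_PROFILE keys
# defined at the top of this module; create_network() may accept further addressing keys not shown here:
#   ip_profile = {'ip_version': 'IPv4', 'dhcp_enabled': True, 'dhcp_count': 50}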
510
511 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
512 .format(net_name, net_type, ip_profile, shared))
513
514 isshared = 'false'
515 if shared:
516 isshared = 'true'
517
518 # ############# Stub code for SRIOV #################
519 # if net_type == "data" or net_type == "ptp":
520 # if self.config.get('dv_switch_name') == None:
521 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
522 # network_uuid = self.create_dvPort_group(net_name)
523
524 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
525 ip_profile=ip_profile, isshared=isshared)
526 if network_uuid is not None:
527 return network_uuid
528 else:
529 raise vimconn.vimconnUnexpectedResponse("Failed to create a new network {}".format(net_name))
530
531 def get_vcd_network_list(self):
532 """ Method lists the networks available in the logged-in tenant's VDC.
533
534 Returns:
535 A list of network dictionaries for the VDC.
536 """
537
538 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
539
540 if not self.tenant_name:
541 raise vimconn.vimconnConnectionException("Tenant name is empty.")
542
543 org, vdc = self.get_vdc_details()
544 if vdc is None:
545 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
546
547 vdc_uuid = vdc.get('id').split(":")[3]
548 if self.client._session:
549 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
550 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
551 response = self.perform_request(req_type='GET',
552 url=vdc.get('href'),
553 headers=headers)
554 if response.status_code != 200:
555 self.logger.error("Failed to get vdc content")
556 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
557 else:
558 content = XmlElementTree.fromstring(response.content)
559
560 network_list = []
561 try:
562 for item in content:
563 if item.tag.split('}')[-1] == 'AvailableNetworks':
564 for net in item:
565 response = self.perform_request(req_type='GET',
566 url=net.get('href'),
567 headers=headers)
568
569 if response.status_code != 200:
570 self.logger.error("Failed to get network content")
571 raise vimconn.vimconnNotFoundException("Failed to get network content")
572 else:
573 net_details = XmlElementTree.fromstring(response.content)
574
575 filter_dict = {}
576 net_uuid = net_details.get('id').split(":")
577 if len(net_uuid) != 4:
578 continue
579 else:
580 net_uuid = net_uuid[3]
581 # create dict entry
582 self.logger.debug("get_vcd_network_list(): Adding network {} "
583 "to a list vcd id {} network {}".format(net_uuid,
584 vdc_uuid,
585 net_details.get('name')))
586 filter_dict["name"] = net_details.get('name')
587 filter_dict["id"] = net_uuid
588 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
589 shared = True
590 else:
591 shared = False
592 filter_dict["shared"] = shared
593 filter_dict["tenant_id"] = vdc_uuid
594 if int(net_details.get('status')) == 1:
595 filter_dict["admin_state_up"] = True
596 else:
597 filter_dict["admin_state_up"] = False
598 filter_dict["status"] = "ACTIVE"
599 filter_dict["type"] = "bridge"
600 network_list.append(filter_dict)
601 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
602 except:
603 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
604 pass
605
606 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
607 return network_list
608
609 def get_network_list(self, filter_dict={}):
610 """Obtain tenant networks of VIM
611 Filter_dict can be:
612 name: network name OR/AND
613 id: network uuid OR/AND
614 shared: boolean OR/AND
615 tenant_id: tenant OR/AND
616 admin_state_up: boolean
617 status: 'ACTIVE'
618
619 [{key : value , key : value}]
620
621 Returns the network list of dictionaries:
622 [{<the fields at Filter_dict plus some VIM specific>}, ...]
623 List can be empty
624 """
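# Illustrative filtering behaviour (hypothetical name): get_network_list({'name': 'mgmt-net'}) returns only
# the entries whose values match every key given in filter_dict; an empty or None filter returns all networks.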
625
626 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
627
628 if not self.tenant_name:
629 raise vimconn.vimconnConnectionException("Tenant name is empty.")
630
631 org, vdc = self.get_vdc_details()
632 if vdc is None:
633 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
634
635 try:
636 vdcid = vdc.get('id').split(":")[3]
637
638 if self.client._session:
639 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
640 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
641 response = self.perform_request(req_type='GET',
642 url=vdc.get('href'),
643 headers=headers)
644 if response.status_code != 200:
645 self.logger.error("Failed to get vdc content")
646 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
647 else:
648 content = XmlElementTree.fromstring(response.content)
649
650 network_list = []
651 for item in content:
652 if item.tag.split('}')[-1] == 'AvailableNetworks':
653 for net in item:
654 response = self.perform_request(req_type='GET',
655 url=net.get('href'),
656 headers=headers)
657
658 if response.status_code != 200:
659 self.logger.error("Failed to get network content")
660 raise vimconn.vimconnNotFoundException("Failed to get network content")
661 else:
662 net_details = XmlElementTree.fromstring(response.content)
663
664 filter_entry = {}
665 net_uuid = net_details.get('id').split(":")
666 if len(net_uuid) != 4:
667 continue
668 else:
669 net_uuid = net_uuid[3]
670 # create dict entry
671 self.logger.debug("get_network_list(): Adding net {}"
672 " to a list vcd id {} network {}".format(net_uuid,
673 vdcid,
674 net_details.get('name')))
675 filter_entry["name"] = net_details.get('name')
676 filter_entry["id"] = net_uuid
677 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
678 shared = True
679 else:
680 shared = False
681 filter_entry["shared"] = shared
682 filter_entry["tenant_id"] = vdcid
683 if int(net_details.get('status')) == 1:
684 filter_entry["admin_state_up"] = True
685 else:
686 filter_entry["admin_state_up"] = False
687 filter_entry["status"] = "ACTIVE"
688 filter_entry["type"] = "bridge"
689 filtered_entry = filter_entry.copy()
690
691 if filter_dict is not None and filter_dict:
692 # we remove all the key : value we don't care and match only
693 # respected field
694 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
695 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
696 if filter_dict == filter_entry:
697 network_list.append(filtered_entry)
698 else:
699 network_list.append(filtered_entry)
700 except Exception as e:
701 self.logger.debug("Error in get_network_list",exc_info=True)
702 if isinstance(e, vimconn.vimconnException):
703 raise
704 else:
705 raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
706
707 self.logger.debug("Returning {}".format(network_list))
708 return network_list
709
710 def get_network(self, net_id):
711 """Method obtains network details of net_id VIM network
712 Returns a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields."""
713
714 try:
715 org, vdc = self.get_vdc_details()
716 vdc_id = vdc.get('id').split(":")[3]
717 if self.client._session:
718 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
719 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
720 response = self.perform_request(req_type='GET',
721 url=vdc.get('href'),
722 headers=headers)
723 if response.status_code != 200:
724 self.logger.error("Failed to get vdc content")
725 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
726 else:
727 content = XmlElementTree.fromstring(response.content)
728
729 filter_dict = {}
730
731 for item in content:
732 if item.tag.split('}')[-1] == 'AvailableNetworks':
733 for net in item:
734 response = self.perform_request(req_type='GET',
735 url=net.get('href'),
736 headers=headers)
737
738 if response.status_code != 200:
739 self.logger.error("Failed to get network content")
740 raise vimconn.vimconnNotFoundException("Failed to get network content")
741 else:
742 net_details = XmlElementTree.fromstring(response.content)
743
744 vdc_network_id = net_details.get('id').split(":")
745 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
746 filter_dict["name"] = net_details.get('name')
747 filter_dict["id"] = vdc_network_id[3]
748 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
749 shared = True
750 else:
751 shared = False
752 filter_dict["shared"] = shared
753 filter_dict["tenant_id"] = vdc_id
754 if int(net_details.get('status')) == 1:
755 filter_dict["admin_state_up"] = True
756 else:
757 filter_dict["admin_state_up"] = False
758 filter_dict["status"] = "ACTIVE"
759 filter_dict["type"] = "bridge"
760 self.logger.debug("Returning {}".format(filter_dict))
761 return filter_dict
762 else:
763 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
764 except Exception as e:
765 self.logger.debug("Error in get_network")
766 self.logger.debug(traceback.format_exc())
767 if isinstance(e, vimconn.vimconnException):
768 raise
769 else:
770 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
771
772 return filter_dict
773
774 def delete_network(self, net_id):
775 """
776 Method deletes a tenant network from VIM, given the network id.
777
778 Returns the network identifier or raise an exception
779 """
780
781 # ############# Stub code for SRIOV #################
782 # dvport_group = self.get_dvport_group(net_id)
783 # if dvport_group:
784 # #delete portgroup
785 # status = self.destroy_dvport_group(net_id)
786 # if status:
787 # # Remove vlanID from persistent info
788 # if net_id in self.persistent_info["used_vlanIDs"]:
789 # del self.persistent_info["used_vlanIDs"][net_id]
790 #
791 # return net_id
792
793 vcd_network = self.get_vcd_network(network_uuid=net_id)
794 if vcd_network is not None and vcd_network:
795 if self.delete_network_action(network_uuid=net_id):
796 return net_id
797 else:
798 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
799
800 def refresh_nets_status(self, net_list):
801 """Get the status of the networks
802 Params: the list of network identifiers
803 Returns a dictionary with:
804 net_id: #VIM id of this network
805 status: #Mandatory. Text with one of:
806 # DELETED (not found at vim)
807 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
808 # OTHER (Vim reported other status not understood)
809 # ERROR (VIM indicates an ERROR status)
810 # ACTIVE, INACTIVE, DOWN (admin down),
811 # BUILD (on building process)
812 #
813 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
814 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
815
816 """
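# Illustrative return value for a single network (hypothetical uuid):
#   {'<net-uuid>': {'status': 'ACTIVE', 'error_msg': '', 'vim_info': '<yaml dump of the vCD network dict>'}}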
817
818 dict_entry = {}
819 try:
820 for net in net_list:
821 errormsg = ''
822 vcd_network = self.get_vcd_network(network_uuid=net)
823 if vcd_network is not None and vcd_network:
824 if vcd_network['status'] == '1':
825 status = 'ACTIVE'
826 else:
827 status = 'DOWN'
828 else:
829 status = 'DELETED'
830 errormsg = 'Network not found.'
831
832 dict_entry[net] = {'status': status, 'error_msg': errormsg,
833 'vim_info': yaml.safe_dump(vcd_network)}
834 except:
835 self.logger.debug("Error in refresh_nets_status")
836 self.logger.debug(traceback.format_exc())
837
838 return dict_entry
839
840 def get_flavor(self, flavor_id):
841 """Obtain flavor details from the VIM
842 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
843 """
844 if flavor_id not in vimconnector.flavorlist:
845 raise vimconn.vimconnNotFoundException("Flavor not found.")
846 return vimconnector.flavorlist[flavor_id]
847
848 def new_flavor(self, flavor_data):
849 """Adds a tenant flavor to VIM
850 flavor_data contains a dictionary with information, keys:
851 name: flavor name
852 ram: memory (cloud type) in MBytes
853 vcpus: cpus (cloud type)
854 extended: EPA parameters
855 - numas: #items requested in same NUMA
856 memory: number of 1G huge pages memory
857 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
858 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
859 - name: interface name
860 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
861 bandwidth: X Gbps; requested guarantee bandwidth
862 vpci: requested virtual PCI address
863 disk: disk size
864 is_public:
865 #TODO to concrete
866 Returns the flavor identifier"""
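# Illustrative flavor_data sketch (hypothetical values) showing how the NUMA handling below overrides
# ram/vcpus: with numas = [{'memory': 4, 'paired-threads': 2}] the stored flavor gets
# ram = 4 * 1024 = 4096 MB and vcpus = 2 * 2 = 4, regardless of the top-level 'ram'/'vcpus' values.
#   flavor_data = {'name': 'epa-flavor', 'ram': 2048, 'vcpus': 2, 'disk': 10,
#                  'extended': {'numas': [{'memory': 4, 'paired-threads': 2}]}}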
867
868 # generate a new uuid put to internal dict and return it.
869 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
870 new_flavor=flavor_data
871 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
872 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
873 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
874
875 if not isinstance(ram, int):
876 raise vimconn.vimconnException("Non-integer value for ram")
877 elif not isinstance(cpu, int):
878 raise vimconn.vimconnException("Non-integer value for cpu")
879 elif not isinstance(disk, int):
880 raise vimconn.vimconnException("Non-integer value for disk")
881
882 extended_flv = flavor_data.get("extended")
883 if extended_flv:
884 numas=extended_flv.get("numas")
885 if numas:
886 for numa in numas:
887 #overwrite ram and vcpus
888 if 'memory' in numa:
889 ram = numa['memory']*1024
890 if 'paired-threads' in numa:
891 cpu = numa['paired-threads']*2
892 elif 'cores' in numa:
893 cpu = numa['cores']
894 elif 'threads' in numa:
895 cpu = numa['threads']
896
897 new_flavor[FLAVOR_RAM_KEY] = ram
898 new_flavor[FLAVOR_VCPUS_KEY] = cpu
899 new_flavor[FLAVOR_DISK_KEY] = disk
900 # generate a new uuid put to internal dict and return it.
901 flavor_id = uuid.uuid4()
902 vimconnector.flavorlist[str(flavor_id)] = new_flavor
903 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
904
905 return str(flavor_id)
906
907 def delete_flavor(self, flavor_id):
908 """Deletes a tenant flavor from VIM identify by its id
909
910 Returns the used id or raise an exception
911 """
912 if flavor_id not in vimconnector.flavorlist:
913 raise vimconn.vimconnNotFoundException("Flavor not found.")
914
915 vimconnector.flavorlist.pop(flavor_id, None)
916 return flavor_id
917
918 def new_image(self, image_dict):
919 """
920 Adds a tenant image to VIM
921 Returns:
922 the image identifier (catalog UUID) if the image is created
923 an exception is raised on error
924 """
925
926 return self.get_image_id_from_path(image_dict['location'])
927
928 def delete_image(self, image_id):
929 """
930 Deletes a tenant image from VIM
931 Args:
932 image_id is ID of Image to be deleted
933 Return:
934 returns the image identifier in UUID format or raises an exception on error
935 """
936 conn = self.connect_as_admin()
937 if not conn:
938 raise vimconn.vimconnConnectionException("Failed to connect vCD")
939 # Get Catalog details
940 url_list = [self.url, '/api/catalog/', image_id]
941 catalog_herf = ''.join(url_list)
942
943 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
944 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
945
946 response = self.perform_request(req_type='GET',
947 url=catalog_herf,
948 headers=headers)
949
950 if response.status_code != requests.codes.ok:
951 self.logger.debug("delete_image():GET REST API call {} failed. "\
952 "Return status code {}".format(catalog_herf,
953 response.status_code))
954 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
955
956 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
957 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
958 #For python3
959 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
960 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
961
962 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
963 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
964 for catalogItem in catalogItems:
965 catalogItem_href = catalogItem.attrib['href']
966
967 response = self.perform_request(req_type='GET',
968 url=catalogItem_href,
969 headers=headers)
970
971 if response.status_code != requests.codes.ok:
972 self.logger.debug("delete_image():GET REST API call {} failed. "\
973 "Return status code {}".format(catalog_herf,
974 response.status_code))
975 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
976 catalogItem,
977 image_id))
978
979 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
980 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
981 #For python3
982 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
983 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
984 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
985
986 #Remove catalogItem
987 response = self.perform_request(req_type='DELETE',
988 url=catalogitem_remove_href,
989 headers=headers)
990 if response.status_code == requests.codes.no_content:
991 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
992 else:
993 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
994
995 #Remove catalog
996 url_list = [self.url, '/api/admin/catalog/', image_id]
997 catalog_remove_herf = ''.join(url_list)
998 response = self.perform_request(req_type='DELETE',
999 url=catalog_remove_herf,
1000 headers=headers)
1001
1002 if response.status_code == requests.codes.no_content:
1003 self.logger.debug("Deleted Catalog {}".format(image_id))
1004 return image_id
1005 else:
1006 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
1007
1008
1009 def catalog_exists(self, catalog_name, catalogs):
1010 """
1011
1012 :param catalog_name:
1013 :param catalogs:
1014 :return:
1015 """
1016 for catalog in catalogs:
1017 if catalog['name'] == catalog_name:
1018 return True
1019 return False
1020
1021 def create_vimcatalog(self, vca=None, catalog_name=None):
1022 """ Create new catalog entry in vCloud director.
1023
1024 Args
1025 vca: vCloud director.
1026 catalog_name: catalog that the client wishes to create. Note that no validation is done on the name;
1027 the client must make sure to provide a valid string representation.
1028
1029 Return (bool) True if catalog created.
1030
1031 """
1032 try:
1033 result = vca.create_catalog(catalog_name, catalog_name)
1034 if result is not None:
1035 return True
1036 catalogs = vca.list_catalogs()
1037 except:
1038 return False
1039 return self.catalog_exists(catalog_name, catalogs)
1040
1041 # noinspection PyIncorrectDocstring
1042 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
1043 description='', progress=False, chunk_bytes=128 * 1024):
1044 """
1045 Uploads a OVF file to a vCloud catalog
1046
1047 :param chunk_bytes:
1048 :param progress:
1049 :param description:
1050 :param image_name:
1051 :param vca:
1052 :param catalog_name: (str): The name of the catalog to upload the media.
1053 :param media_file_name: (str): The name of the local media file to upload.
1054 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1055 """
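# Rough outline of the upload flow implemented below (derived from the REST calls in this method):
#   1. POST UploadVAppTemplateParams to the catalog's /action/upload link to create the vApp template entry.
#   2. PUT the OVF descriptor to the returned "upload:default" href.
#   3. Re-read the template, resolve the .vmdk upload href and PUT the disk contents in chunks, setting
#      Content-Range/Content-Length headers for each chunk.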
1056 os.path.isfile(media_file_name)
1057 statinfo = os.stat(media_file_name)
1058
1059 # find a catalog entry where we upload OVF.
1060 # create a vApp Template and check the status; if vCD is able to read the OVF it will respond with the
1061 # appropriate status change.
1062 # if vCD can parse the OVF, we upload the VMDK file
1063 try:
1064 for catalog in vca.list_catalogs():
1065 if catalog_name != catalog['name']:
1066 continue
1067 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
1068 data = """
1069 <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
1070 """.format(catalog_name, description)
1071
1072 if self.client:
1073 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1074 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1075 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1076
1077 response = self.perform_request(req_type='POST',
1078 url=catalog_href,
1079 headers=headers,
1080 data=data)
1081
1082 if response.status_code == requests.codes.created:
1083 catalogItem = XmlElementTree.fromstring(response.content)
1084 entity = [child for child in catalogItem if
1085 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1086 href = entity.get('href')
1087 template = href
1088
1089 response = self.perform_request(req_type='GET',
1090 url=href,
1091 headers=headers)
1092
1093 if response.status_code == requests.codes.ok:
1094 headers['Content-Type'] = 'Content-Type text/xml'
1095 result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"',response.content)
1096 if result:
1097 transfer_href = result.group(1)
1098
1099 response = self.perform_request(req_type='PUT',
1100 url=transfer_href,
1101 headers=headers,
1102 data=open(media_file_name, 'rb'))
1103 if response.status_code != requests.codes.ok:
1104 self.logger.debug(
1105 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1106 media_file_name))
1107 return False
1108
1109 # TODO fix this with an async block
1110 time.sleep(5)
1111
1112 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1113
1114 # uploading VMDK file
1115 # check status of OVF upload and upload remaining files.
1116 response = self.perform_request(req_type='GET',
1117 url=template,
1118 headers=headers)
1119
1120 if response.status_code == requests.codes.ok:
1121 result = re.search('rel="upload:default"\s*href="(.*?vmdk)"',response.content)
1122 if result:
1123 link_href = result.group(1)
1124 # we skip the ovf since it is already uploaded.
1125 if 'ovf' in link_href:
1126 continue
1127 # The OVF file and the VMDK must be in the same directory
1128 head, tail = os.path.split(media_file_name)
1129 file_vmdk = head + '/' + link_href.split("/")[-1]
1130 if not os.path.isfile(file_vmdk):
1131 return False
1132 statinfo = os.stat(file_vmdk)
1133 if statinfo.st_size == 0:
1134 return False
1135 hrefvmdk = link_href
1136
1137 if progress:
1138 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1139 FileTransferSpeed()]
1140 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1141
1142 bytes_transferred = 0
1143 f = open(file_vmdk, 'rb')
1144 while bytes_transferred < statinfo.st_size:
1145 my_bytes = f.read(chunk_bytes)
1146 if len(my_bytes) <= chunk_bytes:
1147 headers['Content-Range'] = 'bytes %s-%s/%s' % (
1148 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1149 headers['Content-Length'] = str(len(my_bytes))
1150 response = requests.put(url=hrefvmdk,
1151 headers=headers,
1152 data=my_bytes,
1153 verify=False)
1154 if response.status_code == requests.codes.ok:
1155 bytes_transferred += len(my_bytes)
1156 if progress:
1157 progress_bar.update(bytes_transferred)
1158 else:
1159 self.logger.debug(
1160 'file upload failed with error: [%s] %s' % (response.status_code,
1161 response.content))
1162
1163 f.close()
1164 return False
1165 f.close()
1166 if progress:
1167 progress_bar.finish()
1168 time.sleep(10)
1169 return True
1170 else:
1171 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1172 format(catalog_name, media_file_name))
1173 return False
1174 except Exception as exp:
1175 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1176 .format(catalog_name,media_file_name, exp))
1177 raise vimconn.vimconnException(
1178 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1179 .format(catalog_name,media_file_name, exp))
1180
1181 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1182 return False
1183
1184 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1185 """Upload media file"""
1186 # TODO add named parameters for readability
1187
1188 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1189 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1190
1191 def validate_uuid4(self, uuid_string=None):
1192 """ Method validates the format of a UUID.
1193
1194 Return: True if the string represents a valid uuid
1195 """
1196 try:
1197 val = uuid.UUID(uuid_string, version=4)
1198 except ValueError:
1199 return False
1200 return True
1201
1202 def get_catalogid(self, catalog_name=None, catalogs=None):
1203 """ Method checks the catalogs and returns the catalog ID in UUID format.
1204
1205 Args
1206 catalog_name: catalog name as string
1207 catalogs: list of catalogs.
1208
1209 Return: catalog uuid, or None if not found
1210 """
1211
1212 for catalog in catalogs:
1213 if catalog['name'] == catalog_name:
1214 catalog_id = catalog['id']
1215 return catalog_id
1216 return None
1217
1218 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1219 """ Method returns the catalog name; the lookup is done by catalog UUID.
1220
1221 Args
1222 catalog_uuid: catalog UUID as string
1223 catalogs: list of catalogs.
1224
1225 Return: catalog name or None
1226 """
1227
1228 if not self.validate_uuid4(uuid_string=catalog_uuid):
1229 return None
1230
1231 for catalog in catalogs:
1232 catalog_id = catalog.get('id')
1233 if catalog_id == catalog_uuid:
1234 return catalog.get('name')
1235 return None
1236
1237 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1238 """ Method returns the catalog object; the lookup is done by catalog UUID.
1239
1240 Args
1241 catalog_uuid: catalog UUID as string
1242 catalogs: list of catalogs.
1243
1244 Return: catalog object or None
1245 """
1246
1247 if not self.validate_uuid4(uuid_string=catalog_uuid):
1248 return None
1249
1250 for catalog in catalogs:
1251 catalog_id = catalog.get('id')
1252 if catalog_id == catalog_uuid:
1253 return catalog
1254 return None
1255
1256 def get_image_id_from_path(self, path=None, progress=False):
1257 """ Method upload OVF image to vCloud director.
1258
1259 Each OVF image is represented as a single catalog entry in vCloud Director.
1260 The method checks for an existing catalog entry; the check is done by the file name without its extension.
1261
1262 If the given catalog name is already present, the method responds with the existing catalog uuid; otherwise
1263 it creates a new catalog entry and uploads the OVF file to the newly created catalog.
1264
1265 If the method can't create the catalog entry or upload the file, it throws an exception.
1266
1267 The method accepts a boolean flag, progress, that outputs a progress bar. It is useful
1268 for the standalone upload use case, e.g. to test a large file upload.
1269
1270 Args
1271 path: - valid path to OVF file.
1272 progress - boolean progress bar show progress bar.
1273
1274 Return: if image uploaded correct method will provide image catalog UUID.
1275 """
1276
1277 if not path:
1278 raise vimconn.vimconnException("Image path can't be None.")
1279
1280 if not os.path.isfile(path):
1281 raise vimconn.vimconnException("Can't read file. File not found.")
1282
1283 if not os.access(path, os.R_OK):
1284 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1285
1286 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1287
1288 dirpath, filename = os.path.split(path)
1289 flname, file_extension = os.path.splitext(path)
1290 if file_extension != '.ovf':
1291 self.logger.debug("Wrong file extension {}; the connector supports only OVF containers.".format(file_extension))
1292 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1293
1294 catalog_name = os.path.splitext(filename)[0]
1295 catalog_md5_name = hashlib.md5(path).hexdigest()
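# Note: the catalog name used for the lookup below is the md5 hash of the path string (not of the file
# contents), so uploading the same OVF from a different path results in a new catalog entry.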
1296 self.logger.debug("File name {} Catalog Name {} file path {} "
1297 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1298
1299 try:
1300 org,vdc = self.get_vdc_details()
1301 catalogs = org.list_catalogs()
1302 except Exception as exp:
1303 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1304 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1305
1306 if len(catalogs) == 0:
1307 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1308 result = self.create_vimcatalog(org, catalog_md5_name)
1309 if not result:
1310 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1311
1312 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1313 media_name=filename, medial_file_name=path, progress=progress)
1314 if not result:
1315 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1316 return self.get_catalogid(catalog_name, catalogs)
1317 else:
1318 for catalog in catalogs:
1319 # search for existing catalog if we find same name we return ID
1320 # TODO optimize this
1321 if catalog['name'] == catalog_md5_name:
1322 self.logger.debug("Found existing catalog entry for {} "
1323 "catalog id {}".format(catalog_name,
1324 self.get_catalogid(catalog_md5_name, catalogs)))
1325 return self.get_catalogid(catalog_md5_name, catalogs)
1326
1327 # if we didn't find existing catalog we create a new one and upload image.
1328 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1329 result = self.create_vimcatalog(org, catalog_md5_name)
1330 if not result:
1331 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1332
1333 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1334 media_name=filename, medial_file_name=path, progress=progress)
1335 if not result:
1336 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1337
1338 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1339
1340 def get_image_list(self, filter_dict={}):
1341 '''Obtain tenant images from VIM
1342 Filter_dict can be:
1343 name: image name
1344 id: image uuid
1345 checksum: image checksum
1346 location: image path
1347 Returns the image list of dictionaries:
1348 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1349 List can be empty
1350 '''
1351
1352 try:
1353 org, vdc = self.get_vdc_details()
1354 image_list = []
1355 catalogs = org.list_catalogs()
1356 if len(catalogs) == 0:
1357 return image_list
1358 else:
1359 for catalog in catalogs:
1360 catalog_uuid = catalog.get('id')
1361 name = catalog.get('name')
1362 filtered_dict = {}
1363 if filter_dict.get("name") and filter_dict["name"] != name:
1364 continue
1365 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1366 continue
1367 filtered_dict ["name"] = name
1368 filtered_dict ["id"] = catalog_uuid
1369 image_list.append(filtered_dict)
1370
1371 self.logger.debug("List of already created catalog items: {}".format(image_list))
1372 return image_list
1373 except Exception as exp:
1374 raise vimconn.vimconnException("Exception occurred while retrieving catalog items {}".format(exp))
1375
1376 def get_vappid(self, vdc=None, vapp_name=None):
1377 """ Method takes vdc object and vApp name and returns vapp uuid or None
1378
1379 Args:
1380 vdc: The VDC object.
1381 vapp_name: is the vApp name identifier
1382
1383 Returns:
1384 The vApp uuid, otherwise None
1385 """
1386 if vdc is None or vapp_name is None:
1387 return None
1388 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1389 try:
1390 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1391 vdc.ResourceEntities.ResourceEntity)
1392 #For python3
1393 #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1394 # if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1395 if len(refs) == 1:
1396 return refs[0].href.split("vapp")[1][1:]
1397 except Exception as e:
1398 self.logger.exception(e)
1399 return False
1400 return None
1401
1402 def check_vapp(self, vdc=None, vapp_uuid=None):
1403 """ Method returns True or False depending on whether the vApp is deployed in vCloud Director
1404
1405 Args:
1406 vca: Connector to VCA
1407 vdc: The VDC object.
1408 vappid: vappid is application identifier
1409
1410 Returns:
1411 Returns True if the vApp is deployed
1412 :param vdc:
1413 :param vapp_uuid:
1414 """
1415 try:
1416 refs = filter(lambda ref:
1417 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1418 vdc.ResourceEntities.ResourceEntity)
1419 #For python3
1420 #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1421 # if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1422 for ref in refs:
1423 vappid = ref.href.split("vapp")[1][1:]
1424 # find vapp with respected vapp uuid
1425 if vappid == vapp_uuid:
1426 return True
1427 except Exception as e:
1428 self.logger.exception(e)
1429 return False
1430 return False
1431
1432 def get_namebyvappid(self, vapp_uuid=None):
1433 """Method returns vApp name from vCD and lookup done by vapp_id.
1434
1435 Args:
1436 vapp_uuid: vappid is application identifier
1437
1438 Returns:
1439 The vApp name, otherwise None
1440 """
1441 try:
1442 if self.client and vapp_uuid:
1443 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1444 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1445 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1446
1447 response = self.perform_request(req_type='GET',
1448 url=vapp_call,
1449 headers=headers)
1450 #Retry login if session expired & retry sending request
1451 if response.status_code == 403:
1452 response = self.retry_rest('GET', vapp_call)
1453
1454 tree = XmlElementTree.fromstring(response.content)
1455 return tree.attrib['name']
1456 except Exception as e:
1457 self.logger.exception(e)
1458 return None
1459 return None
1460
1461 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1462 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1463 """Adds a VM instance to VIM
1464 Params:
1465 'start': (boolean) indicates if VM must start or created in pause mode.
1466 'image_id','flavor_id': image and flavor VIM id to use for the VM
1467 'net_list': list of interfaces, each one is a dictionary with:
1468 'name': (optional) name for the interface.
1469 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1470 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1471 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1472 'mac_address': (optional) mac address to assign to this interface
1473 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1474 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1475 'type': (mandatory) can be one of:
1476 'virtual', in this case always connected to a network of type 'net_type=bridge'
1477 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
1478 can created unconnected
1479 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1480 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1481 are allocated on the same physical NIC
1482 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1483 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1484 or True, it must apply the default VIM behaviour
1485 After execution the method will add the key:
1486 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1487 interface. 'net_list' is modified
1488 'cloud_config': (optional) dictionary with:
1489 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1490 'users': (optional) list of users to be inserted, each item is a dict with:
1491 'name': (mandatory) user name,
1492 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1493 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1494 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1495 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1496 'dest': (mandatory) string with the destination absolute path
1497 'encoding': (optional, by default text). Can be one of:
1498 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1499 'content' (mandatory): string with the content of the file
1500 'permissions': (optional) string with file permissions, typically octal notation '0644'
1501 'owner': (optional) file owner, string with the format 'owner:group'
1502 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1503 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1504 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1505 'size': (mandatory) string with the size of the disk in GB
1506 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1507 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1508 availability_zone_index is None
1509 Returns a tuple with the instance identifier and created_items or raises an exception on error
1510 created_items can be None or a dictionary where this method can include key-values that will be passed to
1511 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1512 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1513 as not present.
1514 """
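# Illustrative net_list entry sketch (hypothetical ids) covering the fields this method reads below;
# a 'use' value of 'mgmt' marks the interface attached to the primary/management network:
#   net_list = [{'name': 'eth0', 'net_id': '<vcd-network-uuid>', 'type': 'virtual',
#                'use': 'mgmt', 'mac_address': None}]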
1515 self.logger.info("Creating new instance for entry {}".format(name))
1516 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
1517 "availability_zone_index {} availability_zone_list {}"\
1518 .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
1519 availability_zone_index, availability_zone_list))
1520
1521 # new vm name = vmname + '-' + uuid
1522 new_vm_name = [name, '-', str(uuid.uuid4())]
1523 vmname_andid = ''.join(new_vm_name)
1524
1525 for net in net_list:
1526 if net['type'] == "PCI-PASSTHROUGH":
1527 raise vimconn.vimconnNotSupportedException(
1528 "Current vCD version does not support type : {}".format(net['type']))
1529
1530 if len(net_list) > 10:
1531 raise vimconn.vimconnNotSupportedException(
1532 "The VM hardware versions 7 and above support upto 10 NICs only")
1533
1534 # if the vm is already deployed we return the existing uuid
1535 # we check for the presence of VDC, catalog entry and flavor.
1536 org, vdc = self.get_vdc_details()
1537 if vdc is None:
1538 raise vimconn.vimconnNotFoundException(
1539 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1540 catalogs = org.list_catalogs()
1541 if catalogs is None:
1542 #Retry once, if failed by refreshing token
1543 self.get_token()
1544 org = Org(self.client, resource=self.client.get_org())
1545 catalogs = org.list_catalogs()
1546 if catalogs is None:
1547 raise vimconn.vimconnNotFoundException(
1548 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1549
1550 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1551 if catalog_hash_name:
1552 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1553 else:
1554 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1555 "(Failed retrieve catalog information {})".format(name, image_id))
1556
1557 # Set vCPU and Memory based on flavor.
1558 vm_cpus = None
1559 vm_memory = None
1560 vm_disk = None
1561 numas = None
1562
1563 if flavor_id is not None:
1564 if flavor_id not in vimconnector.flavorlist:
1565 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1566 "Failed retrieve flavor information "
1567 "flavor id {}".format(name, flavor_id))
1568 else:
1569 try:
1570 flavor = vimconnector.flavorlist[flavor_id]
1571 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1572 vm_memory = flavor[FLAVOR_RAM_KEY]
1573 vm_disk = flavor[FLAVOR_DISK_KEY]
1574 extended = flavor.get("extended", None)
1575 if extended:
1576 numas=extended.get("numas", None)
1577
1578 except Exception as exp:
1579 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1580
1581 # image upload creates the template name as the catalog name followed by a space and 'Template'.
1582 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1583 power_on = 'false'
1584 if start:
1585 power_on = 'true'
1586
1587 # client must provide at least one entry in net_list; if not, we report an error
1588 # If a net has use 'mgmt', configure it as the primary net & use its NIC index as the primary NIC
1589 # If there is no mgmt net, the 1st net in net_list is considered the primary net.
1590 primary_net = None
1591 primary_netname = None
1592 primary_net_href = None
1593 network_mode = 'bridged'
1594 if net_list is not None and len(net_list) > 0:
1595 for net in net_list:
1596 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1597 primary_net = net
1598 if primary_net is None:
1599 primary_net = net_list[0]
1600
1601 try:
1602 primary_net_id = primary_net['net_id']
1603 url_list = [self.url, '/api/network/', primary_net_id]
1604 primary_net_href = ''.join(url_list)
1605 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1606 if 'name' in network_dict:
1607 primary_netname = network_dict['name']
1608
1609 except KeyError:
1610 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1611 else:
1612 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create vApp {}: network list is empty.".format(name))
1613
1614 # use: 'data', 'bridge', 'mgmt'
1615 # create vApp. Set vcpu and ram based on flavor id.
1616 try:
1617 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1618 if not vdc_obj:
1619 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
1620
1621 for retry in (1,2):
1622 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1623 catalog_items = [items.attrib]
1624
1625 if len(catalog_items) == 1:
1626 if self.client:
1627 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1628 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1629
1630 response = self.perform_request(req_type='GET',
1631 url=catalog_items[0].get('href'),
1632 headers=headers)
1633 catalogItem = XmlElementTree.fromstring(response.content)
1634 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1635 vapp_template_href = entity.get("href")
1636
1637 response = self.perform_request(req_type='GET',
1638 url=vapp_template_href,
1639 headers=headers)
1640 if response.status_code != requests.codes.ok:
1641 self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_template_href,
1642 response.status_code))
1643 else:
1644 result = (response.content).replace("\n"," ")
1645
1646 src = re.search('<Vm goldMaster="false"\sstatus="\d+"\sname="(.*?)"\s'
1647 'id="(\w+:\w+:vm:.*?)"\shref="(.*?)"\s'
1648 'type="application/vnd\.vmware\.vcloud\.vm\+xml',result)
1649 if src:
1650 vm_name = src.group(1)
1651 vm_id = src.group(2)
1652 vm_href = src.group(3)
1653
1654 cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1655 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1656 cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
1657
1658 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
1659 vdc_id = vdc.get('id').split(':')[-1]
1660 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
1661 vdc_id)
1662 data = """<?xml version="1.0" encoding="UTF-8"?>
1663 <InstantiateVAppTemplateParams
1664 xmlns="http://www.vmware.com/vcloud/v1.5"
1665 name="{}"
1666 deploy="false"
1667 powerOn="false"
1668 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
1669 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1670 <Description>Vapp instantiation</Description>
1671 <InstantiationParams>
1672 <NetworkConfigSection>
1673 <ovf:Info>Configuration parameters for logical networks</ovf:Info>
1674 <NetworkConfig networkName="{}">
1675 <Configuration>
1676 <ParentNetwork href="{}" />
1677 <FenceMode>bridged</FenceMode>
1678 </Configuration>
1679 </NetworkConfig>
1680 </NetworkConfigSection>
1681 <LeaseSettingsSection
1682 type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
1683 <ovf:Info>Lease Settings</ovf:Info>
1684 <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
1685 <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
1686 </LeaseSettingsSection>
1687 </InstantiationParams>
1688 <Source href="{}"/>
1689 <SourcedItem>
1690 <Source href="{}" id="{}" name="{}"
1691 type="application/vnd.vmware.vcloud.vm+xml"/>
1692 <VmGeneralParams>
1693 <NeedsCustomization>false</NeedsCustomization>
1694 </VmGeneralParams>
1695 <InstantiationParams>
1696 <NetworkConnectionSection>
1697 <ovf:Info>Specifies the available VM network connections</ovf:Info>
1698 <NetworkConnection network="{}">
1699 <NetworkConnectionIndex>0</NetworkConnectionIndex>
1700 <IsConnected>true</IsConnected>
1701 <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
1702 </NetworkConnection>
1703 </NetworkConnectionSection><ovf:VirtualHardwareSection>
1704 <ovf:Info>Virtual hardware requirements</ovf:Info>
1705 <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
1706 xmlns:vmw="http://www.vmware.com/schema/ovf">
1707 <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
1708 <rasd:Description>Number of Virtual CPUs</rasd:Description>
1709 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
1710 <rasd:InstanceID>4</rasd:InstanceID>
1711 <rasd:Reservation>0</rasd:Reservation>
1712 <rasd:ResourceType>3</rasd:ResourceType>
1713 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
1714 <rasd:Weight>0</rasd:Weight>
1715 <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
1716 </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
1717 <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
1718 <rasd:Description>Memory Size</rasd:Description>
1719 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
1720 <rasd:InstanceID>5</rasd:InstanceID>
1721 <rasd:Reservation>0</rasd:Reservation>
1722 <rasd:ResourceType>4</rasd:ResourceType>
1723 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
1724 <rasd:Weight>0</rasd:Weight>
1725 </ovf:Item>
1726 </ovf:VirtualHardwareSection>
1727 </InstantiationParams>
1728 </SourcedItem>
1729 <AllEULAsAccepted>false</AllEULAsAccepted>
1730 </InstantiateVAppTemplateParams>""".format(vmname_andid,
1731 primary_netname,
1732 primary_net_href,
1733 vapp_template_href,
1734 vm_href,
1735 vm_id,
1736 vm_name,
1737 primary_netname,
1738 cpu=cpus,
1739 core=cores,
1740 memory=memory_mb)
1741
1742 response = self.perform_request(req_type='POST',
1743 url=instantiate_vapp_href,
1744 headers=headers,
1745 data=data)
1746
1747 if response.status_code != 201:
1748 self.logger.error("REST call {} failed, reason: {} "\
1749 "status code: {}".format(instantiate_vapp_href,
1750 response.content,
1751 response.status_code))
1752 raise vimconn.vimconnException("new_vminstance(): Failed to create "\
1753 "vApp {}".format(vmname_andid))
1754 else:
1755 vapptask = self.get_task_from_response(response.content)
1756
1757 if vapptask is None and retry==1:
1758 self.get_token() # Retry getting token
1759 continue
1760 else:
1761 break
1762
1763 if vapptask is None or vapptask is False:
1764 raise vimconn.vimconnUnexpectedResponse(
1765 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1766
1767 # wait for task to complete
1768 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
1769
1770 if result.get('status') == 'success':
1771 self.logger.debug("new_vminstance(): Successfully created vApp {}".format(vmname_andid))
1772 else:
1773 raise vimconn.vimconnUnexpectedResponse(
1774 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1775
1776 except Exception as exp:
1777 raise vimconn.vimconnUnexpectedResponse(
1778 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1779
1780 # we should have now vapp in undeployed state.
1781 try:
1782 vdc_obj = VDC(self.client, href=vdc.get('href'))
1783 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1784 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1785 vapp = VApp(self.client, resource=vapp_resource)
1786
1787 except Exception as exp:
1788 raise vimconn.vimconnUnexpectedResponse(
1789 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1790 .format(vmname_andid, exp))
1791
1792 if vapp_uuid is None:
1793 raise vimconn.vimconnUnexpectedResponse(
1794 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1795 vmname_andid))
1796
1797 # Add PCI passthrough/SRIOV configurations
1798 vm_obj = None
1799 pci_devices_info = []
1800 reserve_memory = False
1801
1802 for net in net_list:
1803 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1804 pci_devices_info.append(net)
1805 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id' in net:
1806 reserve_memory = True
1807
1808 #Add PCI
1809 if len(pci_devices_info) > 0:
1810 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1811 vmname_andid ))
1812 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1813 pci_devices_info,
1814 vmname_andid)
1815 if PCI_devices_status:
1816 self.logger.info("Added PCI devives {} to VM {}".format(
1817 pci_devices_info,
1818 vmname_andid)
1819 )
1820 reserve_memory = True
1821 else:
1822 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1823 pci_devices_info,
1824 vmname_andid)
1825 )
1826
1827 # Modify vm disk
1828 if vm_disk:
1829 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1830 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1831 if result :
1832 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1833
1834 #Add new or existing disks to vApp
1835 if disk_list:
1836 added_existing_disk = False
1837 for disk in disk_list:
1838 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1839 image_id = disk['image_id']
1840 # Adding CD-ROM to VM
1841 # will revisit the code once the specification is ready to support this feature
1842 self.insert_media_to_vm(vapp, image_id)
1843 elif "image_id" in disk and disk["image_id"] is not None:
1844 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1845 disk["image_id"] , vapp_uuid))
1846 self.add_existing_disk(catalogs=catalogs,
1847 image_id=disk["image_id"],
1848 size = disk["size"],
1849 template_name=templateName,
1850 vapp_uuid=vapp_uuid
1851 )
1852 added_existing_disk = True
1853 else:
1854 # Wait till the added existing disk gets reflected into the vCD database/API
1855 if added_existing_disk:
1856 time.sleep(5)
1857 added_existing_disk = False
1858 self.add_new_disk(vapp_uuid, disk['size'])
1859
1860 if numas:
1861 # Assigning numa affinity setting
1862 for numa in numas:
1863 if 'paired-threads-id' in numa:
1864 paired_threads_id = numa['paired-threads-id']
1865 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1866
1867 # add NICs & connect to networks in netlist
1868 try:
1869 vdc_obj = VDC(self.client, href=vdc.get('href'))
1870 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1871 vapp = VApp(self.client, resource=vapp_resource)
1872 vapp_id = vapp_resource.get('id').split(':')[-1]
1873
1874 self.logger.info("Removing primary NIC: ")
1875 # First remove all NICs so that NIC properties can be adjusted as needed
1876 self.remove_primary_network_adapter_from_all_vms(vapp)
1877
1878 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1879 primary_nic_index = 0
1880 nicIndex = 0
1881 for net in net_list:
1882 # openmano uses network id in UUID format.
1883 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
1884 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1885 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1886
1887 if 'net_id' not in net:
1888 continue
1889
1890 # Using net_id as the vim_id, i.e. the VIM interface id, as we do not have a separate VIM interface id
1891 # The same will be returned in refresh_vms_status() as vim_interface_id
1892 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1893
1894 interface_net_id = net['net_id']
1895 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1896 interface_network_mode = net['use']
1897
1898 if interface_network_mode == 'mgmt':
1899 primary_nic_index = nicIndex
1900
1901 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1902 - DHCP (The IP address is obtained from a DHCP service.)
1903 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1904 - NONE (No IP addressing mode specified.)"""
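# For reference (illustrative only): these allocation modes map to the
# <IpAddressAllocationMode> element of a NetworkConnection, e.g.
#   <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
# as used in the instantiation template built earlier in this method.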
1905
1906 if primary_netname is not None:
1907 self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
1908 nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
1909 #For python3
1910 #nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
1911 if len(nets) == 1:
1912 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
1913
1914 if interface_net_name != primary_netname:
1915 # connect network to VM - with all DHCP by default
1916 self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
1917 self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
1918
1919 type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
1920 nic_type = 'VMXNET3'
1921 if 'type' in net and net['type'] not in type_list:
1922 # fetching nic type from vnf
1923 if 'model' in net:
1924 if net['model'] is not None:
1925 if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
1926 nic_type = 'VMXNET3'
1927 else:
1928 nic_type = net['model']
1929
1930 self.logger.info("new_vminstance(): adding network adapter "\
1931 "to a network {}".format(nets[0].get('name')))
1932 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1933 primary_nic_index,
1934 nicIndex,
1935 net,
1936 nic_type=nic_type)
1937 else:
1938 self.logger.info("new_vminstance(): adding network adapter "\
1939 "to a network {}".format(nets[0].get('name')))
1940 if net['type'] in ['SR-IOV', 'VF']:
1941 nic_type = net['type']
1942 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1943 primary_nic_index,
1944 nicIndex,
1945 net,
1946 nic_type=nic_type)
1947 nicIndex += 1
1948
1949 # cloud-init for ssh-key injection
1950 if cloud_config:
1951 self.cloud_init(vapp,cloud_config)
1952
1953 # If VM has PCI devices or SRIOV reserve memory for VM
1954 if reserve_memory:
1955 self.reserve_memory_for_all_vms(vapp, memory_mb)
1956
1957 self.logger.debug("new_vminstance(): powering on vApp {} ".format(vmname_andid))
1958
1959 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
1960 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
1961 if result.get('status') == 'success':
1962 self.logger.info("new_vminstance(): Successfully powered on "\
1963 "vApp {}".format(vmname_andid))
1964 else:
1965 self.logger.error("new_vminstance(): failed to power on vApp "\
1966 "{}".format(vmname_andid))
1967
1968 except Exception as exp :
1969 # it might be the case that a specific mandatory entry in the dict is empty, or some other pyvcloud exception
1970 self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
1971 .format(name, exp))
1972 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
1973 .format(name, exp))
1974
1975 # check if the vApp is deployed and, if that is the case, return the vApp UUID; otherwise -1
1976 wait_time = 0
1977 vapp_uuid = None
1978 while wait_time <= MAX_WAIT_TIME:
1979 try:
1980 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1981 vapp = VApp(self.client, resource=vapp_resource)
1982 except Exception as exp:
1983 raise vimconn.vimconnUnexpectedResponse(
1984 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1985 .format(vmname_andid, exp))
1986
1987 #if vapp and vapp.me.deployed:
1988 if vapp and vapp_resource.get('deployed') == 'true':
1989 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1990 break
1991 else:
1992 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1993 time.sleep(INTERVAL_TIME)
1994
1995 wait_time +=INTERVAL_TIME
1996
1997 #SET Affinity Rule for VM
1998 # Pre-requisites: user has created Host Groups in vCenter with the respective hosts to be used
1999 #While creating VIM account user has to pass the Host Group names in availability_zone list
2000 #"availability_zone" is a part of VIM "config" parameters
2001 #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2002 #Host groups are referred as availability zones
2003 # With the following procedure, the deployed VM will be added into a VM group.
2004 # Then a VM-to-Host affinity rule will be created using the VM group & Host group.
2005 if(availability_zone_list):
2006 self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
2007 #Admin access required for creating Affinity rules
2008 client = self.connect_as_admin()
2009 if not client:
2010 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
2011 else:
2012 self.client = client
2013 if self.client:
2014 headers = {'Accept':'application/*+xml;version=27.0',
2015 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2016 #Step1: Get provider vdc details from organization
2017 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2018 if pvdc_href is not None:
2019 #Step2: Found required pvdc, now get resource pool information
2020 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2021 if respool_href is None:
2022 #Raise error if respool_href not found
2023 msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
2024 .format(pvdc_href)
2025 self.log_message(msg)
2026
2027 #Step3: Verify requested availability zone(hostGroup) is present in vCD
2028 # get availability Zone
2029 vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
2030 # check if provided av zone(hostGroup) is present in vCD VIM
2031 status = self.check_availibility_zone(vm_az, respool_href, headers)
2032 if status is False:
2033 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
2034 "resource pool {} status: {}".format(vm_az,respool_href,status)
2035 self.log_message(msg)
2036 else:
2037 self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
2038
2039 #Step4: Find VM group references to create vm group
2040 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2041 if vmgrp_href is None:
2042 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2043 self.log_message(msg)
2044
2045 #Step5: Create a VmGroup with name az_VmGroup
2046 vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
2047 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2048 if status is not True:
2049 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
2050 self.log_message(msg)
2051
2052 #VM Group url to add vms to vm group
2053 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
2054
2055 #Step6: Add VM to VM Group
2056 #Find VM uuid from vapp_uuid
2057 vm_details = self.get_vapp_details_rest(vapp_uuid)
2058 vm_uuid = vm_details['vmuuid']
2059
2060 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2061 if status is not True:
2062 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
2063 self.log_message(msg)
2064
2065 #Step7: Create VM to Host affinity rule
2066 addrule_href = self.get_add_rule_reference (respool_href, headers)
2067 if addrule_href is None:
2068 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
2069 .format(respool_href)
2070 self.log_message(msg)
2071
2072 status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
2073 if status is False:
2074 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
2075 .format(name, vm_az)
2076 self.log_message(msg)
2077 else:
2078 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
2079 .format(name, vm_az))
2080 #Reset token to a normal user to perform other operations
2081 self.get_token()
2082
2083 if vapp_uuid is not None:
2084 return vapp_uuid, None
2085 else:
2086 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
2087
2088
2089 def get_vcd_availibility_zones(self,respool_href, headers):
2090 """ Method to find presence of av zone is VIM resource pool
2091
2092 Args:
2093 respool_href - resource pool href
2094 headers - header information
2095
2096 Returns:
2097 vcd_az - list of availability zones present in vCD
2098 """
2099 vcd_az = []
2100 url=respool_href
2101 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2102
2103 if resp.status_code != requests.codes.ok:
2104 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2105 else:
2106 #Get the href to hostGroups and find provided hostGroup is present in it
2107 resp_xml = XmlElementTree.fromstring(resp.content)
2108 for child in resp_xml:
2109 if 'VMWProviderVdcResourcePool' in child.tag:
2110 for schild in child:
2111 if 'Link' in schild.tag:
2112 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2113 hostGroup = schild.attrib.get('href')
2114 hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
2115 if hg_resp.status_code != requests.codes.ok:
2116 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
2117 else:
2118 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2119 for hostGroup in hg_resp_xml:
2120 if 'HostGroup' in hostGroup.tag:
2121 #append host group name to the list
2122 vcd_az.append(hostGroup.attrib.get("name"))
2123 return vcd_az
2124
2125
2126 def set_availability_zones(self):
2127 """
2128 Set vim availability zone
2129 """
2130
2131 vim_availability_zones = None
2132 availability_zone = None
2133 if 'availability_zone' in self.config:
2134 vim_availability_zones = self.config.get('availability_zone')
2135 if isinstance(vim_availability_zones, str):
2136 availability_zone = [vim_availability_zones]
2137 elif isinstance(vim_availability_zones, list):
2138 availability_zone = vim_availability_zones
2139 else:
2140 return availability_zone
2141
2142 return availability_zone
2143
2144
2145 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2146 """
2147 Return the availability zone to be used by the created VM.
2148 returns: The VIM availability zone to be used or None
2149 """
2150 if availability_zone_index is None:
2151 if not self.config.get('availability_zone'):
2152 return None
2153 elif isinstance(self.config.get('availability_zone'), str):
2154 return self.config['availability_zone']
2155 else:
2156 return self.config['availability_zone'][0]
2157
2158 vim_availability_zones = self.availability_zone
2159
2160 # check if the VIM offers enough availability zones as described in the VNFD
2161 if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
2162 # check if all the names of NFV AV match VIM AV names
2163 match_by_index = False
2164 for av in availability_zone_list:
2165 if av not in vim_availability_zones:
2166 match_by_index = True
2167 break
2168 if match_by_index:
2169 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
2170 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
2171 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
2172 self.logger.debug("VIM Availability zones will be used by index")
2173 return vim_availability_zones[availability_zone_index]
2174 else:
2175 return availability_zone_list[availability_zone_index]
2176 else:
2177 raise vimconn.vimconnConflictException("Not enough availability zones at VIM for this deployment")
2178
2179
2180 def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
2181 """ Method to create VM to Host Affinity rule in vCD
2182
2183 Args:
2184 addrule_href - href to make a POST request
2185 vmgrpname - name of the VM group created
2186 hostgrpname - name of the host group created earlier
2187 polarity - Affinity or Anti-affinity (default: Affinity)
2188 headers - headers to make REST call
2189
2190 Returns:
2191 True- if rule is created
2192 False- Failed to create rule due to some error
2193
2194 """
2195 task_status = False
2196 rule_name = polarity + "_" + vmgrpname
2197 payload = """<?xml version="1.0" encoding="UTF-8"?>
2198 <vmext:VMWVmHostAffinityRule
2199 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2200 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2201 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2202 <vcloud:Name>{}</vcloud:Name>
2203 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2204 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2205 <vcloud:Polarity>{}</vcloud:Polarity>
2206 <vmext:HostGroupName>{}</vmext:HostGroupName>
2207 <vmext:VmGroupName>{}</vmext:VmGroupName>
2208 </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
2209
2210 resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
2211
2212 if resp.status_code != requests.codes.accepted:
2213 self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
2214 task_status = False
2215 return task_status
2216 else:
2217 affinity_task = self.get_task_from_response(resp.content)
2218 self.logger.debug ("affinity_task: {}".format(affinity_task))
2219 if affinity_task is None or affinity_task is False:
2220 raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
2221 # wait for task to complete
2222 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2223 if result.get('status') == 'success':
2224 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
2225 return True
2226 else:
2227 raise vimconn.vimconnUnexpectedResponse(
2228 "failed to create affinity rule {}".format(rule_name))
2229
2230
2231 def get_add_rule_reference (self, respool_href, headers):
2232 """ This method finds href to add vm to host affinity rule to vCD
2233
2234 Args:
2235 respool_href- href to resource pool
2236 headers- header information to make REST call
2237
2238 Returns:
2239 None - if no valid href to add rule found or
2240 addrule_href - href to add vm to host affinity rule of resource pool
2241 """
2242 addrule_href = None
2243 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2244
2245 if resp.status_code != requests.codes.ok:
2246 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2247 else:
2248
2249 resp_xml = XmlElementTree.fromstring(resp.content)
2250 for child in resp_xml:
2251 if 'VMWProviderVdcResourcePool' in child.tag:
2252 for schild in child:
2253 if 'Link' in schild.tag:
2254 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
2255 schild.attrib.get('rel') == "add":
2256 addrule_href = schild.attrib.get('href')
2257 break
2258
2259 return addrule_href
2260
2261
2262 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2263 """ Method to add deployed VM to newly created VM Group.
2264 This is required to create VM to Host affinity in vCD
2265
2266 Args:
2267 vm_uuid- newly created vm uuid
2268 vmGroupNameURL- URL to VM Group name
2269 vmGroup_name- Name of VM group created
2270 headers- Headers for REST request
2271
2272 Returns:
2273 True- if VM added to VM group successfully
2274 False- if any error is encountered
2275 """
2276
2277 addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
2278
2279 if addvm_resp.status_code != requests.codes.ok:
2280 self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
2281 .format(vmGroupNameURL, addvm_resp.status_code))
2282 return False
2283 else:
2284 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2285 for child in resp_xml:
2286 if child.tag.split('}')[1] == 'Link':
2287 if child.attrib.get("rel") == "addVms":
2288 addvmtogrpURL = child.attrib.get("href")
2289
2290 #Get vm details
2291 url_list = [self.url, '/api/vApp/vm-',vm_uuid]
2292 vmdetailsURL = ''.join(url_list)
2293
2294 resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
2295
2296 if resp.status_code != requests.codes.ok:
2297 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
2298 return False
2299
2300 #Parse VM details
2301 resp_xml = XmlElementTree.fromstring(resp.content)
2302 if resp_xml.tag.split('}')[1] == "Vm":
2303 vm_id = resp_xml.attrib.get("id")
2304 vm_name = resp_xml.attrib.get("name")
2305 vm_href = resp_xml.attrib.get("href")
2306 #print vm_id, vm_name, vm_href
2307 #Add VM into VMgroup
2308 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2309 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2310 xmlns="http://www.vmware.com/vcloud/versions" \
2311 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2312 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2313 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2314 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2315 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2316 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2317 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2318 <ns2:VmReference href="{}" id="{}" name="{}" \
2319 type="application/vnd.vmware.vcloud.vm+xml" />\
2320 </ns2:Vms>""".format(vm_href, vm_id, vm_name)
2321
2322 addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
2323
2324 if addvmtogrp_resp.status_code != requests.codes.accepted:
2325 self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
2326 return False
2327 else:
2328 self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
2329 return True
2330
2331
2332 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2333 """Method to create a VM group in vCD
2334
2335 Args:
2336 vmgroup_name : Name of VM group to be created
2337 vmgroup_href : href for vmgroup
2338 headers- Headers for REST request
2339 """
2340 #POST to add URL with required data
2341 vmgroup_status = False
2342 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2343 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2344 <vmCount>1</vmCount>\
2345 </VMWVmGroup>""".format(vmgroup_name)
2346 resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
2347
2348 if resp.status_code != requests.codes.accepted:
2349 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
2350 return vmgroup_status
2351 else:
2352 vmgroup_task = self.get_task_from_response(resp.content)
2353 if vmgroup_task is None or vmgroup_task is False:
2354 raise vimconn.vimconnUnexpectedResponse(
2355 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2356
2357 # wait for task to complete
2358 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2359
2360 if result.get('status') == 'success':
2361 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
2362 #time.sleep(10)
2363 vmgroup_status = True
2364 return vmgroup_status
2365 else:
2366 raise vimconn.vimconnUnexpectedResponse(\
2367 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2368
2369
2370 def find_vmgroup_reference(self, url, headers):
2371 """ Method to create a new VMGroup which is required to add created VM
2372 Args:
2373 url- resource pool href
2374 headers- header information
2375
2376 Returns:
2377 returns the href used to create a VM group
2378 """
2379 #Perform GET on resource pool to find 'add' link to create VMGroup
2380 #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2381 vmgrp_href = None
2382 resp = self.perform_request(req_type='GET',url=url, headers=headers)
2383
2384 if resp.status_code != requests.codes.ok:
2385 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2386 else:
2387 #Get the href to add vmGroup to vCD
2388 resp_xml = XmlElementTree.fromstring(resp.content)
2389 for child in resp_xml:
2390 if 'VMWProviderVdcResourcePool' in child.tag:
2391 for schild in child:
2392 if 'Link' in schild.tag:
2393 #Find href with type VMGroup and rel with add
2394 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
2395 and schild.attrib.get('rel') == "add":
2396 vmgrp_href = schild.attrib.get('href')
2397 return vmgrp_href
2398
2399
2400 def check_availibility_zone(self, az, respool_href, headers):
2401 """ Method to verify requested av zone is present or not in provided
2402 resource pool
2403
2404 Args:
2405 az - name of hostgroup (availibility_zone)
2406 respool_href - Resource Pool href
2407 headers - Headers to make REST call
2408 Returns:
2409 az_found - True if the availability zone is found, else False
2410 """
2411 az_found = False
2412 headers['Accept']='application/*+xml;version=27.0'
2413 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2414
2415 if resp.status_code != requests.codes.ok:
2416 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2417 else:
2418 # Get the href to hostGroups and check whether the provided hostGroup is present in it
2419 resp_xml = XmlElementTree.fromstring(resp.content)
2420
2421 for child in resp_xml:
2422 if 'VMWProviderVdcResourcePool' in child.tag:
2423 for schild in child:
2424 if 'Link' in schild.tag:
2425 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2426 hostGroup_href = schild.attrib.get('href')
2427 hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
2428 if hg_resp.status_code != requests.codes.ok:
2429 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
2430 else:
2431 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2432 for hostGroup in hg_resp_xml:
2433 if 'HostGroup' in hostGroup.tag:
2434 if hostGroup.attrib.get("name") == az:
2435 az_found = True
2436 break
2437 return az_found
2438
2439
2440 def get_pvdc_for_org(self, org_vdc, headers):
2441 """ This method gets provider vdc references from organisation
2442
2443 Args:
2444 org_vdc - name of the organisation VDC to find pvdc
2445 headers - headers to make REST call
2446
2447 Returns:
2448 None - if no pvdc href found else
2449 pvdc_href - href to pvdc
2450 """
2451
2452 #Get provider VDC references from vCD
2453 pvdc_href = None
2454 #url = '<vcd url>/api/admin/extension/providerVdcReferences'
2455 url_list = [self.url, '/api/admin/extension/providerVdcReferences']
2456 url = ''.join(url_list)
2457
2458 response = self.perform_request(req_type='GET',url=url, headers=headers)
2459 if response.status_code != requests.codes.ok:
2460 self.logger.debug ("REST API call {} failed. Return status code {}"\
2461 .format(url, response.status_code))
2462 else:
2463 xmlroot_response = XmlElementTree.fromstring(response.content)
2464 for child in xmlroot_response:
2465 if 'ProviderVdcReference' in child.tag:
2466 pvdc_href = child.attrib.get('href')
2467 #Get vdcReferences to find org
2468 pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2469 if pvdc_resp.status_code != requests.codes.ok:
2470 raise vimconn.vimconnException("REST API call {} failed. "\
2471 "Return status code {}"\
2472 .format(url, pvdc_resp.status_code))
2473
2474 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
2475 for child in pvdc_resp_xml:
2476 if 'Link' in child.tag:
2477 if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
2478 vdc_href = child.attrib.get('href')
2479
2480 #Check if provided org is present in vdc
2481 vdc_resp = self.perform_request(req_type='GET',
2482 url=vdc_href,
2483 headers=headers)
2484 if vdc_resp.status_code != requests.codes.ok:
2485 raise vimconn.vimconnException("REST API call {} failed. "\
2486 "Return status code {}"\
2487 .format(url, vdc_resp.status_code))
2488 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
2489 for child in vdc_resp_xml:
2490 if 'VdcReference' in child.tag:
2491 if child.attrib.get('name') == org_vdc:
2492 return pvdc_href
2493
2494
2495 def get_resource_pool_details(self, pvdc_href, headers):
2496 """ Method to get resource pool information.
2497 Host groups are a property of the resource pool.
2498 To get host groups, we need to GET details of resource pool.
2499
2500 Args:
2501 pvdc_href: href to pvdc details
2502 headers: headers
2503
2504 Returns:
2505 respool_href - Returns href link reference to resource pool
2506 """
2507 respool_href = None
2508 resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2509
2510 if resp.status_code != requests.codes.ok:
2511 self.logger.debug ("REST API call {} failed. Return status code {}"\
2512 .format(pvdc_href, resp.status_code))
2513 else:
2514 respool_resp_xml = XmlElementTree.fromstring(resp.content)
2515 for child in respool_resp_xml:
2516 if 'Link' in child.tag:
2517 if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
2518 respool_href = child.attrib.get("href")
2519 break
2520 return respool_href
2521
2522
2523 def log_message(self, msg):
2524 """
2525 Method to log error messages related to Affinity rule creation
2526 in new_vminstance & raise Exception
2527 Args :
2528 msg - Error message to be logged
2529
2530 """
2531 #get token to connect vCD as a normal user
2532 self.get_token()
2533 self.logger.debug(msg)
2534 raise vimconn.vimconnException(msg)
2535
2536
2537 ##
2538 ##
2539 ## based on current discussion
2540 ##
2541 ##
2542 ## server:
2543 # created: '2016-09-08T11:51:58'
2544 # description: simple-instance.linux1.1
2545 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
2546 # hostId: e836c036-74e7-11e6-b249-0800273e724c
2547 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
2548 # status: ACTIVE
2549 # error_msg:
2550 # interfaces: …
2551 #
2552 def get_vminstance(self, vim_vm_uuid=None):
2553 """Returns the VM instance information from VIM"""
2554
2555 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
2556
2557 org, vdc = self.get_vdc_details()
2558 if vdc is None:
2559 raise vimconn.vimconnConnectionException(
2560 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2561
2562 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
2563 if not vm_info_dict:
2564 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2565 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2566
2567 status_key = vm_info_dict['status']
2568 error = ''
2569 try:
2570 vm_dict = {'created': vm_info_dict['created'],
2571 'description': vm_info_dict['name'],
2572 'status': vcdStatusCode2manoFormat[int(status_key)],
2573 'hostId': vm_info_dict['vmuuid'],
2574 'error_msg': error,
2575 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2576
2577 if 'interfaces' in vm_info_dict:
2578 vm_dict['interfaces'] = vm_info_dict['interfaces']
2579 else:
2580 vm_dict['interfaces'] = []
2581 except KeyError:
2582 vm_dict = {'created': '',
2583 'description': '',
2584 'status': vcdStatusCode2manoFormat[int(-1)],
2585 'hostId': vm_info_dict['vmuuid'],
2586 'error_msg': "Inconsistency state",
2587 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2588
2589 return vm_dict
2590
2591 def delete_vminstance(self, vm__vim_uuid, created_items=None):
2592 """Method poweroff and remove VM instance from vcloud director network.
2593
2594 Args:
2595 vm__vim_uuid: VM UUID
2596
2597 Returns:
2598 Returns the instance identifier
2599 """
2600
2601 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
2602
2603 org, vdc = self.get_vdc_details()
2604 vdc_obj = VDC(self.client, href=vdc.get('href'))
2605 if vdc_obj is None:
2606 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
2607 self.tenant_name))
2608 raise vimconn.vimconnException(
2609 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2610
2611 try:
2612 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2613 if vapp_name is None:
2614 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2615 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2616 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2617 vapp_resource = vdc_obj.get_vapp(vapp_name)
2618 vapp = VApp(self.client, resource=vapp_resource)
2619
2620 # Delete vApp and wait for status change if task executed and vApp is None.
2621
2622 if vapp:
2623 if vapp_resource.get('deployed') == 'true':
2624 self.logger.info("Powering off vApp {}".format(vapp_name))
2625 #Power off vApp
2626 powered_off = False
2627 wait_time = 0
2628 while wait_time <= MAX_WAIT_TIME:
2629 power_off_task = vapp.power_off()
2630 result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
2631
2632 if result.get('status') == 'success':
2633 powered_off = True
2634 break
2635 else:
2636 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
2637 time.sleep(INTERVAL_TIME)
2638
2639 wait_time +=INTERVAL_TIME
2640 if not powered_off:
2641 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
2642 else:
2643 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
2644
2645 #Undeploy vApp
2646 self.logger.info("Undeploy vApp {}".format(vapp_name))
2647 wait_time = 0
2648 undeployed = False
2649 while wait_time <= MAX_WAIT_TIME:
2650 vapp = VApp(self.client, resource=vapp_resource)
2651 if not vapp:
2652 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2653 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2654 undeploy_task = vapp.undeploy()
2655
2656 result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
2657 if result.get('status') == 'success':
2658 undeployed = True
2659 break
2660 else:
2661 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
2662 time.sleep(INTERVAL_TIME)
2663
2664 wait_time +=INTERVAL_TIME
2665
2666 if not undeployed:
2667 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
2668
2669 # delete vapp
2670 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
2671
2672 if vapp is not None:
2673 wait_time = 0
2674 result = False
2675
2676 while wait_time <= MAX_WAIT_TIME:
2677 vapp = VApp(self.client, resource=vapp_resource)
2678 if not vapp:
2679 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2680 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2681
2682 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
2683
2684 result = self.client.get_task_monitor().wait_for_success(task=delete_task)
2685 if result.get('status') == 'success':
2686 break
2687 else:
2688 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
2689 time.sleep(INTERVAL_TIME)
2690
2691 wait_time +=INTERVAL_TIME
2692
2693 if result is None:
2694 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
2695 else:
2696 self.logger.info("Deleted vm instance {} successfully".format(vm__vim_uuid))
2697 return vm__vim_uuid
2698 except:
2699 self.logger.debug(traceback.format_exc())
2700 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
2701
2702
2703 def refresh_vms_status(self, vm_list):
2704 """Get the status of the virtual machines and their interfaces/ports
2705 Params: the list of VM identifiers
2706 Returns a dictionary with:
2707 vm_id: #VIM id of this Virtual Machine
2708 status: #Mandatory. Text with one of:
2709 # DELETED (not found at vim)
2710 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2711 # OTHER (Vim reported other status not understood)
2712 # ERROR (VIM indicates an ERROR status)
2713 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2714 # CREATING (on building process), ERROR
2715 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
2716 #
2717 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2718 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2719 interfaces:
2720 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2721 mac_address: #Text format XX:XX:XX:XX:XX:XX
2722 vim_net_id: #network id where this interface is connected
2723 vim_interface_id: #interface/port VIM id
2724 ip_address: #null, or text with IPv4, IPv6 address
2725 """
2726
2727 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
2728
2729 org,vdc = self.get_vdc_details()
2730 if vdc is None:
2731 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2732
2733 vms_dict = {}
2734 nsx_edge_list = []
2735 for vmuuid in vm_list:
2736 vapp_name = self.get_namebyvappid(vmuuid)
2737 if vapp_name is not None:
2738
2739 try:
2740 vm_pci_details = self.get_vm_pci_details(vmuuid)
2741 vdc_obj = VDC(self.client, href=vdc.get('href'))
2742 vapp_resource = vdc_obj.get_vapp(vapp_name)
2743 the_vapp = VApp(self.client, resource=vapp_resource)
2744
2745 vm_details = {}
2746 for vm in the_vapp.get_all_vms():
2747 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2748 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2749 response = self.perform_request(req_type='GET',
2750 url=vm.get('href'),
2751 headers=headers)
2752
2753 if response.status_code != 200:
2754 self.logger.error("refresh_vms_status : REST call {} failed, reason: {} "\
2755 "status code: {}".format(vm.get('href'),
2756 response.content,
2757 response.status_code))
2758 raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
2759 "VM details")
2760 xmlroot = XmlElementTree.fromstring(response.content)
2761
2762
2763 result = response.content.replace("\n"," ")
2764 hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
2765 if hdd_match:
2766 hdd_mb = hdd_match.group(1)
2767 vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
2768 cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
2769 if cpus_match:
2770 cpus = cpus_match.group(1)
2771 vm_details['cpus'] = int(cpus) if cpus else None
2772 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
2773 vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
2774 vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
2775 vm_details['id'] = xmlroot.get('id')
2776 vm_details['name'] = xmlroot.get('name')
2777 vm_info = [vm_details]
2778 if vm_pci_details:
2779 vm_info[0].update(vm_pci_details)
2780
2781 vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2782 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2783 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2784
2785 # get networks
2786 vm_ip = None
2787 vm_mac = None
2788 networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
2789 for network in networks:
2790 mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
2791 vm_mac = mac_s.group(1) if mac_s else None
2792 ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
2793 vm_ip = ip_s.group(1) if ip_s else None
2794
2795 if vm_ip is None:
2796 if not nsx_edge_list:
2797 nsx_edge_list = self.get_edge_details()
2798 if nsx_edge_list is None:
2799 raise vimconn.vimconnException("refresh_vms_status:"\
2800 "Failed to get edge details from NSX Manager")
2801 if vm_mac is not None:
2802 vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
2803
2804 net_s = re.search('network="(.*?)"',network)
2805 network_name = net_s.group(1) if net_s else None
2806
2807 vm_net_id = self.get_network_id_by_name(network_name)
2808 interface = {"mac_address": vm_mac,
2809 "vim_net_id": vm_net_id,
2810 "vim_interface_id": vm_net_id,
2811 "ip_address": vm_ip}
2812
2813 vm_dict["interfaces"].append(interface)
2814
2815 # add a vm to vm dict
2816 vms_dict.setdefault(vmuuid, vm_dict)
2817 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
2818 except Exception as exp:
2819 self.logger.debug("Error in response {}".format(exp))
2820 self.logger.debug(traceback.format_exc())
2821
2822 return vms_dict
2823
2824
2825 def get_edge_details(self):
2826 """Get the NSX edge list from NSX Manager
2827 Returns list of NSX edges
2828 """
2829 edge_list = []
2830 rheaders = {'Content-Type': 'application/xml'}
2831 nsx_api_url = '/api/4.0/edges'
2832
2833 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2834
2835 try:
2836 resp = requests.get(self.nsx_manager + nsx_api_url,
2837 auth = (self.nsx_user, self.nsx_password),
2838 verify = False, headers = rheaders)
2839 if resp.status_code == requests.codes.ok:
2840 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2841 for edge_pages in paged_Edge_List:
2842 if edge_pages.tag == 'edgePage':
2843 for edge_summary in edge_pages:
2844 if edge_summary.tag == 'pagingInfo':
2845 for element in edge_summary:
2846 if element.tag == 'totalCount' and element.text == '0':
2847 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2848 .format(self.nsx_manager))
2849
2850 if edge_summary.tag == 'edgeSummary':
2851 for element in edge_summary:
2852 if element.tag == 'id':
2853 edge_list.append(element.text)
2854 else:
2855 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2856 .format(self.nsx_manager))
2857
2858 if not edge_list:
2859 raise vimconn.vimconnException("get_edge_details: "\
2860 "No NSX edge details found: {}"
2861 .format(self.nsx_manager))
2862 else:
2863 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2864 return edge_list
2865 else:
2866 self.logger.debug("get_edge_details: "
2867 "Failed to get NSX edge details from NSX Manager: {}"
2868 .format(resp.content))
2869 return None
2870
2871 except Exception as exp:
2872 self.logger.debug("get_edge_details: "\
2873 "Failed to get NSX edge details from NSX Manager: {}"
2874 .format(exp))
2875 raise vimconn.vimconnException("get_edge_details: "\
2876 "Failed to get NSX edge details from NSX Manager: {}"
2877 .format(exp))
2878
2879
2880 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2881 """Get IP address details from NSX edges, using the MAC address
2882 PARAMS: nsx_edges : List of NSX edges
2883 mac_address : Find IP address corresponding to this MAC address
2884 Returns: IP address corresponding to the provided MAC address
2885 """
2886
2887 ip_addr = None
2888 rheaders = {'Content-Type': 'application/xml'}
2889
2890 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2891
2892 try:
2893 for edge in nsx_edges:
2894 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2895
2896 resp = requests.get(self.nsx_manager + nsx_api_url,
2897 auth = (self.nsx_user, self.nsx_password),
2898 verify = False, headers = rheaders)
2899
2900 if resp.status_code == requests.codes.ok:
2901 dhcp_leases = XmlElementTree.fromstring(resp.text)
2902 for child in dhcp_leases:
2903 if child.tag == 'dhcpLeaseInfo':
2904 dhcpLeaseInfo = child
2905 for leaseInfo in dhcpLeaseInfo:
2906 for elem in leaseInfo:
2907 if (elem.tag)=='macAddress':
2908 edge_mac_addr = elem.text
2909 if (elem.tag)=='ipAddress':
2910 ip_addr = elem.text
2911 if edge_mac_addr is not None:
2912 if edge_mac_addr == mac_address:
2913 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2914 .format(ip_addr, mac_address,edge))
2915 return ip_addr
2916 else:
2917 self.logger.debug("get_ipaddr_from_NSXedge: "\
2918 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2919 .format(resp.content))
2920
2921 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2922 return None
2923
2924 except XmlElementTree.ParseError as Err:
2925 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2926
2927
2928 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
2929 """Send and action over a VM instance from VIM
2930 Returns the vm_id if the action was successfully sent to the VIM"""
2931
2932 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2933 if vm__vim_uuid is None or action_dict is None:
2934 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2935
2936 org, vdc = self.get_vdc_details()
2937 if vdc is None:
2938 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2939
2940 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2941 if vapp_name is None:
2942 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2943 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2944 else:
2945 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2946
2947 try:
2948 vdc_obj = VDC(self.client, href=vdc.get('href'))
2949 vapp_resource = vdc_obj.get_vapp(vapp_name)
2950 vapp = VApp(self.client, resource=vapp_resource)
2951 if "start" in action_dict:
2952 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2953 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
2954 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2955 self.instance_actions_result("start", result, vapp_name)
2956 elif "rebuild" in action_dict:
2957 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2958 rebuild_task = vapp.deploy(power_on=True)
2959 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
2960 self.instance_actions_result("rebuild", result, vapp_name)
2961 elif "pause" in action_dict:
2962 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2963 pause_task = vapp.undeploy(action='suspend')
2964 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
2965 self.instance_actions_result("pause", result, vapp_name)
2966 elif "resume" in action_dict:
2967 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2968 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
2969 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2970 self.instance_actions_result("resume", result, vapp_name)
2971 elif "shutoff" in action_dict or "shutdown" in action_dict:
2972 action_name , value = action_dict.items()[0]
2973 #For python3
2974 #action_name , value = list(action_dict.items())[0]
2975 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2976 shutdown_task = vapp.shutdown()
2977 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
2978 if action_name == "shutdown":
2979 self.instance_actions_result("shutdown", result, vapp_name)
2980 else:
2981 self.instance_actions_result("shutoff", result, vapp_name)
2982 elif "forceOff" in action_dict:
2983 result = vapp.undeploy(action='powerOff')
2984 self.instance_actions_result("forceOff", result, vapp_name)
2985 elif "reboot" in action_dict:
2986 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2987 reboot_task = vapp.reboot()
2988 self.client.get_task_monitor().wait_for_success(task=reboot_task)
2989 else:
2990 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2991 return vm__vim_uuid
2992 except Exception as exp :
2993 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2994 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2995
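    # Illustrative sketch of the action_dict accepted above; the keys handled by
    # action_vminstance are "start", "rebuild", "pause", "resume", "shutdown"/"shutoff",
    # "forceOff" and "reboot". 'vim_conn' is an already-initialized vimconnector
    # instance and the uuid value is a made-up placeholder.
    #
    #   vim_conn.action_vminstance("11111111-2222-3333-4444-555555555555", {"shutdown": None})
    #   vim_conn.action_vminstance("11111111-2222-3333-4444-555555555555", {"start": None})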
2996 def instance_actions_result(self, action, result, vapp_name):
2997 if result.get('status') == 'success':
2998 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2999 else:
3000 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
3001
3002 def get_vminstance_console(self, vm_id, console_type="vnc"):
3003 """
3004 Get a console for the virtual machine
3005 Params:
3006 vm_id: uuid of the VM
3007 console_type, can be:
3008 "novnc" (by default), "xvpvnc" for VNC types,
3009 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3010 Returns dict with the console parameters:
3011 protocol: ssh, ftp, http, https, ...
3012 server: usually ip address
3013 port: the http, ssh, ... port
3014 suffix: extra text, e.g. the http path and query string
3015 """
3016 raise vimconn.vimconnNotImplemented("Should have implemented this")
3017
3018 # NOT USED METHODS in current version
3019
3020 def host_vim2gui(self, host, server_dict):
3021 """Transform host dictionary from VIM format to GUI format,
3022 and append to the server_dict
3023 """
3024 raise vimconn.vimconnNotImplemented("Should have implemented this")
3025
3026 def get_hosts_info(self):
3027 """Get the information of deployed hosts
3028 Returns the hosts content"""
3029 raise vimconn.vimconnNotImplemented("Should have implemented this")
3030
3031 def get_hosts(self, vim_tenant):
3032 """Get the hosts and deployed instances
3033 Returns the hosts content"""
3034 raise vimconn.vimconnNotImplemented("Should have implemented this")
3035
3036 def get_processor_rankings(self):
3037 """Get the processor rankings in the VIM database"""
3038 raise vimconn.vimconnNotImplemented("Should have implemented this")
3039
3040 def new_host(self, host_data):
3041 """Adds a new host to VIM"""
3042 '''Returns status code of the VIM response'''
3043 raise vimconn.vimconnNotImplemented("Should have implemented this")
3044
3045 def new_external_port(self, port_data):
3046 """Adds a external port to VIM"""
3047 '''Returns the port identifier'''
3048 raise vimconn.vimconnNotImplemented("Should have implemented this")
3049
3050 def new_external_network(self, net_name, net_type):
3051 """Adds a external network to VIM (shared)"""
3052 '''Returns the network identifier'''
3053 raise vimconn.vimconnNotImplemented("Should have implemented this")
3054
3055 def connect_port_network(self, port_id, network_id, admin=False):
3056 """Connects a external port to a network"""
3057 '''Returns status code of the VIM response'''
3058 raise vimconn.vimconnNotImplemented("Should have implemented this")
3059
3060 def new_vminstancefromJSON(self, vm_data):
3061 """Adds a VM instance to VIM"""
3062 '''Returns the instance identifier'''
3063 raise vimconn.vimconnNotImplemented("Should have implemented this")
3064
3065 def get_network_name_by_id(self, network_uuid=None):
3066 """Method gets vcloud director network named based on supplied uuid.
3067
3068 Args:
3069 network_uuid: network_id
3070
3071 Returns:
3072 The network name, or None if not found.
3073 """
3074
3075 if not network_uuid:
3076 return None
3077
3078 try:
3079 org_dict = self.get_org(self.org_uuid)
3080 if 'networks' in org_dict:
3081 org_network_dict = org_dict['networks']
3082 for net_uuid in org_network_dict:
3083 if net_uuid == network_uuid:
3084 return org_network_dict[net_uuid]
3085 except:
3086 self.logger.debug("Exception in get_network_name_by_id")
3087 self.logger.debug(traceback.format_exc())
3088
3089 return None
3090
3091 def get_network_id_by_name(self, network_name=None):
3092 """Method gets vcloud director network uuid based on supplied name.
3093
3094 Args:
3095 network_name: network_name
3096 Returns:
3097 The network uuid (network_id) matching the supplied name,
3098 or None if not found.
3099 """
3100
3101 if not network_name:
3102 self.logger.debug("get_network_id_by_name() : Network name is empty")
3103 return None
3104
3105 try:
3106 org_dict = self.get_org(self.org_uuid)
3107 if org_dict and 'networks' in org_dict:
3108 org_network_dict = org_dict['networks']
3109 for net_uuid,net_name in org_network_dict.iteritems():
3110 #For python3
3111 #for net_uuid,net_name in org_network_dict.items():
3112 if net_name == network_name:
3113 return net_uuid
3114
3115 except KeyError as exp:
3116 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
3117
3118 return None
3119
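    # Illustrative sketch: the two lookups above are symmetric wrappers over get_org();
    # the uuid and name below are placeholders, not real objects.
    #
    #   net_name = self.get_network_name_by_id("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")   # -> "mgmt-net" or None
    #   net_uuid = self.get_network_id_by_name("mgmt-net")                               # -> network uuid or None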
3120 def list_org_action(self):
3121 """
3122 Method leverages vCloud Director and queries the available organizations for a particular user
3123
3124 Args:
3125 vca - is active VCA connection.
3126 vdc_name - is a vdc name that will be used to query vms action
3127
3128 Returns:
3129 The XML response
3130 """
3131 url_list = [self.url, '/api/org']
3132 vm_list_rest_call = ''.join(url_list)
3133
3134 if self.client._session:
3135 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3136 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3137
3138 response = self.perform_request(req_type='GET',
3139 url=vm_list_rest_call,
3140 headers=headers)
3141
3142 if response.status_code == 403:
3143 response = self.retry_rest('GET', vm_list_rest_call)
3144
3145 if response.status_code == requests.codes.ok:
3146 return response.content
3147
3148 return None
3149
3150 def get_org_action(self, org_uuid=None):
3151 """
3152 Method leverages vCloud Director and retrieves the available objects for an organization.
3153
3154 Args:
3155 org_uuid - vCD organization uuid
3156 self.client - is active connection.
3157
3158 Returns:
3159 The XML response
3160 """
3161
3162 if org_uuid is None:
3163 return None
3164
3165 url_list = [self.url, '/api/org/', org_uuid]
3166 vm_list_rest_call = ''.join(url_list)
3167
3168 if self.client._session:
3169 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3170 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3171
3172 #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
3173 response = self.perform_request(req_type='GET',
3174 url=vm_list_rest_call,
3175 headers=headers)
3176 if response.status_code == 403:
3177 response = self.retry_rest('GET', vm_list_rest_call)
3178
3179 if response.status_code == requests.codes.ok:
3180 return response.content
3181 return None
3182
3183 def get_org(self, org_uuid=None):
3184 """
3185 Method retrieves details of an organization in vCloud Director
3186
3187 Args:
3188 org_uuid - is a organization uuid.
3189
3190 Returns:
3191 A dictionary with the following keys:
3192 "networks" - network list under the org
3193 "catalogs" - catalog list under the org
3194 "vdcs" - vdc list under the org
3195 """
3196
3197 org_dict = {}
3198
3199 if org_uuid is None:
3200 return org_dict
3201
3202 content = self.get_org_action(org_uuid=org_uuid)
3203 try:
3204 vdc_list = {}
3205 network_list = {}
3206 catalog_list = {}
3207 vm_list_xmlroot = XmlElementTree.fromstring(content)
3208 for child in vm_list_xmlroot:
3209 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
3210 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3211 org_dict['vdcs'] = vdc_list
3212 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
3213 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3214 org_dict['networks'] = network_list
3215 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
3216 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3217 org_dict['catalogs'] = catalog_list
3218 except:
3219 pass
3220
3221 return org_dict
3222
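    # Illustrative shape of the dictionary returned by get_org() above; the uuids and
    # names are placeholders, and only the keys present in the org response are filled in:
    #
    #   {
    #       "vdcs":     {"<vdc-uuid>": "vdc-name"},
    #       "networks": {"<net-uuid>": "net-name"},
    #       "catalogs": {"<catalog-uuid>": "catalog-name"},
    #   }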
3223 def get_org_list(self):
3224 """
3225 Method retrieves the available organizations in vCloud Director
3226
3227 Args:
3228 vca - is active VCA connection.
3229
3230 Returns:
3231 A dictionary keyed by organization UUID, with the organization name as value
3232 """
3233
3234 org_dict = {}
3235
3236 content = self.list_org_action()
3237 try:
3238 vm_list_xmlroot = XmlElementTree.fromstring(content)
3239 for vm_xml in vm_list_xmlroot:
3240 if vm_xml.tag.split("}")[1] == 'Org':
3241 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
3242 org_dict[org_uuid[0]] = vm_xml.attrib['name']
3243 except:
3244 pass
3245
3246 return org_dict
3247
3248 def vms_view_action(self, vdc_name=None):
3249 """ Method leverages vCloud director vms query call
3250
3251 Args:
3252 vca - is active VCA connection.
3253 vdc_name - is a vdc name that will be used to query vms action
3254
3255 Returns:
3256 The XML response
3257 """
3258 vca = self.connect()
3259 if vdc_name is None:
3260 return None
3261
3262 url_list = [vca.host, '/api/vms/query']
3263 vm_list_rest_call = ''.join(url_list)
3264
3265 if vca.vcloud_session and vca.vcloud_session.organization:
3266 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
3267 vca.vcloud_session.organization.Link)
3268 #For python3
3269 #refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and\
3270 # ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
3271 if len(refs) == 1:
3272 response = Http.get(url=vm_list_rest_call,
3273 headers=vca.vcloud_session.get_vcloud_headers(),
3274 verify=vca.verify,
3275 logger=vca.logger)
3276 if response.status_code == requests.codes.ok:
3277 return response.content
3278
3279 return None
3280
3281 def get_vapp_list(self, vdc_name=None):
3282 """
3283 Method retrieves the vApp list deployed in vCloud Director and returns a dictionary
3284 containing all vApps deployed for the queried VDC.
3285 The key of the dictionary is the vApp UUID
3286
3287
3288 Args:
3289 vca - is active VCA connection.
3290 vdc_name - is a vdc name that will be used to query vms action
3291
3292 Returns:
3293 A dictionary keyed by vApp UUID
3294 """
3295
3296 vapp_dict = {}
3297 if vdc_name is None:
3298 return vapp_dict
3299
3300 content = self.vms_view_action(vdc_name=vdc_name)
3301 try:
3302 vm_list_xmlroot = XmlElementTree.fromstring(content)
3303 for vm_xml in vm_list_xmlroot:
3304 if vm_xml.tag.split("}")[1] == 'VMRecord':
3305 if vm_xml.attrib['isVAppTemplate'] == 'true':
3306 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
3307 if 'vappTemplate-' in rawuuid[0]:
3308 # vApp template in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5; we remove
3309 # the 'vappTemplate-' prefix and use the raw UUID as key
3310 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
3311 except:
3312 pass
3313
3314 return vapp_dict
3315
3316 def get_vm_list(self, vdc_name=None):
3317 """
3318 Method retrieves the list of VMs deployed in vCloud Director. It returns a dictionary
3319 containing all VMs deployed for the queried VDC.
3320 The key of the dictionary is the VM UUID
3321
3322
3323 Args:
3324 vca - is active VCA connection.
3325 vdc_name - is a vdc name that will be used to query vms action
3326
3327 Returns:
3328 A dictionary keyed by VM UUID
3329 """
3330 vm_dict = {}
3331
3332 if vdc_name is None:
3333 return vm_dict
3334
3335 content = self.vms_view_action(vdc_name=vdc_name)
3336 try:
3337 vm_list_xmlroot = XmlElementTree.fromstring(content)
3338 for vm_xml in vm_list_xmlroot:
3339 if vm_xml.tag.split("}")[1] == 'VMRecord':
3340 if vm_xml.attrib['isVAppTemplate'] == 'false':
3341 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3342 if 'vm-' in rawuuid[0]:
3343 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5; we remove
3344 # the 'vm-' prefix and use the raw UUID as key
3345 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3346 except:
3347 pass
3348
3349 return vm_dict
3350
3351 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
3352 """
3353 Method retrieves a VM deployed in vCloud Director. It returns the VM attributes as a dictionary
3354 containing the matching VM deployed for the queried VDC.
3355 The key of the dictionary is the VM UUID
3356
3357
3358 Args:
3359 vca - is active VCA connection.
3360 vdc_name - is a vdc name that will be used to query vms action
3361
3362 Returns:
3363 A dictionary keyed by VM UUID
3364 """
3365 vm_dict = {}
3366 vca = self.connect()
3367 if not vca:
3368 raise vimconn.vimconnConnectionException("self.connect() is failed")
3369
3370 if vdc_name is None:
3371 return vm_dict
3372
3373 content = self.vms_view_action(vdc_name=vdc_name)
3374 try:
3375 vm_list_xmlroot = XmlElementTree.fromstring(content)
3376 for vm_xml in vm_list_xmlroot:
3377 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
3378 # lookup done by UUID
3379 if isuuid:
3380 if vapp_name in vm_xml.attrib['container']:
3381 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3382 if 'vm-' in rawuuid[0]:
3383 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3384 break
3385 # lookup done by Name
3386 else:
3387 if vapp_name in vm_xml.attrib['name']:
3388 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3389 if 'vm-' in rawuuid[0]:
3390 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3391 break
3392 except:
3393 pass
3394
3395 return vm_dict
3396
3397 def get_network_action(self, network_uuid=None):
3398 """
3399 Method leverages vCloud Director and queries a network based on the network uuid
3400
3401 Args:
3402 vca - is active VCA connection.
3403 network_uuid - is a network uuid
3404
3405 Returns:
3406 The XML response
3407 """
3408
3409 if network_uuid is None:
3410 return None
3411
3412 url_list = [self.url, '/api/network/', network_uuid]
3413 vm_list_rest_call = ''.join(url_list)
3414
3415 if self.client._session:
3416 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3417 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3418
3419 response = self.perform_request(req_type='GET',
3420 url=vm_list_rest_call,
3421 headers=headers)
3422 #Retry login if session expired & retry sending request
3423 if response.status_code == 403:
3424 response = self.retry_rest('GET', vm_list_rest_call)
3425
3426 if response.status_code == requests.codes.ok:
3427 return response.content
3428
3429 return None
3430
3431 def get_vcd_network(self, network_uuid=None):
3432 """
3433 Method retrieves available network from vCloud Director
3434
3435 Args:
3436 network_uuid - is VCD network UUID
3437
3438 Each element serialized as key : value pair
3439
3440 Following keys are available for access, e.g. network_configuration['Gateway']
3441 <Configuration>
3442 <IpScopes>
3443 <IpScope>
3444 <IsInherited>true</IsInherited>
3445 <Gateway>172.16.252.100</Gateway>
3446 <Netmask>255.255.255.0</Netmask>
3447 <Dns1>172.16.254.201</Dns1>
3448 <Dns2>172.16.254.202</Dns2>
3449 <DnsSuffix>vmwarelab.edu</DnsSuffix>
3450 <IsEnabled>true</IsEnabled>
3451 <IpRanges>
3452 <IpRange>
3453 <StartAddress>172.16.252.1</StartAddress>
3454 <EndAddress>172.16.252.99</EndAddress>
3455 </IpRange>
3456 </IpRanges>
3457 </IpScope>
3458 </IpScopes>
3459 <FenceMode>bridged</FenceMode>
3460
3461 Returns:
3462 A dictionary with the network configuration
3463 """
3464
3465 network_configuration = {}
3466 if network_uuid is None:
3467 return network_uuid
3468
3469 try:
3470 content = self.get_network_action(network_uuid=network_uuid)
3471 vm_list_xmlroot = XmlElementTree.fromstring(content)
3472
3473 network_configuration['status'] = vm_list_xmlroot.get("status")
3474 network_configuration['name'] = vm_list_xmlroot.get("name")
3475 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
3476
3477 for child in vm_list_xmlroot:
3478 if child.tag.split("}")[1] == 'IsShared':
3479 network_configuration['isShared'] = child.text.strip()
3480 if child.tag.split("}")[1] == 'Configuration':
3481 for configuration in child.iter():
3482 tagKey = configuration.tag.split("}")[1].strip()
3483 if tagKey != "":
3484 network_configuration[tagKey] = configuration.text.strip()
3485 return network_configuration
3486 except Exception as exp :
3487 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
3488 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
3489
3490 return network_configuration
3491
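    # Illustrative sketch of typical access into the dictionary returned by
    # get_vcd_network(); the keys mirror the flattened <Configuration> tags shown in the
    # docstring above, and the values here are examples, not real data:
    #
    #   net_cfg = self.get_vcd_network(network_uuid)
    #   gateway = net_cfg.get('Gateway')      # e.g. "172.16.252.100"
    #   netmask = net_cfg.get('Netmask')      # e.g. "255.255.255.0"
    #   status  = net_cfg.get('status')       # vCD network status code as a string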
3492 def delete_network_action(self, network_uuid=None):
3493 """
3494 Method deletes the given network from vCloud Director
3495
3496 Args:
3497 network_uuid - is a network uuid that client wish to delete
3498
3499 Returns:
3500 True if the delete request was accepted, otherwise False
3501 """
3502 client = self.connect_as_admin()
3503 if not client:
3504 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
3505 if network_uuid is None:
3506 return False
3507
3508 url_list = [self.url, '/api/admin/network/', network_uuid]
3509 vm_list_rest_call = ''.join(url_list)
3510
3511 if client._session:
3512 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3513 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
3514 response = self.perform_request(req_type='DELETE',
3515 url=vm_list_rest_call,
3516 headers=headers)
3517 if response.status_code == 202:
3518 return True
3519
3520 return False
3521
3522 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3523 ip_profile=None, isshared='true'):
3524 """
3525 Method create network in vCloud director
3526
3527 Args:
3528 network_name - is network name to be created.
3529 net_type - can be 'bridge','data','ptp','mgmt'.
3530 ip_profile is a dict containing the IP parameters of the network
3531 isshared - is a boolean
3532 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3533 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
3534
3535 Returns:
3536 The new network uuid, or None on failure
3537 """
3538
3539 new_network_name = [network_name, '-', str(uuid.uuid4())]
3540 content = self.create_network_rest(network_name=''.join(new_network_name),
3541 ip_profile=ip_profile,
3542 net_type=net_type,
3543 parent_network_uuid=parent_network_uuid,
3544 isshared=isshared)
3545 if content is None:
3546 self.logger.debug("Failed create network {}.".format(network_name))
3547 return None
3548
3549 try:
3550 vm_list_xmlroot = XmlElementTree.fromstring(content)
3551 vcd_uuid = vm_list_xmlroot.get('id').split(":")
3552 if len(vcd_uuid) == 4:
3553 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
3554 return vcd_uuid[3]
3555 except:
3556 self.logger.debug("Failed create network {}".format(network_name))
3557 return None
3558
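    # Illustrative ip_profile accepted by create_network()/create_network_rest(); any
    # missing key is filled from DEFAULT_IP_PROFILE or derived from subnet_address.
    # Addresses and the network name below are placeholders.
    #
    #   ip_profile = {
    #       "subnet_address":     "10.10.10.0/24",
    #       "gateway_address":    "10.10.10.1",
    #       "dns_address":        "10.10.10.2",
    #       "dhcp_enabled":       True,
    #       "dhcp_start_address": "10.10.10.3",
    #       "dhcp_count":         50,
    #       "ip_version":         "IPv4",
    #   }
    #   net_uuid = self.create_network("mgmt-net", net_type="bridge", ip_profile=ip_profile)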
3559 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3560 ip_profile=None, isshared='true'):
3561 """
3562 Method create network in vCloud director
3563
3564 Args:
3565 network_name - is network name to be created.
3566 net_type - can be 'bridge','data','ptp','mgmt'.
3567 ip_profile is a dict containing the IP parameters of the network
3568 isshared - is a boolean
3569 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3570 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
3571
3572 Returns:
3573 The XML content of the creation response, or None on failure
3574 """
3575 client_as_admin = self.connect_as_admin()
3576 if not client_as_admin:
3577 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3578 if network_name is None:
3579 return None
3580
3581 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3582 vm_list_rest_call = ''.join(url_list)
3583
3584 if client_as_admin._session:
3585 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3586 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3587
3588 response = self.perform_request(req_type='GET',
3589 url=vm_list_rest_call,
3590 headers=headers)
3591
3592 provider_network = None
3593 available_networks = None
3594 add_vdc_rest_url = None
3595
3596 if response.status_code != requests.codes.ok:
3597 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3598 response.status_code))
3599 return None
3600 else:
3601 try:
3602 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3603 for child in vm_list_xmlroot:
3604 if child.tag.split("}")[1] == 'ProviderVdcReference':
3605 provider_network = child.attrib.get('href')
3606 # application/vnd.vmware.admin.providervdc+xml
3607 if child.tag.split("}")[1] == 'Link':
3608 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3609 and child.attrib.get('rel') == 'add':
3610 add_vdc_rest_url = child.attrib.get('href')
3611 except:
3612 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3613 self.logger.debug("Respond body {}".format(response.content))
3614 return None
3615
3616 # find pvdc provided available network
3617 response = self.perform_request(req_type='GET',
3618 url=provider_network,
3619 headers=headers)
3620 if response.status_code != requests.codes.ok:
3621 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3622 response.status_code))
3623 return None
3624
3625 if parent_network_uuid is None:
3626 try:
3627 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3628 for child in vm_list_xmlroot.iter():
3629 if child.tag.split("}")[1] == 'AvailableNetworks':
3630 for networks in child.iter():
3631 # application/vnd.vmware.admin.network+xml
3632 if networks.attrib.get('href') is not None:
3633 available_networks = networks.attrib.get('href')
3634 break
3635 except:
3636 return None
3637
3638 try:
3639 #Configure IP profile of the network
3640 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
3641
3642 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
3643 subnet_rand = random.randint(0, 255)
3644 ip_base = "192.168.{}.".format(subnet_rand)
3645 ip_profile['subnet_address'] = ip_base + "0/24"
3646 else:
3647 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
3648
3649 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
3650 ip_profile['gateway_address']=ip_base + "1"
3651 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
3652 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
3653 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
3654 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
3655 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
3656 ip_profile['dhcp_start_address']=ip_base + "3"
3657 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
3658 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
3659 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
3660 ip_profile['dns_address']=ip_base + "2"
3661
3662 gateway_address=ip_profile['gateway_address']
3663 dhcp_count=int(ip_profile['dhcp_count'])
3664 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
3665
3666 if ip_profile['dhcp_enabled']==True:
3667 dhcp_enabled='true'
3668 else:
3669 dhcp_enabled='false'
3670 dhcp_start_address=ip_profile['dhcp_start_address']
3671
3672 #derive dhcp_end_address from dhcp_start_address & dhcp_count
3673 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
3674 end_ip_int += dhcp_count - 1
3675 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
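            # Illustrative arithmetic (placeholder values): a dhcp_start_address of
            # "10.10.10.3" with dhcp_count 50 gives a dhcp_end_address of "10.10.10.52",
            # i.e. start + count - 1.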
3676
3677 ip_version=ip_profile['ip_version']
3678 dns_address=ip_profile['dns_address']
3679 except KeyError as exp:
3680 self.logger.debug("Create Network REST: Key error {}".format(exp))
3681 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
3682
3683 # either use client provided UUID or search for a first available
3684 # if both are not defined we return none
3685 if parent_network_uuid is not None:
3686 provider_network = None
3687 available_networks = None
3688 add_vdc_rest_url = None
3689
3690 url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
3691 add_vdc_rest_url = ''.join(url_list)
3692
3693 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
3694 available_networks = ''.join(url_list)
3695
3696 #Creating all networks as Direct Org VDC type networks.
3697 #Unused in case of Underlay (data/ptp) network interface.
3698 fence_mode="bridged"
3699 is_inherited='false'
3700 dns_list = dns_address.split(";")
3701 dns1 = dns_list[0]
3702 dns2_text = ""
3703 if len(dns_list) >= 2:
3704 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
3705 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3706 <Description>Openmano created</Description>
3707 <Configuration>
3708 <IpScopes>
3709 <IpScope>
3710 <IsInherited>{1:s}</IsInherited>
3711 <Gateway>{2:s}</Gateway>
3712 <Netmask>{3:s}</Netmask>
3713 <Dns1>{4:s}</Dns1>{5:s}
3714 <IsEnabled>{6:s}</IsEnabled>
3715 <IpRanges>
3716 <IpRange>
3717 <StartAddress>{7:s}</StartAddress>
3718 <EndAddress>{8:s}</EndAddress>
3719 </IpRange>
3720 </IpRanges>
3721 </IpScope>
3722 </IpScopes>
3723 <ParentNetwork href="{9:s}"/>
3724 <FenceMode>{10:s}</FenceMode>
3725 </Configuration>
3726 <IsShared>{11:s}</IsShared>
3727 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3728 subnet_address, dns1, dns2_text, dhcp_enabled,
3729 dhcp_start_address, dhcp_end_address, available_networks,
3730 fence_mode, isshared)
3731
3732 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
3733 try:
3734 response = self.perform_request(req_type='POST',
3735 url=add_vdc_rest_url,
3736 headers=headers,
3737 data=data)
3738
3739 if response.status_code != 201:
3740 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
3741 .format(response.status_code,response.content))
3742 else:
3743 network_task = self.get_task_from_response(response.content)
3744 self.logger.debug("Create Network REST : Waiting for Network creation complete")
3745 time.sleep(5)
3746 result = self.client.get_task_monitor().wait_for_success(task=network_task)
3747 if result.get('status') == 'success':
3748 return response.content
3749 else:
3750 self.logger.debug("create_network_rest task failed. Network Create response : {}"
3751 .format(response.content))
3752 except Exception as exp:
3753 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
3754
3755 return None
3756
3757 def convert_cidr_to_netmask(self, cidr_ip=None):
3758 """
3759 Method converts a CIDR prefix length to a dotted-decimal netmask
3760 Args:
3761 cidr_ip : CIDR IP address
3762 Returns:
3763 netmask : Converted netmask
3764 """
3765 if cidr_ip is not None:
3766 if '/' in cidr_ip:
3767 network, net_bits = cidr_ip.split('/')
3768 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
3769 else:
3770 netmask = cidr_ip
3771 return netmask
3772 return None
3773
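    # Illustrative sketch of convert_cidr_to_netmask() behaviour: "192.168.10.0/24"
    # returns "255.255.255.0", a bare address without "/" is returned unchanged,
    # and a None input yields None. The address values are placeholders.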
3774 def get_provider_rest(self, vca=None):
3775 """
3776 Method gets the provider vdc view from vCloud Director
3777
3778 Args:
3779 vca - is the active vCD connection.
3782
3783 Returns:
3784 The XML content of the response, or None
3785 """
3786
3787 url_list = [self.url, '/api/admin']
3788 if vca:
3789 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3790 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3791 response = self.perform_request(req_type='GET',
3792 url=''.join(url_list),
3793 headers=headers)
3794
3795 if response.status_code == requests.codes.ok:
3796 return response.content
3797 return None
3798
3799 def create_vdc(self, vdc_name=None):
3800
3801 vdc_dict = {}
3802
3803 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
3804 if xml_content is not None:
3805 try:
3806 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
3807 for child in task_resp_xmlroot:
3808 if child.tag.split("}")[1] == 'Owner':
3809 vdc_id = child.attrib.get('href').split("/")[-1]
3810 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
3811 return vdc_dict
3812 except:
3813 self.logger.debug("Respond body {}".format(xml_content))
3814
3815 return None
3816
3817 def create_vdc_from_tmpl_rest(self, vdc_name=None):
3818 """
3819 Method creates a vdc in vCloud Director based on a VDC template.
3820 It uses a pre-defined template.
3821
3822 Args:
3823 vdc_name - name of a new vdc.
3824
3825 Returns:
3826 The XML content of the response, or None
3827 """
3828 # prerequisite: at least one vdc template should be available in vCD
3829 self.logger.info("Creating new vdc {}".format(vdc_name))
3830 vca = self.connect_as_admin()
3831 if not vca:
3832 raise vimconn.vimconnConnectionException("Failed to connect vCD")
3833 if vdc_name is None:
3834 return None
3835
3836 url_list = [self.url, '/api/vdcTemplates']
3837 vm_list_rest_call = ''.join(url_list)
3838
3839 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3840 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
3841 response = self.perform_request(req_type='GET',
3842 url=vm_list_rest_call,
3843 headers=headers)
3844
3845 # container url to a template
3846 vdc_template_ref = None
3847 try:
3848 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3849 for child in vm_list_xmlroot:
3850 # application/vnd.vmware.admin.providervdc+xml
3851 # we need to find a template from which we instantiate the VDC
3852 if child.tag.split("}")[1] == 'VdcTemplate':
3853 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
3854 vdc_template_ref = child.attrib.get('href')
3855 except:
3856 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3857 self.logger.debug("Respond body {}".format(response.content))
3858 return None
3859
3860 # if we didn't find the required pre-defined template we return None
3861 if vdc_template_ref is None:
3862 return None
3863
3864 try:
3865 # instantiate vdc
3866 url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
3867 vm_list_rest_call = ''.join(url_list)
3868 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3869 <Source href="{1:s}"></Source>
3870 <Description>openmano</Description>
3871 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
3872
3873 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
3874
3875 response = self.perform_request(req_type='POST',
3876 url=vm_list_rest_call,
3877 headers=headers,
3878 data=data)
3879
3880 vdc_task = self.get_task_from_response(response.content)
3881 self.client.get_task_monitor().wait_for_success(task=vdc_task)
3882
3883 # if all is ok we return the content, otherwise None by default
3884 if response.status_code >= 200 and response.status_code < 300:
3885 return response.content
3886 return None
3887 except:
3888 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3889 self.logger.debug("Respond body {}".format(response.content))
3890
3891 return None
3892
3893 def create_vdc_rest(self, vdc_name=None):
3894 """
3895 Method creates a vdc in vCloud Director
3896
3897 Args:
3898 vdc_name - vdc name to be created
3899 Returns:
3900 The response content, or None
3901 """
3902
3903 self.logger.info("Creating new vdc {}".format(vdc_name))
3904
3905 vca = self.connect_as_admin()
3906 if not vca:
3907 raise vimconn.vimconnConnectionException("Failed to connect vCD")
3908 if vdc_name is None:
3909 return None
3910
3911 url_list = [self.url, '/api/admin/org/', self.org_uuid]
3912 vm_list_rest_call = ''.join(url_list)
3913
3914 if vca._session:
3915 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3916 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3917 response = self.perform_request(req_type='GET',
3918 url=vm_list_rest_call,
3919 headers=headers)
3920
3921 provider_vdc_ref = None
3922 add_vdc_rest_url = None
3923 available_networks = None
3924
3925 if response.status_code != requests.codes.ok:
3926 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3927 response.status_code))
3928 return None
3929 else:
3930 try:
3931 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3932 for child in vm_list_xmlroot:
3933 # application/vnd.vmware.admin.providervdc+xml
3934 if child.tag.split("}")[1] == 'Link':
3935 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
3936 and child.attrib.get('rel') == 'add':
3937 add_vdc_rest_url = child.attrib.get('href')
3938 except:
3939 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3940 self.logger.debug("Respond body {}".format(response.content))
3941 return None
3942
3943 response = self.get_provider_rest(vca=vca)
3944 try:
3945 vm_list_xmlroot = XmlElementTree.fromstring(response)
3946 for child in vm_list_xmlroot:
3947 if child.tag.split("}")[1] == 'ProviderVdcReferences':
3948 for sub_child in child:
3949 provider_vdc_ref = sub_child.attrib.get('href')
3950 except:
3951 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3952 self.logger.debug("Respond body {}".format(response))
3953 return None
3954
3955 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
3956 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
3957 <AllocationModel>ReservationPool</AllocationModel>
3958 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
3959 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
3960 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
3961 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
3962 <ProviderVdcReference
3963 name="Main Provider"
3964 href="{2:s}" />
3965 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
3966 escape(vdc_name),
3967 provider_vdc_ref)
3968
3969 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
3970
3971 response = self.perform_request(req_type='POST',
3972 url=add_vdc_rest_url,
3973 headers=headers,
3974 data=data)
3975
3976 # if all is ok we return the content, otherwise None by default
3977 if response.status_code == 201:
3978 return response.content
3979 return None
3980
3981 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
3982 """
3983 Method retrieves vApp details from vCloud Director
3984
3985 Args:
3986 vapp_uuid - is vapp identifier.
3987
3988 Returns:
3989 A dictionary with the parsed vApp details (empty if the call fails)
3990 """
3991
3992 parsed_respond = {}
3993 vca = None
3994
3995 if need_admin_access:
3996 vca = self.connect_as_admin()
3997 else:
3998 vca = self.client
3999
4000 if not vca:
4001 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4002 if vapp_uuid is None:
4003 return None
4004
4005 url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
4006 get_vapp_restcall = ''.join(url_list)
4007
4008 if vca._session:
4009 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4010 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4011 response = self.perform_request(req_type='GET',
4012 url=get_vapp_restcall,
4013 headers=headers)
4014
4015 if response.status_code == 403:
4016 if need_admin_access == False:
4017 response = self.retry_rest('GET', get_vapp_restcall)
4018
4019 if response.status_code != requests.codes.ok:
4020 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
4021 response.status_code))
4022 return parsed_respond
4023
4024 try:
4025 xmlroot_respond = XmlElementTree.fromstring(response.content)
4026 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
4027
4028 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4029 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4030 'vmw': 'http://www.vmware.com/schema/ovf',
4031 'vm': 'http://www.vmware.com/vcloud/v1.5',
4032 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4033 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
4034 "xmlns":"http://www.vmware.com/vcloud/v1.5"
4035 }
4036
4037 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
4038 if created_section is not None:
4039 parsed_respond['created'] = created_section.text
4040
4041 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
4042 if network_section is not None and 'networkName' in network_section.attrib:
4043 parsed_respond['networkname'] = network_section.attrib['networkName']
4044
4045 ipscopes_section = \
4046 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
4047 namespaces)
4048 if ipscopes_section is not None:
4049 for ipscope in ipscopes_section:
4050 for scope in ipscope:
4051 tag_key = scope.tag.split("}")[1]
4052 if tag_key == 'IpRanges':
4053 ip_ranges = scope.getchildren()
4054 for ipblock in ip_ranges:
4055 for block in ipblock:
4056 parsed_respond[block.tag.split("}")[1]] = block.text
4057 else:
4058 parsed_respond[tag_key] = scope.text
4059
4060 # parse children section for other attrib
4061 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4062 if children_section is not None:
4063 parsed_respond['name'] = children_section.attrib['name']
4064 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
4065 if "nestedHypervisorEnabled" in children_section.attrib else None
4066 parsed_respond['deployed'] = children_section.attrib['deployed']
4067 parsed_respond['status'] = children_section.attrib['status']
4068 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
4069 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
4070 nic_list = []
4071 for adapters in network_adapter:
4072 adapter_key = adapters.tag.split("}")[1]
4073 if adapter_key == 'PrimaryNetworkConnectionIndex':
4074 parsed_respond['primarynetwork'] = adapters.text
4075 if adapter_key == 'NetworkConnection':
4076 vnic = {}
4077 if 'network' in adapters.attrib:
4078 vnic['network'] = adapters.attrib['network']
4079 for adapter in adapters:
4080 setting_key = adapter.tag.split("}")[1]
4081 vnic[setting_key] = adapter.text
4082 nic_list.append(vnic)
4083
4084 for link in children_section:
4085 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4086 if link.attrib['rel'] == 'screen:acquireTicket':
4087 parsed_respond['acquireTicket'] = link.attrib
4088 if link.attrib['rel'] == 'screen:acquireMksTicket':
4089 parsed_respond['acquireMksTicket'] = link.attrib
4090
4091 parsed_respond['interfaces'] = nic_list
4092 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4093 if vCloud_extension_section is not None:
4094 vm_vcenter_info = {}
4095 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4096 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4097 if vmext is not None:
4098 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4099 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
4100
4101 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
4102 vm_virtual_hardware_info = {}
4103 if virtual_hardware_section is not None:
4104 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
4105 if item.find("rasd:Description",namespaces).text == "Hard disk":
4106 disk_size = item.find("rasd:HostResource" ,namespaces
4107 ).attrib["{"+namespaces['vm']+"}capacity"]
4108
4109 vm_virtual_hardware_info["disk_size"]= disk_size
4110 break
4111
4112 for link in virtual_hardware_section:
4113 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4114 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
4115 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
4116 break
4117
4118 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
4119 except Exception as exp :
4120 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4121 return parsed_respond
4122
4123 def acquire_console(self, vm_uuid=None):
4124
4125 if vm_uuid is None:
4126 return None
4127 if self.client._session:
4128 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4129 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4130 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
4131 console_dict = vm_dict['acquireTicket']
4132 console_rest_call = console_dict['href']
4133
4134 response = self.perform_request(req_type='POST',
4135 url=console_rest_call,
4136 headers=headers)
4137
4138 if response.status_code == 403:
4139 response = self.retry_rest('POST', console_rest_call)
4140
4141 if response.status_code == requests.codes.ok:
4142 return response.content
4143
4144 return None
4145
4146 def modify_vm_disk(self, vapp_uuid, flavor_disk):
4147 """
4148 Method retrieves VM disk details and resizes the disk if the flavor requires it
4149
4150 Args:
4151 vapp_uuid - is vapp identifier.
4152 flavor_disk - disk size as specified in VNFD (flavor)
4153
4154 Returns:
4155 The status of the disk modification, or None on error
4156 """
4157 status = None
4158 try:
4159 #Flavor disk is in GB convert it into MB
4160 flavor_disk = int(flavor_disk) * 1024
4161 vm_details = self.get_vapp_details_rest(vapp_uuid)
4162 if vm_details:
4163 vm_name = vm_details["name"]
4164 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
4165
4166 if vm_details and "vm_virtual_hardware" in vm_details:
4167 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
4168 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4169
4170 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
4171
4172 if flavor_disk > vm_disk:
4173 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
4174 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
4175 vm_disk, flavor_disk ))
4176 else:
4177 status = True
4178 self.logger.info("No need to modify disk of VM {}".format(vm_name))
4179
4180 return status
4181 except Exception as exp:
4182 self.logger.info("Error occurred while modifing disk size {}".format(exp))
4183
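    # Illustrative sketch: flavor disk sizes arrive in GB and are converted to MB before
    # the comparison above, so a 40 GB flavor against a 20480 MB (20 GB) vApp disk
    # triggers a resize. 'vapp_uuid' below is a placeholder identifier.
    #
    #   status = self.modify_vm_disk(vapp_uuid, flavor_disk=40)   # True on success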
4184
4185 def modify_vm_disk_rest(self, disk_href , disk_size):
4186 """
4187 Method to modify the VM disk size via REST
4188
4189 Args:
4190 disk_href - vCD API URL to GET and PUT disk data
4191 disk_size - disk size as specified in VNFD (flavor)
4192
4193 Returns:
4194 True on success, False on failure, or None otherwise
4195 """
4196 if disk_href is None or disk_size is None:
4197 return None
4198
4199 if self.client._session:
4200 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4201 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4202 response = self.perform_request(req_type='GET',
4203 url=disk_href,
4204 headers=headers)
4205
4206 if response.status_code == 403:
4207 response = self.retry_rest('GET', disk_href)
4208
4209 if response.status_code != requests.codes.ok:
4210 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
4211 response.status_code))
4212 return None
4213 try:
4214 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4215 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4216 #For python3
4217 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
4218 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4219
4220 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4221 if item.find("rasd:Description",namespaces).text == "Hard disk":
4222 disk_item = item.find("rasd:HostResource" ,namespaces )
4223 if disk_item is not None:
4224 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
4225 break
4226
4227 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
4228 xml_declaration=True)
4229
4230 #Send PUT request to modify disk size
4231 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4232
4233 response = self.perform_request(req_type='PUT',
4234 url=disk_href,
4235 headers=headers,
4236 data=data)
4237 if response.status_code == 403:
4238 add_headers = {'Content-Type': headers['Content-Type']}
4239 response = self.retry_rest('PUT', disk_href, add_headers, data)
4240
4241 if response.status_code != 202:
4242 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
4243 response.status_code))
4244 else:
4245 modify_disk_task = self.get_task_from_response(response.content)
4246 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
4247 if result.get('status') == 'success':
4248 return True
4249 else:
4250 return False
4251 return None
4252
4253 except Exception as exp :
4254 self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
4255 return None
4256
4257 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
4258 """
4259 Method to attach pci devices to VM
4260
4261 Args:
4262 vapp_uuid - uuid of vApp/VM
4263 pci_devices - pci devices information as specified in VNFD (flavor)
4264
4265 Returns:
4266 The status of the add pci device task, vm object and
4267 vcenter_conect object
4268 """
4269 vm_obj = None
4270 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
4271 vcenter_conect, content = self.get_vcenter_content()
4272 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4273
4274 if vm_moref_id:
4275 try:
4276 no_of_pci_devices = len(pci_devices)
4277 if no_of_pci_devices > 0:
4278 #Get VM and its host
4279 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4280 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4281 if host_obj and vm_obj:
4282 #get PCI devices from the host on which the vapp is currently installed
4283 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
4284
4285 if avilable_pci_devices is None:
4286 #find other hosts with active pci devices
4287 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
4288 content,
4289 no_of_pci_devices
4290 )
4291
4292 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4293 #Migrate vm to the host where PCI devices are available
4294 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
4295 task = self.relocate_vm(new_host_obj, vm_obj)
4296 if task is not None:
4297 result = self.wait_for_vcenter_task(task, vcenter_conect)
4298 self.logger.info("Migrate VM status: {}".format(result))
4299 host_obj = new_host_obj
4300 else:
4301 self.logger.info("Fail to migrate VM : {}".format(result))
4302 raise vimconn.vimconnNotFoundException(
4303 "Fail to migrate VM : {} to host {}".format(
4304 vmname_andid,
4305 new_host_obj)
4306 )
4307
4308 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4309 #Add PCI devices one by one
4310 for pci_device in avilable_pci_devices:
4311 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
4312 if task:
4313 status= self.wait_for_vcenter_task(task, vcenter_conect)
4314 if status:
4315 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4316 else:
4317 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4318 return True, vm_obj, vcenter_conect
4319 else:
4320 self.logger.error("Currently there is no host with"\
4321 " {} number of avaialble PCI devices required for VM {}".format(
4322 no_of_pci_devices,
4323 vmname_andid)
4324 )
4325 raise vimconn.vimconnNotFoundException(
4326 "Currently there is no host with {} "\
4327 "number of avaialble PCI devices required for VM {}".format(
4328 no_of_pci_devices,
4329 vmname_andid))
4330 else:
4331 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
4332
4333 except vmodl.MethodFault as error:
4334 self.logger.error("Error occurred while adding PCI devices {} ",error)
4335 return None, vm_obj, vcenter_conect
4336
4337 def get_vm_obj(self, content, mob_id):
4338 """
4339 Method to get the vSphere VM object associated with a given moref ID
4340 Args:
4341 content - vCenter content object
4342 mob_id - moref ID of the VM
4344
4345 Returns:
4346 VM and host object
4347 """
4348 vm_obj = None
4349 host_obj = None
4350 try :
4351 container = content.viewManager.CreateContainerView(content.rootFolder,
4352 [vim.VirtualMachine], True
4353 )
4354 for vm in container.view:
4355 mobID = vm._GetMoId()
4356 if mobID == mob_id:
4357 vm_obj = vm
4358 host_obj = vm_obj.runtime.host
4359 break
4360 except Exception as exp:
4361 self.logger.error("Error occurred while finding VM object : {}".format(exp))
4362 return host_obj, vm_obj
4363
4364 def get_pci_devices(self, host, need_devices):
4365 """
4366 Method to get the details of pci devices on given host
4367 Args:
4368 host - vSphere host object
4369 need_devices - number of pci devices needed on host
4370
4371 Returns:
4372 array of pci devices
4373 """
4374 all_devices = []
4375 all_device_ids = []
4376 used_devices_ids = []
4377
4378 try:
4379 if host:
4380 pciPassthruInfo = host.config.pciPassthruInfo
4381 pciDevies = host.hardware.pciDevice
4382
4383 for pci_status in pciPassthruInfo:
4384 if pci_status.passthruActive:
4385 for device in pciDevies:
4386 if device.id == pci_status.id:
4387 all_device_ids.append(device.id)
4388 all_devices.append(device)
4389
4390 #check if devices are in use
4391 avalible_devices = all_devices
4392 for vm in host.vm:
4393 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
4394 vm_devices = vm.config.hardware.device
4395 for device in vm_devices:
4396 if type(device) is vim.vm.device.VirtualPCIPassthrough:
4397 if device.backing.id in all_device_ids:
4398 for use_device in avalible_devices:
4399 if use_device.id == device.backing.id:
4400 avalible_devices.remove(use_device)
4401 used_devices_ids.append(device.backing.id)
4402 self.logger.debug("Device {} from devices {}"\
4403 "is in use".format(device.backing.id,
4404 device)
4405 )
4406 if len(avalible_devices) < need_devices:
4407 self.logger.debug("Host {} don't have {} number of active devices".format(host,
4408 need_devices))
4409 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
4410 avalible_devices))
4411 return None
4412 else:
4413 required_devices = avalible_devices[:need_devices]
4414 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
4415 len(avalible_devices),
4416 host,
4417 need_devices))
4418 self.logger.info("Retruning {} devices as {}".format(need_devices,
4419 required_devices ))
4420 return required_devices
4421
4422 except Exception as exp:
4423 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
4424
4425 return None
4426
4427 def get_host_and_PCIdevices(self, content, need_devices):
4428 """
4429 Method to get the details of pci devices on all hosts
4430
4431 Args:
4432 content - vSphere host object
4433 need_devices - number of pci devices needed on host
4434
4435 Returns:
4436 array of pci devices and host object
4437 """
4438 host_obj = None
4439 pci_device_objs = None
4440 try:
4441 if content:
4442 container = content.viewManager.CreateContainerView(content.rootFolder,
4443 [vim.HostSystem], True)
4444 for host in container.view:
4445 devices = self.get_pci_devices(host, need_devices)
4446 if devices:
4447 host_obj = host
4448 pci_device_objs = devices
4449 break
4450 except Exception as exp:
4451 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
4452
4453 return host_obj,pci_device_objs
4454
4455 def relocate_vm(self, dest_host, vm) :
4456 """
4457 Method to relocate the VM to a new host
4458
4459 Args:
4460 dest_host - vSphere host object
4461 vm - vSphere VM object
4462
4463 Returns:
4464 task object
4465 """
4466 task = None
4467 try:
4468 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
4469 task = vm.Relocate(relocate_spec)
4470 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
4471 except Exception as exp:
4472 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
4473 dest_host, vm, exp))
4474 return task
4475
4476 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
4477 """
4478 Waits and provides updates on a vSphere task
4479 """
4480 while task.info.state == vim.TaskInfo.State.running:
4481 time.sleep(2)
4482
4483 if task.info.state == vim.TaskInfo.State.success:
4484 if task.info.result is not None and not hideResult:
4485 self.logger.info('{} completed successfully, result: {}'.format(
4486 actionName,
4487 task.info.result))
4488 else:
4489 self.logger.info('Task {} completed successfully.'.format(actionName))
4490 else:
4491 self.logger.error('{} did not complete successfully: {} '.format(
4492 actionName,
4493 task.info.error)
4494 )
4495
4496 return task.info.result
4497
4498 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
4499 """
4500 Method to add pci device in given VM
4501
4502 Args:
4503 host_object - vSphere host object
4504 vm_object - vSphere VM object
4505 host_pci_dev - host_pci_dev must be one of the devices from the
4506 host_object.hardware.pciDevice list
4507 which is configured as a PCI passthrough device
4508
4509 Returns:
4510 task object
4511 """
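        # Illustrative sketch only (not executed here): a typical passthrough flow built from
        # the helpers in this class. Names such as `content`, `vm_object` and the device count
        # are assumptions, not values coming from this module.
        #
        #   host_obj, pci_devs = self.get_host_and_PCIdevices(content, need_devices=1)
        #   if host_obj and pci_devs:
        #       task = self.add_pci_to_vm(host_obj, vm_object, pci_devs[0])
        #       if task:
        #           self.wait_for_vcenter_task(task, actionName='add PCI device')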
4512 task = None
4513 if vm_object and host_object and host_pci_dev:
4514 try :
4515 #Add PCI device to VM
4516 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
4517 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
4518
4519 if host_pci_dev.id not in systemid_by_pciid:
4520 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
4521 return None
4522
4523 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
4524 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
4525 id=host_pci_dev.id,
4526 systemId=systemid_by_pciid[host_pci_dev.id],
4527 vendorId=host_pci_dev.vendorId,
4528 deviceName=host_pci_dev.deviceName)
4529
4530 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
4531
4532 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
4533 new_device_config.operation = "add"
4534 vmConfigSpec = vim.vm.ConfigSpec()
4535 vmConfigSpec.deviceChange = [new_device_config]
4536
4537 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
4538 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
4539 host_pci_dev, vm_object, host_object)
4540 )
4541 except Exception as exp:
4542                 self.logger.error("Error occurred while adding PCI device {} to VM {}: {}".format(
4543 host_pci_dev,
4544 vm_object,
4545 exp))
4546 return task
4547
4548 def get_vm_vcenter_info(self):
4549 """
4550         Method to get vCenter connection details configured for this VIM
4551 
4552         Args:
4553             None
4554 
4555         Returns:
4556             dict with vCenter IP, port, user and password
4557 """
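        # Illustrative only: these values come from the '--config' given when attaching the
        # datacenter to the tenant. Key names below mirror the attributes read here; the
        # values are hypothetical.
        #   config: {vcenter_ip: "10.0.0.5", vcenter_port: 443,
        #            vcenter_user: "administrator@vsphere.local", vcenter_password: "***"}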
4558 vm_vcenter_info = {}
4559
4560 if self.vcenter_ip is not None:
4561 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
4562 else:
4563 raise vimconn.vimconnException(message="vCenter IP is not provided."\
4564 " Please provide vCenter IP while attaching datacenter to tenant in --config")
4565 if self.vcenter_port is not None:
4566 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
4567 else:
4568 raise vimconn.vimconnException(message="vCenter port is not provided."\
4569 " Please provide vCenter port while attaching datacenter to tenant in --config")
4570 if self.vcenter_user is not None:
4571 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
4572 else:
4573 raise vimconn.vimconnException(message="vCenter user is not provided."\
4574 " Please provide vCenter user while attaching datacenter to tenant in --config")
4575
4576 if self.vcenter_password is not None:
4577 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
4578 else:
4579 raise vimconn.vimconnException(message="vCenter user password is not provided."\
4580 " Please provide vCenter user password while attaching datacenter to tenant in --config")
4581
4582 return vm_vcenter_info
4583
4584
4585 def get_vm_pci_details(self, vmuuid):
4586 """
4587 Method to get VM PCI device details from vCenter
4588
4589 Args:
4590             vmuuid - uuid of the VM (vCD)
4591 
4592         Returns:
4593             dict of PCI devices attached to the VM
4594
4595 """
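        # Shape of the returned dict, sketched with hypothetical values
        # (note: the 'devide_id' key spelling is the one used by this connector):
        #   {'host_name': 'esxi-01', 'host_ip': '10.0.0.11',
        #    'PCI device 0': {'devide_id': '0000:af:00.0', 'pciSlotNumber': 160}}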
4596 vm_pci_devices_info = {}
4597 try:
4598 vcenter_conect, content = self.get_vcenter_content()
4599 vm_moref_id = self.get_vm_moref_id(vmuuid)
4600 if vm_moref_id:
4601 #Get VM and its host
4602 if content:
4603 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4604 if host_obj and vm_obj:
4605 vm_pci_devices_info["host_name"]= host_obj.name
4606 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
4607 for device in vm_obj.config.hardware.device:
4608 if type(device) == vim.vm.device.VirtualPCIPassthrough:
4609 device_details={'devide_id':device.backing.id,
4610 'pciSlotNumber':device.slotInfo.pciSlotNumber,
4611 }
4612 vm_pci_devices_info[device.deviceInfo.label] = device_details
4613 else:
4614 self.logger.error("Can not connect to vCenter while getting "\
4615                                   "PCI devices information")
4616 return vm_pci_devices_info
4617 except Exception as exp:
4618             self.logger.error("Error occurred while getting VM information"\
4619 " for VM : {}".format(exp))
4620 raise vimconn.vimconnException(message=exp)
4621
4622
4623 def reserve_memory_for_all_vms(self, vapp, memory_mb):
4624 """
4625 Method to reserve memory for all VMs
4626 Args :
4627 vapp - VApp
4628 memory_mb - Memory in MB
4629 Returns:
4630 None
4631 """
4632
4633 self.logger.info("Reserve memory for all VMs")
4634 for vms in vapp.get_all_vms():
4635 vm_id = vms.get('id').split(':')[-1]
4636
4637 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
4638
4639 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4640 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4641 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
4642 response = self.perform_request(req_type='GET',
4643 url=url_rest_call,
4644 headers=headers)
4645
4646 if response.status_code == 403:
4647 response = self.retry_rest('GET', url_rest_call)
4648
4649 if response.status_code != 200:
4650                 self.logger.error("REST call {} failed reason : {} "\
4651 "status code : {}".format(url_rest_call,
4652 response.content,
4653 response.status_code))
4654 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
4655 "memory")
4656
4657 bytexml = bytes(bytearray(response.content, encoding='utf-8'))
4658 contentelem = lxmlElementTree.XML(bytexml)
4659 namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.iteritems() if prefix}
4660 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4661
4662 # Find the reservation element in the response
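            # The memory section is a rasd item list; an abridged, illustrative payload looks like:
            #   <Item xmlns:rasd="...CIM_ResourceAllocationSettingData">
            #       <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
            #       <rasd:Reservation>0</rasd:Reservation>
            #       <rasd:VirtualQuantity>4096</rasd:VirtualQuantity>
            #   </Item>
            # Only rasd:Reservation is rewritten below with the requested memory_mb.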
4663 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
4664 for memelem in memelem_list:
4665 memelem.text = str(memory_mb)
4666
4667 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
4668
4669 response = self.perform_request(req_type='PUT',
4670 url=url_rest_call,
4671 headers=headers,
4672 data=newdata)
4673
4674 if response.status_code == 403:
4675 add_headers = {'Content-Type': headers['Content-Type']}
4676 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4677
4678 if response.status_code != 202:
4679                 self.logger.error("REST call {} failed reason : {} "\
4680 "status code : {} ".format(url_rest_call,
4681 response.content,
4682 response.status_code))
4683 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
4684 "virtual hardware memory section")
4685 else:
4686 mem_task = self.get_task_from_response(response.content)
4687 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
4688 if result.get('status') == 'success':
4689 self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
4690 .format(vm_id))
4691 else:
4692 self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
4693 .format(vm_id))
4694
4695 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
4696 """
4697 Configure VApp network config with org vdc network
4698 Args :
4699 vapp - VApp
4700 Returns:
4701 None
4702 """
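        # Illustrative call (the vApp id and network name are assumptions):
        #   self.connect_vapp_to_org_vdc_network(vapp_id, 'mgmt-net')
        # The PUT below appends a <NetworkConfig> with <FenceMode>bridged</FenceMode> that
        # points at the org VDC network href.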
4703
4704 self.logger.info("Connecting vapp {} to org vdc network {}".
4705 format(vapp_id, net_name))
4706
4707 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
4708
4709 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4710 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4711 response = self.perform_request(req_type='GET',
4712 url=url_rest_call,
4713 headers=headers)
4714
4715 if response.status_code == 403:
4716 response = self.retry_rest('GET', url_rest_call)
4717
4718 if response.status_code != 200:
4719                 self.logger.error("REST call {} failed reason : {} "\
4720 "status code : {}".format(url_rest_call,
4721 response.content,
4722 response.status_code))
4723 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
4724 "network config section")
4725
4726 data = response.content
4727 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
4728 net_id = self.get_network_id_by_name(net_name)
4729 if not net_id:
4730 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
4731 "existing network")
4732
4733 bytexml = bytes(bytearray(data, encoding='utf-8'))
4734 newelem = lxmlElementTree.XML(bytexml)
4735 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.iteritems() if prefix}
4736 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
4737 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
4738
4739 newstr = """<NetworkConfig networkName="{}">
4740 <Configuration>
4741 <ParentNetwork href="{}/api/network/{}"/>
4742 <FenceMode>bridged</FenceMode>
4743 </Configuration>
4744 </NetworkConfig>
4745 """.format(net_name, self.url, net_id)
4746 newcfgelem = lxmlElementTree.fromstring(newstr)
4747 if nwcfglist:
4748 nwcfglist[0].addnext(newcfgelem)
4749
4750 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
4751
4752 response = self.perform_request(req_type='PUT',
4753 url=url_rest_call,
4754 headers=headers,
4755 data=newdata)
4756
4757 if response.status_code == 403:
4758 add_headers = {'Content-Type': headers['Content-Type']}
4759 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4760
4761 if response.status_code != 202:
4762                 self.logger.error("REST call {} failed reason : {} "\
4763 "status code : {} ".format(url_rest_call,
4764 response.content,
4765 response.status_code))
4766 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
4767 "network config section")
4768 else:
4769 vapp_task = self.get_task_from_response(response.content)
4770 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
4771 if result.get('status') == 'success':
4772 self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
4773 "network {}".format(vapp_id, net_name))
4774 else:
4775 self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
4776 "connect to network {}".format(vapp_id, net_name))
4777
4778 def remove_primary_network_adapter_from_all_vms(self, vapp):
4779 """
4780         Method to remove the primary network adapter from all VMs in a vApp
4781 Args :
4782 vapp - VApp
4783 Returns:
4784 None
4785 """
4786
4787 self.logger.info("Removing network adapter from all VMs")
4788 for vms in vapp.get_all_vms():
4789 vm_id = vms.get('id').split(':')[-1]
4790
4791 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
4792
4793 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4794 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4795 response = self.perform_request(req_type='GET',
4796 url=url_rest_call,
4797 headers=headers)
4798
4799 if response.status_code == 403:
4800 response = self.retry_rest('GET', url_rest_call)
4801
4802 if response.status_code != 200:
4803                 self.logger.error("REST call {} failed reason : {} "\
4804 "status code : {}".format(url_rest_call,
4805 response.content,
4806 response.status_code))
4807 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
4808 "network connection section")
4809
4810 data = response.content
4811 data = data.split('<Link rel="edit"')[0]
4812
4813 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
4814
4815 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
4816 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
4817 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
4818 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
4819 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
4820 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
4821 xmlns:vmw="http://www.vmware.com/schema/ovf"
4822 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
4823 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
4824 xmlns:ns9="http://www.vmware.com/vcloud/versions"
4825 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
4826 <ovf:Info>Specifies the available VM network connections</ovf:Info>
4827 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
4828 <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
4829 </NetworkConnectionSection>""".format(url=url_rest_call)
4830 response = self.perform_request(req_type='PUT',
4831 url=url_rest_call,
4832 headers=headers,
4833 data=newdata)
4834
4835 if response.status_code == 403:
4836 add_headers = {'Content-Type': headers['Content-Type']}
4837 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4838
4839 if response.status_code != 202:
4840                 self.logger.error("REST call {} failed reason : {} "\
4841 "status code : {} ".format(url_rest_call,
4842 response.content,
4843 response.status_code))
4844 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
4845 "network connection section")
4846 else:
4847 nic_task = self.get_task_from_response(response.content)
4848 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
4849 if result.get('status') == 'success':
4850                     self.logger.info("remove_primary_network_adapter(): removed primary "\
4851                                                                    "network adapter from VM {}".format(vm_id))
4852                 else:
4853                     self.logger.error("remove_primary_network_adapter(): failed to remove "\
4854                                                                    "primary network adapter from VM {}".format(vm_id))
4855
4856 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
4857 """
4858         Method to add a network adapter to all VMs in a vApp
4859 Args :
4860 network_name - name of network
4861 primary_nic_index - int value for primary nic index
4862 nicIndex - int value for nic index
4863             nic_type - network adapter model name to attach to the VM
4864 Returns:
4865 None
4866 """
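        # Sketch of the expected 'net' dict (hypothetical values; the nic_type shown is an
        # assumption). Only the keys below are read here:
        #   net = {'floating_ip': False,          # POOL allocation when True
        #          'ip_address': '192.168.1.10',  # MANUAL allocation when present
        #          'mac_address': '00:50:56:aa:bb:cc'}
        #   self.add_network_adapter_to_vms(vapp, 'mgmt-net', primary_nic_index=0,
        #                                   nicIndex=0, net=net, nic_type='VMXNET3')
        # With neither floating_ip nor ip_address set, allocation falls back to DHCP.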
4867
4868 self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
4869 format(network_name, nicIndex, nic_type))
4870 try:
4871 ip_address = None
4872 floating_ip = False
4873 mac_address = None
4874 if 'floating_ip' in net: floating_ip = net['floating_ip']
4875
4876 # Stub for ip_address feature
4877 if 'ip_address' in net: ip_address = net['ip_address']
4878
4879 if 'mac_address' in net: mac_address = net['mac_address']
4880
4881 if floating_ip:
4882 allocation_mode = "POOL"
4883 elif ip_address:
4884 allocation_mode = "MANUAL"
4885 else:
4886 allocation_mode = "DHCP"
4887
4888 if not nic_type:
4889 for vms in vapp.get_all_vms():
4890 vm_id = vms.get('id').split(':')[-1]
4891
4892 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
4893
4894 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4895 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4896 response = self.perform_request(req_type='GET',
4897 url=url_rest_call,
4898 headers=headers)
4899
4900 if response.status_code == 403:
4901 response = self.retry_rest('GET', url_rest_call)
4902
4903 if response.status_code != 200:
4904                         self.logger.error("REST call {} failed reason : {} "\
4905 "status code : {}".format(url_rest_call,
4906 response.content,
4907 response.status_code))
4908 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
4909 "network connection section")
4910
4911 data = response.content
4912 data = data.split('<Link rel="edit"')[0]
4913 if '<PrimaryNetworkConnectionIndex>' not in data:
4914 self.logger.debug("add_network_adapter PrimaryNIC not in data")
4915 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
4916 <NetworkConnection network="{}">
4917 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
4918 <IsConnected>true</IsConnected>
4919 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
4920 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
4921 allocation_mode)
4922 # Stub for ip_address feature
4923 if ip_address:
4924 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
4925 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
4926
4927 if mac_address:
4928 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
4929 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
4930
4931 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
4932 else:
4933 self.logger.debug("add_network_adapter PrimaryNIC in data")
4934 new_item = """<NetworkConnection network="{}">
4935 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
4936 <IsConnected>true</IsConnected>
4937 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
4938 </NetworkConnection>""".format(network_name, nicIndex,
4939 allocation_mode)
4940 # Stub for ip_address feature
4941 if ip_address:
4942 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
4943 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
4944
4945 if mac_address:
4946 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
4947 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
4948
4949 data = data + new_item + '</NetworkConnectionSection>'
4950
4951 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
4952
4953 response = self.perform_request(req_type='PUT',
4954 url=url_rest_call,
4955 headers=headers,
4956 data=data)
4957
4958 if response.status_code == 403:
4959 add_headers = {'Content-Type': headers['Content-Type']}
4960 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
4961
4962 if response.status_code != 202:
4963                         self.logger.error("REST call {} failed reason : {} "\
4964 "status code : {} ".format(url_rest_call,
4965 response.content,
4966 response.status_code))
4967 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
4968 "network connection section")
4969 else:
4970 nic_task = self.get_task_from_response(response.content)
4971 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
4972 if result.get('status') == 'success':
4973                             self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
4974 "default NIC type".format(vm_id))
4975 else:
4976 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
4977 "connect NIC type".format(vm_id))
4978 else:
4979 for vms in vapp.get_all_vms():
4980 vm_id = vms.get('id').split(':')[-1]
4981
4982 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
4983
4984 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4985 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4986 response = self.perform_request(req_type='GET',
4987 url=url_rest_call,
4988 headers=headers)
4989
4990 if response.status_code == 403:
4991 response = self.retry_rest('GET', url_rest_call)
4992
4993 if response.status_code != 200:
4994                         self.logger.error("REST call {} failed reason : {} "\
4995 "status code : {}".format(url_rest_call,
4996 response.content,
4997 response.status_code))
4998 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
4999 "network connection section")
5000 data = response.content
5001 data = data.split('<Link rel="edit"')[0]
5002 vcd_netadapter_type = nic_type
5003 if nic_type in ['SR-IOV', 'VF']:
5004 vcd_netadapter_type = "SRIOVETHERNETCARD"
5005
5006 if '<PrimaryNetworkConnectionIndex>' not in data:
5007 self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
5008 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5009 <NetworkConnection network="{}">
5010 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5011 <IsConnected>true</IsConnected>
5012 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5013 <NetworkAdapterType>{}</NetworkAdapterType>
5014 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5015 allocation_mode, vcd_netadapter_type)
5016 # Stub for ip_address feature
5017 if ip_address:
5018 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5019 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5020
5021 if mac_address:
5022 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5023 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5024
5025 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5026 else:
5027 self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
5028 new_item = """<NetworkConnection network="{}">
5029 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5030 <IsConnected>true</IsConnected>
5031 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5032 <NetworkAdapterType>{}</NetworkAdapterType>
5033 </NetworkConnection>""".format(network_name, nicIndex,
5034 allocation_mode, vcd_netadapter_type)
5035 # Stub for ip_address feature
5036 if ip_address:
5037 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5038 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5039
5040 if mac_address:
5041 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5042 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5043
5044 data = data + new_item + '</NetworkConnectionSection>'
5045
5046 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5047
5048 response = self.perform_request(req_type='PUT',
5049 url=url_rest_call,
5050 headers=headers,
5051 data=data)
5052
5053 if response.status_code == 403:
5054 add_headers = {'Content-Type': headers['Content-Type']}
5055 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5056
5057 if response.status_code != 202:
5058                         self.logger.error("REST call {} failed reason : {} "\
5059 "status code : {}".format(url_rest_call,
5060 response.content,
5061 response.status_code))
5062 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5063 "network connection section")
5064 else:
5065 nic_task = self.get_task_from_response(response.content)
5066 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5067 if result.get('status') == 'success':
5068 self.logger.info("add_network_adapter_to_vms(): VM {} "\
5069                                                        "connected to NIC type {}".format(vm_id, nic_type))
5070 else:
5071 self.logger.error("add_network_adapter_to_vms(): VM {} "\
5072 "failed to connect NIC type {}".format(vm_id, nic_type))
5073 except Exception as exp:
5074 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
5075 "while adding Network adapter")
5076 raise vimconn.vimconnException(message=exp)
5077
5078
5079 def set_numa_affinity(self, vmuuid, paired_threads_id):
5080 """
5081         Method to assign numa affinity in vm configuration parameters
5082 Args :
5083 vmuuid - vm uuid
5084 paired_threads_id - one or more virtual processor
5085 numbers
5086 Returns:
5087             None on success; raises vimconnException on failure
5088 """
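        # Illustrative usage (vm_uuid and the node list are assumptions): the affinity is
        # written to the VM's extraConfig under the 'numa.nodeAffinity' key as a plain string.
        #   self.set_numa_affinity(vm_uuid, paired_threads_id="0,1")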
5089 try:
5090 vcenter_conect, content = self.get_vcenter_content()
5091 vm_moref_id = self.get_vm_moref_id(vmuuid)
5092
5093 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
5094 if vm_obj:
5095 config_spec = vim.vm.ConfigSpec()
5096 config_spec.extraConfig = []
5097 opt = vim.option.OptionValue()
5098 opt.key = 'numa.nodeAffinity'
5099 opt.value = str(paired_threads_id)
5100 config_spec.extraConfig.append(opt)
5101 task = vm_obj.ReconfigVM_Task(config_spec)
5102 if task:
5103 result = self.wait_for_vcenter_task(task, vcenter_conect)
5104 extra_config = vm_obj.config.extraConfig
5105 flag = False
5106 for opts in extra_config:
5107 if 'numa.nodeAffinity' in opts.key:
5108 flag = True
5109                             self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
5110 "value {} for vm {}".format(opt.value, vm_obj))
5111 if flag:
5112 return
5113 else:
5114 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
5115 except Exception as exp:
5116 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
5117                              "for VM {} : {}".format(vmuuid, exp))
5118 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
5119 "affinity".format(exp))
5120
5121
5122 def cloud_init(self, vapp, cloud_config):
5123 """
5124 Method to inject ssh-key
5125 vapp - vapp object
5126 cloud_config a dictionary with:
5127 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
5128 'users': (optional) list of users to be inserted, each item is a dict with:
5129 'name': (mandatory) user name,
5130 'key-pairs': (optional) list of strings with the public key to be inserted to the user
5131 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
5132 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
5133 'config-files': (optional). List of files to be transferred. Each item is a dict with:
5134 'dest': (mandatory) string with the destination absolute path
5135 'encoding': (optional, by default text). Can be one of:
5136 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
5137 'content' (mandatory): string with the content of the file
5138 'permissions': (optional) string with file permissions, typically octal notation '0644'
5139 'owner': (optional) file owner, string with the format 'owner:group'
5140                 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
5141 """
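        # Minimal illustrative cloud_config (hypothetical key material), matching the dict
        # described above; only 'key-pairs' and 'users' are consumed by this connector:
        #   cloud_config = {
        #       'key-pairs': ['ssh-rsa AAAA... operator@osm'],
        #       'users': [{'name': 'ubuntu', 'key-pairs': ['ssh-rsa AAAA... ubuntu@osm']}]
        #   }
        #   self.cloud_init(vapp, cloud_config)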
5142 try:
5143 if not isinstance(cloud_config, dict):
5144 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
5145 else:
5146 key_pairs = []
5147 userdata = []
5148 if "key-pairs" in cloud_config:
5149 key_pairs = cloud_config["key-pairs"]
5150
5151 if "users" in cloud_config:
5152 userdata = cloud_config["users"]
5153
5154 self.logger.debug("cloud_init : Guest os customization started..")
5155 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
5156 customize_script = customize_script.replace("&","&amp;")
5157 self.guest_customization(vapp, customize_script)
5158
5159 except Exception as exp:
5160 self.logger.error("cloud_init : exception occurred while injecting "\
5161 "ssh-key")
5162 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
5163 "ssh-key".format(exp))
5164
5165 def format_script(self, key_pairs=[], users_list=[]):
5166 bash_script = """#!/bin/sh
5167 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5168 if [ "$1" = "precustomization" ];then
5169 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5170 """
5171
5172 keys = "\n".join(key_pairs)
5173 if keys:
5174 keys_data = """
5175 if [ ! -d /root/.ssh ];then
5176 mkdir /root/.ssh
5177 chown root:root /root/.ssh
5178 chmod 700 /root/.ssh
5179 touch /root/.ssh/authorized_keys
5180 chown root:root /root/.ssh/authorized_keys
5181 chmod 600 /root/.ssh/authorized_keys
5182 # make centos with selinux happy
5183 which restorecon && restorecon -Rv /root/.ssh
5184 else
5185 touch /root/.ssh/authorized_keys
5186 chown root:root /root/.ssh/authorized_keys
5187 chmod 600 /root/.ssh/authorized_keys
5188 fi
5189 echo '{key}' >> /root/.ssh/authorized_keys
5190 """.format(key=keys)
5191
5192 bash_script+= keys_data
5193
5194 for user in users_list:
5195 if 'name' in user: user_name = user['name']
5196 if 'key-pairs' in user:
5197 user_keys = "\n".join(user['key-pairs'])
5198 else:
5199 user_keys = None
5200
5201 add_user_name = """
5202 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
5203 """.format(user_name=user_name)
5204
5205 bash_script+= add_user_name
5206
5207 if user_keys:
5208 user_keys_data = """
5209 mkdir /home/{user_name}/.ssh
5210 chown {user_name}:{user_name} /home/{user_name}/.ssh
5211 chmod 700 /home/{user_name}/.ssh
5212 touch /home/{user_name}/.ssh/authorized_keys
5213 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
5214 chmod 600 /home/{user_name}/.ssh/authorized_keys
5215 # make centos with selinux happy
5216 which restorecon && restorecon -Rv /home/{user_name}/.ssh
5217 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
5218 """.format(user_name=user_name,user_key=user_keys)
5219
5220 bash_script+= user_keys_data
5221
5222 return bash_script+"\n\tfi"
5223
5224 def guest_customization(self, vapp, customize_script):
5225 """
5226 Method to customize guest os
5227 vapp - Vapp object
5228 customize_script - Customize script to be run at first boot of VM.
5229 """
5230 for vm in vapp.get_all_vms():
5231 vm_id = vm.get('id').split(':')[-1]
5232 vm_name = vm.get('name')
5233 vm_name = vm_name.replace('_','-')
5234
5235 vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
5236 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5237 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5238
5239 headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
5240
5241 data = """<GuestCustomizationSection
5242 xmlns="http://www.vmware.com/vcloud/v1.5"
5243 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5244 ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
5245 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
5246 <Enabled>true</Enabled>
5247 <ChangeSid>false</ChangeSid>
5248 <VirtualMachineId>{}</VirtualMachineId>
5249 <JoinDomainEnabled>false</JoinDomainEnabled>
5250 <UseOrgSettings>false</UseOrgSettings>
5251 <AdminPasswordEnabled>false</AdminPasswordEnabled>
5252 <AdminPasswordAuto>true</AdminPasswordAuto>
5253 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
5254 <AdminAutoLogonCount>0</AdminAutoLogonCount>
5255 <ResetPasswordRequired>false</ResetPasswordRequired>
5256 <CustomizationScript>{}</CustomizationScript>
5257 <ComputerName>{}</ComputerName>
5258 <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
5259 </GuestCustomizationSection>
5260 """.format(vm_customization_url,
5261 vm_id,
5262 customize_script,
5263 vm_name,
5264 vm_customization_url)
5265
5266 response = self.perform_request(req_type='PUT',
5267 url=vm_customization_url,
5268 headers=headers,
5269 data=data)
5270 if response.status_code == 202:
5271 guest_task = self.get_task_from_response(response.content)
5272 self.client.get_task_monitor().wait_for_success(task=guest_task)
5273 self.logger.info("guest_customization : customized guest os task "\
5274 "completed for VM {}".format(vm_name))
5275 else:
5276                 self.logger.error("guest_customization : task for customized guest os "\
5277 "failed for VM {}".format(vm_name))
5278                 raise vimconn.vimconnException("guest_customization : failed to perform "\
5279 "guest os customization on VM {}".format(vm_name))
5280
5281 def add_new_disk(self, vapp_uuid, disk_size):
5282 """
5283 Method to create an empty vm disk
5284
5285 Args:
5286 vapp_uuid - is vapp identifier.
5287 disk_size - size of disk to be created in GB
5288
5289 Returns:
5290 None
5291 """
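        # Illustrative call (the vApp uuid is an assumption): attach an extra 20 GB empty disk.
        #   self.add_new_disk(vapp_uuid, disk_size=20)
        # The size is converted to MB and a new rasd Item of ResourceType 17 (hard disk)
        # is appended to the VM's disk section via add_new_disk_rest().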
5292 status = False
5293 vm_details = None
5294 try:
5295 #Disk size in GB, convert it into MB
5296 if disk_size is not None:
5297 disk_size_mb = int(disk_size) * 1024
5298 vm_details = self.get_vapp_details_rest(vapp_uuid)
5299
5300 if vm_details and "vm_virtual_hardware" in vm_details:
5301 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5302 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5303 status = self.add_new_disk_rest(disk_href, disk_size_mb)
5304
5305 except Exception as exp:
5306 msg = "Error occurred while creating new disk {}.".format(exp)
5307 self.rollback_newvm(vapp_uuid, msg)
5308
5309 if status:
5310 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5311 else:
5312 #If failed to add disk, delete VM
5313 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
5314 self.rollback_newvm(vapp_uuid, msg)
5315
5316
5317 def add_new_disk_rest(self, disk_href, disk_size_mb):
5318 """
5319         Retrieves the vApp disk section & adds a new empty disk
5320
5321 Args:
5322             disk_href: Disk section href to add disk
5323 disk_size_mb: Disk size in MB
5324
5325 Returns: Status of add new disk task
5326 """
5327 status = False
5328 if self.client._session:
5329 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5330 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5331 response = self.perform_request(req_type='GET',
5332 url=disk_href,
5333 headers=headers)
5334
5335 if response.status_code == 403:
5336 response = self.retry_rest('GET', disk_href)
5337
5338 if response.status_code != requests.codes.ok:
5339 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
5340 .format(disk_href, response.status_code))
5341 return status
5342 try:
5343                 #Find bus type & max of instance IDs assigned to disks
5344 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5345 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
5346 #For python3
5347 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
5348 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
5349 instance_id = 0
5350 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
5351 if item.find("rasd:Description",namespaces).text == "Hard disk":
5352 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
5353 if inst_id > instance_id:
5354 instance_id = inst_id
5355 disk_item = item.find("rasd:HostResource" ,namespaces)
5356 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
5357 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
5358
5359 instance_id = instance_id + 1
5360 new_item = """<Item>
5361 <rasd:Description>Hard disk</rasd:Description>
5362 <rasd:ElementName>New disk</rasd:ElementName>
5363 <rasd:HostResource
5364 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
5365 vcloud:capacity="{}"
5366 vcloud:busSubType="{}"
5367 vcloud:busType="{}"></rasd:HostResource>
5368 <rasd:InstanceID>{}</rasd:InstanceID>
5369 <rasd:ResourceType>17</rasd:ResourceType>
5370 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
5371
5372 new_data = response.content
5373 #Add new item at the bottom
5374 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
5375
5376 # Send PUT request to modify virtual hardware section with new disk
5377 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
5378
5379 response = self.perform_request(req_type='PUT',
5380 url=disk_href,
5381 data=new_data,
5382 headers=headers)
5383
5384 if response.status_code == 403:
5385 add_headers = {'Content-Type': headers['Content-Type']}
5386 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
5387
5388 if response.status_code != 202:
5389 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
5390 .format(disk_href, response.status_code, response.content))
5391 else:
5392 add_disk_task = self.get_task_from_response(response.content)
5393 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
5394 if result.get('status') == 'success':
5395 status = True
5396 else:
5397 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
5398
5399 except Exception as exp:
5400 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
5401
5402 return status
5403
5404
5405 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
5406 """
5407 Method to add existing disk to vm
5408 Args :
5409 catalogs - List of VDC catalogs
5410 image_id - Catalog ID
5411 template_name - Name of template in catalog
5412 vapp_uuid - UUID of vApp
5413 Returns:
5414 None
5415 """
5416 disk_info = None
5417 vcenter_conect, content = self.get_vcenter_content()
5418 #find moref-id of vm in image
5419 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
5420 image_id=image_id,
5421 )
5422
5423 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
5424 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
5425 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
5426 if catalog_vm_moref_id:
5427 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
5428 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
5429 if catalog_vm_obj:
5430 #find existing disk
5431 disk_info = self.find_disk(catalog_vm_obj)
5432 else:
5433 exp_msg = "No VM with image id {} found".format(image_id)
5434 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5435 else:
5436 exp_msg = "No Image found with image ID {} ".format(image_id)
5437 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5438
5439 if disk_info:
5440 self.logger.info("Existing disk_info : {}".format(disk_info))
5441 #get VM
5442 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5443 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
5444 if vm_obj:
5445 status = self.add_disk(vcenter_conect=vcenter_conect,
5446 vm=vm_obj,
5447 disk_info=disk_info,
5448 size=size,
5449 vapp_uuid=vapp_uuid
5450 )
5451 if status:
5452 self.logger.info("Disk from image id {} added to {}".format(image_id,
5453 vm_obj.config.name)
5454 )
5455 else:
5456 msg = "No disk found with image id {} to add in VM {}".format(
5457 image_id,
5458 vm_obj.config.name)
5459 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
5460
5461
5462 def find_disk(self, vm_obj):
5463 """
5464 Method to find details of existing disk in VM
5465 Args :
5466 vm_obj - vCenter object of VM
5467 image_id - Catalog ID
5468 Returns:
5469 disk_info : dict of disk details
5470 """
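        # Shape of the returned dict, sketched with hypothetical values:
        #   {'full_path': '[datastore1] template-vm/template-vm.vmdk',
        #    'datastore': <vim.Datastore object>,
        #    'capacityKB': 16777216}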
5471 disk_info = {}
5472 if vm_obj:
5473 try:
5474 devices = vm_obj.config.hardware.device
5475 for device in devices:
5476 if type(device) is vim.vm.device.VirtualDisk:
5477 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
5478 disk_info["full_path"] = device.backing.fileName
5479 disk_info["datastore"] = device.backing.datastore
5480 disk_info["capacityKB"] = device.capacityInKB
5481 break
5482 except Exception as exp:
5483 self.logger.error("find_disk() : exception occurred while "\
5484 "getting existing disk details :{}".format(exp))
5485 return disk_info
5486
5487
5488 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
5489 """
5490 Method to add existing disk in VM
5491 Args :
5492 vcenter_conect - vCenter content object
5493 vm - vCenter vm object
5494 disk_info : dict of disk details
5495 Returns:
5496 status : status of add disk task
5497 """
5498 datastore = disk_info["datastore"] if "datastore" in disk_info else None
5499 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
5500 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
5501 if size is not None:
5502 #Convert size from GB to KB
5503 sizeKB = int(size) * 1024 * 1024
5504             #compare size of existing disk and user given size. Assign whichever is greater
5505 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
5506 sizeKB, capacityKB))
5507 if sizeKB > capacityKB:
5508 capacityKB = sizeKB
5509
5510 if datastore and fullpath and capacityKB:
5511 try:
5512 spec = vim.vm.ConfigSpec()
5513 # get all disks on a VM, set unit_number to the next available
5514 unit_number = 0
5515 for dev in vm.config.hardware.device:
5516 if hasattr(dev.backing, 'fileName'):
5517 unit_number = int(dev.unitNumber) + 1
5518 # unit_number 7 reserved for scsi controller
5519 if unit_number == 7:
5520 unit_number += 1
5521 if isinstance(dev, vim.vm.device.VirtualDisk):
5522 #vim.vm.device.VirtualSCSIController
5523 controller_key = dev.controllerKey
5524
5525 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
5526 unit_number, controller_key))
5527 # add disk here
5528 dev_changes = []
5529 disk_spec = vim.vm.device.VirtualDeviceSpec()
5530 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5531 disk_spec.device = vim.vm.device.VirtualDisk()
5532 disk_spec.device.backing = \
5533 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
5534 disk_spec.device.backing.thinProvisioned = True
5535 disk_spec.device.backing.diskMode = 'persistent'
5536 disk_spec.device.backing.datastore = datastore
5537 disk_spec.device.backing.fileName = fullpath
5538
5539 disk_spec.device.unitNumber = unit_number
5540 disk_spec.device.capacityInKB = capacityKB
5541 disk_spec.device.controllerKey = controller_key
5542 dev_changes.append(disk_spec)
5543 spec.deviceChange = dev_changes
5544 task = vm.ReconfigVM_Task(spec=spec)
5545 status = self.wait_for_vcenter_task(task, vcenter_conect)
5546 return status
5547 except Exception as exp:
5548 exp_msg = "add_disk() : exception {} occurred while adding disk "\
5549 "{} to vm {}".format(exp,
5550 fullpath,
5551 vm.config.name)
5552 self.rollback_newvm(vapp_uuid, exp_msg)
5553 else:
5554 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
5555 self.rollback_newvm(vapp_uuid, msg)
5556
5557
5558 def get_vcenter_content(self):
5559 """
5560 Get the vsphere content object
5561 """
5562 try:
5563 vm_vcenter_info = self.get_vm_vcenter_info()
5564 except Exception as exp:
5565             self.logger.error("Error occurred while getting vCenter information"\
5566 " for VM : {}".format(exp))
5567 raise vimconn.vimconnException(message=exp)
5568
5569 context = None
5570 if hasattr(ssl, '_create_unverified_context'):
5571 context = ssl._create_unverified_context()
5572
5573 vcenter_conect = SmartConnect(
5574 host=vm_vcenter_info["vm_vcenter_ip"],
5575 user=vm_vcenter_info["vm_vcenter_user"],
5576 pwd=vm_vcenter_info["vm_vcenter_password"],
5577 port=int(vm_vcenter_info["vm_vcenter_port"]),
5578 sslContext=context
5579 )
5580 atexit.register(Disconnect, vcenter_conect)
5581 content = vcenter_conect.RetrieveContent()
5582 return vcenter_conect, content
5583
5584
5585 def get_vm_moref_id(self, vapp_uuid):
5586 """
5587 Get the moref_id of given VM
5588 """
5589 try:
5590 if vapp_uuid:
5591 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
5592 if vm_details and "vm_vcenter_info" in vm_details:
5593 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
5594 return vm_moref_id
5595
5596 except Exception as exp:
5597 self.logger.error("Error occurred while getting VM moref ID "\
5598 " for VM : {}".format(exp))
5599 return None
5600
5601
5602 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
5603 """
5604 Method to get vApp template details
5605 Args :
5606 catalogs - list of VDC catalogs
5607 image_id - Catalog ID to find
5608 template_name : template name in catalog
5609 Returns:
5610             parsed_response : dict of vApp template details
5611 """
5612 parsed_response = {}
5613
5614 vca = self.connect_as_admin()
5615 if not vca:
5616 raise vimconn.vimconnConnectionException("Failed to connect vCD")
5617
5618 try:
5619 org, vdc = self.get_vdc_details()
5620 catalog = self.get_catalog_obj(image_id, catalogs)
5621 if catalog:
5622 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
5623 catalog_items = [items.attrib]
5624
5625 if len(catalog_items) == 1:
5626 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5627 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5628
5629 response = self.perform_request(req_type='GET',
5630 url=catalog_items[0].get('href'),
5631 headers=headers)
5632 catalogItem = XmlElementTree.fromstring(response.content)
5633 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
5634 vapp_tempalte_href = entity.get("href")
5635 #get vapp details and parse moref id
5636
5637 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
5638 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
5639 'vmw': 'http://www.vmware.com/schema/ovf',
5640 'vm': 'http://www.vmware.com/vcloud/v1.5',
5641 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5642 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
5643 'xmlns':"http://www.vmware.com/vcloud/v1.5"
5644 }
5645
5646 if vca._session:
5647 response = self.perform_request(req_type='GET',
5648 url=vapp_tempalte_href,
5649 headers=headers)
5650
5651 if response.status_code != requests.codes.ok:
5652 self.logger.debug("REST API call {} failed. Return status code {}".format(
5653 vapp_tempalte_href, response.status_code))
5654
5655 else:
5656 xmlroot_respond = XmlElementTree.fromstring(response.content)
5657 children_section = xmlroot_respond.find('vm:Children/', namespaces)
5658 if children_section is not None:
5659 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
5660 if vCloud_extension_section is not None:
5661 vm_vcenter_info = {}
5662 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
5663 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
5664 if vmext is not None:
5665 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
5666 parsed_response["vm_vcenter_info"]= vm_vcenter_info
5667
5668 except Exception as exp :
5669 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
5670
5671 return parsed_response
5672
5673
5674 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
5675 """
5676 Method to delete vApp
5677 Args :
5678 vapp_uuid - vApp UUID
5679 msg - Error message to be logged
5680 exp_type : Exception type
5681 Returns:
5682 None
5683 """
5684 if vapp_uuid:
5685 status = self.delete_vminstance(vapp_uuid)
5686 else:
5687 msg = "No vApp ID"
5688 self.logger.error(msg)
5689 if exp_type == "Genric":
5690 raise vimconn.vimconnException(msg)
5691 elif exp_type == "NotFound":
5692 raise vimconn.vimconnNotFoundException(message=msg)
5693
5694 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
5695 """
5696 Method to attach SRIOV adapters to VM
5697
5698 Args:
5699 vapp_uuid - uuid of vApp/VM
5700             sriov_nets - SRIOV devices information as specified in VNFD (flavor)
5701 vmname_andid - vmname
5702
5703 Returns:
5704 The status of add SRIOV adapter task , vm object and
5705 vcenter_conect object
5706 """
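        # Sketch of the expected sriov_nets input (hypothetical values); 'net_id' names the
        # network used for the dvPort group and 'type' selects the SRIOV handling:
        #   sriov_nets = [{'net_id': 'sriov-net-1', 'type': 'VF'}]
        #   added, vm_obj, vc = self.add_sriov(vapp_uuid, sriov_nets, vmname_andid)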
5707 vm_obj = None
5708 vcenter_conect, content = self.get_vcenter_content()
5709 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5710
5711 if vm_moref_id:
5712 try:
5713 no_of_sriov_devices = len(sriov_nets)
5714 if no_of_sriov_devices > 0:
5715 #Get VM and its host
5716 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5717 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
5718 if host_obj and vm_obj:
5719                     #get SRIOV devices from host on which vapp is currently installed
5720 avilable_sriov_devices = self.get_sriov_devices(host_obj,
5721 no_of_sriov_devices,
5722 )
5723
5724 if len(avilable_sriov_devices) == 0:
5725 #find other hosts with active pci devices
5726 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
5727 content,
5728 no_of_sriov_devices,
5729 )
5730
5731 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
5732 #Migrate vm to the host where SRIOV devices are available
5733 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
5734 new_host_obj))
5735 task = self.relocate_vm(new_host_obj, vm_obj)
5736 if task is not None:
5737 result = self.wait_for_vcenter_task(task, vcenter_conect)
5738 self.logger.info("Migrate VM status: {}".format(result))
5739 host_obj = new_host_obj
5740 else:
5741                                 self.logger.info("Fail to migrate VM : {}".format(vmname_andid))
5742 raise vimconn.vimconnNotFoundException(
5743 "Fail to migrate VM : {} to host {}".format(
5744 vmname_andid,
5745 new_host_obj)
5746 )
5747
5748 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
5749 #Add SRIOV devices one by one
5750 for sriov_net in sriov_nets:
5751 network_name = sriov_net.get('net_id')
5752 dvs_portgr_name = self.create_dvPort_group(network_name)
5753 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
5754 #add vlan ID ,Modify portgroup for vlan ID
5755 self.configure_vlanID(content, vcenter_conect, network_name)
5756
5757 task = self.add_sriov_to_vm(content,
5758 vm_obj,
5759 host_obj,
5760 network_name,
5761 avilable_sriov_devices[0]
5762 )
5763 if task:
5764 status= self.wait_for_vcenter_task(task, vcenter_conect)
5765 if status:
5766 self.logger.info("Added SRIOV {} to VM {}".format(
5767 no_of_sriov_devices,
5768 str(vm_obj)))
5769 else:
5770 self.logger.error("Fail to add SRIOV {} to VM {}".format(
5771 no_of_sriov_devices,
5772 str(vm_obj)))
5773 raise vimconn.vimconnUnexpectedResponse(
5774                                     "Fail to add SRIOV adapter in VM {}".format(str(vm_obj))
5775 )
5776 return True, vm_obj, vcenter_conect
5777 else:
5778 self.logger.error("Currently there is no host with"\
5779                                            " {} number of available SRIOV "\
5780 "VFs required for VM {}".format(
5781 no_of_sriov_devices,
5782 vmname_andid)
5783 )
5784 raise vimconn.vimconnNotFoundException(
5785 "Currently there is no host with {} "\
5786                             "number of available SRIOV devices required for VM {}".format(
5787 no_of_sriov_devices,
5788 vmname_andid))
5789 else:
5790                     self.logger.debug("No information about SRIOV devices {}".format(sriov_nets))
5791
5792 except vmodl.MethodFault as error:
5793             self.logger.error("Error occurred while adding SRIOV: {}".format(error))
5794 return None, vm_obj, vcenter_conect
5795
5796
5797 def get_sriov_devices(self,host, no_of_vfs):
5798 """
5799 Method to get the details of SRIOV devices on given host
5800 Args:
5801 host - vSphere host object
5802 no_of_vfs - number of VFs needed on host
5803
5804 Returns:
5805 array of SRIOV devices
5806 """
5807 sriovInfo=[]
5808 if host:
5809 for device in host.config.pciPassthruInfo:
5810 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
5811 if device.numVirtualFunction >= no_of_vfs:
5812 sriovInfo.append(device)
5813 break
5814 return sriovInfo
5815
5816
5817 def get_host_and_sriov_devices(self, content, no_of_vfs):
5818 """
5819         Method to get the details of SRIOV devices information on all hosts
5820
5821 Args:
5822             content - vSphere service instance content object
5823 no_of_vfs - number of pci VFs needed on host
5824
5825 Returns:
5826 array of SRIOV devices and host object
5827 """
5828 host_obj = None
5829 sriov_device_objs = None
5830 try:
5831 if content:
5832 container = content.viewManager.CreateContainerView(content.rootFolder,
5833 [vim.HostSystem], True)
5834 for host in container.view:
5835 devices = self.get_sriov_devices(host, no_of_vfs)
5836 if devices:
5837 host_obj = host
5838 sriov_device_objs = devices
5839 break
5840 except Exception as exp:
5841 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
5842
5843 return host_obj,sriov_device_objs
5844
5845
5846 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
5847 """
5848 Method to add SRIOV adapter to vm
5849
5850 Args:
5851 host_obj - vSphere host object
5852 vm_obj - vSphere vm object
5853 content - vCenter content object
5854             network_name - name of distributed virtual portgroup
5855 sriov_device - SRIOV device info
5856
5857 Returns:
5858 task object
5859 """
5860 devices = []
5861 vnic_label = "sriov nic"
5862 try:
5863 dvs_portgr = self.get_dvport_group(network_name)
5864 network_name = dvs_portgr.name
5865 nic = vim.vm.device.VirtualDeviceSpec()
5866 # VM device
5867 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5868 nic.device = vim.vm.device.VirtualSriovEthernetCard()
5869 nic.device.addressType = 'assigned'
5870 #nic.device.key = 13016
5871 nic.device.deviceInfo = vim.Description()
5872 nic.device.deviceInfo.label = vnic_label
5873 nic.device.deviceInfo.summary = network_name
5874 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
5875
5876 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
5877 nic.device.backing.deviceName = network_name
5878 nic.device.backing.useAutoDetect = False
5879 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
5880 nic.device.connectable.startConnected = True
5881 nic.device.connectable.allowGuestControl = True
5882
5883 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
5884 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
5885 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
5886
5887 devices.append(nic)
5888 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
5889 task = vm_obj.ReconfigVM_Task(vmconf)
5890 return task
5891 except Exception as exp:
5892 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
5893 return None
5894
5895
5896 def create_dvPort_group(self, network_name):
5897 """
5898         Method to create distributed virtual portgroup
5899
5900 Args:
5901 network_name - name of network/portgroup
5902
5903 Returns:
5904 portgroup key
5905 """
5906 try:
5907 new_network_name = [network_name, '-', str(uuid.uuid4())]
5908 network_name=''.join(new_network_name)
5909 vcenter_conect, content = self.get_vcenter_content()
5910
5911 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
5912 if dv_switch:
5913 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
5914 dv_pg_spec.name = network_name
5915
5916 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
5917 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
5918 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
5919 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
5920 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
5921 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
5922
5923 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
5924 self.wait_for_vcenter_task(task, vcenter_conect)
5925
5926 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
5927 if dvPort_group:
5928                     self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
5929 return dvPort_group.key
5930 else:
5931                     self.logger.debug("No distributed virtual port group found with name {}".format(network_name))
5932
5933 except Exception as exp:
5934             self.logger.error("Error occurred while creating distributed virtual port group {}"\
5935 " : {}".format(network_name, exp))
5936 return None
5937
5938 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
5939 """
5940         Method to reconfigure distributed virtual portgroup
5941
5942 Args:
5943             dvPort_group_name - name of distributed virtual portgroup
5944             content - vCenter content object
5945             config_info - distributed virtual portgroup configuration
5946
5947 Returns:
5948 task object
5949 """
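        # Illustrative usage (VLAN id is an assumption): set VLAN 2101 on an existing portgroup.
        #   task = self.reconfig_portgroup(content, dvPort_group_name, config_info={"vlanID": 2101})
        #   if task:
        #       self.wait_for_vcenter_task(task, actionName='reconfigure portgroup')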
5950 try:
5951 dvPort_group = self.get_dvport_group(dvPort_group_name)
5952 if dvPort_group:
5953 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
5954 dv_pg_spec.configVersion = dvPort_group.config.configVersion
5955 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
5956 if "vlanID" in config_info:
5957 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
5958 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
5959
5960 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
5961 return task
5962 else:
5963 return None
5964 except Exception as exp:
5965             self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
5966 " : {}".format(dvPort_group_name, exp))
5967 return None
5968
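    # Illustrative sketch: push a vlan tag onto an existing portgroup. "conn" and the
    # portgroup name are assumptions; only the "vlanID" key of config_info is honoured:
    #
    #   vcenter_conect, content = conn.get_vcenter_content()
    #   task = conn.reconfig_portgroup(content, "sriov-net-1234", config_info={"vlanID": 100})
    #   if task:
    #       conn.wait_for_vcenter_task(task, vcenter_conect)
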
5969
5970     def destroy_dvport_group(self, dvPort_group_name):
5971         """
5972         Method to destroy distributed virtual portgroup
5973
5974         Args:
5975             dvPort_group_name - name of the distributed virtual portgroup
5976
5977         Returns:
5978             True if the portgroup was deleted successfully, else False
5979 """
5980 vcenter_conect, content = self.get_vcenter_content()
5981 try:
5982 status = None
5983 dvPort_group = self.get_dvport_group(dvPort_group_name)
5984 if dvPort_group:
5985 task = dvPort_group.Destroy_Task()
5986 status = self.wait_for_vcenter_task(task, vcenter_conect)
5987 return status
5988 except vmodl.MethodFault as exp:
5989             self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
5990 exp, dvPort_group_name))
5991 return None
5992
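    # Illustrative cleanup sketch ("conn" and the portgroup name are assumptions): the
    # Destroy_Task is awaited through wait_for_vcenter_task, so a truthy result means
    # the portgroup is gone:
    #
    #   if not conn.destroy_dvport_group("sriov-net-1234"):
    #       conn.logger.warning("portgroup could not be removed")
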
5993
5994 def get_dvport_group(self, dvPort_group_name):
5995 """
5996         Method to get distributed virtual portgroup
5997
5998         Args:
5999             dvPort_group_name - name of the distributed virtual portgroup
6000
6001 Returns:
6002 portgroup object
6003 """
6004 vcenter_conect, content = self.get_vcenter_content()
6005 dvPort_group = None
6006 try:
6007 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
6008 for item in container.view:
6009 if item.key == dvPort_group_name:
6010 dvPort_group = item
6011 break
6012 return dvPort_group
6013 except vmodl.MethodFault as exp:
6014             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
6015 exp, dvPort_group_name))
6016 return None
6017
6018 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
6019 """
6020         Method to get the vlan ID of a distributed virtual portgroup
6021
6022         Args:
6023             dvPort_group_name - name of the distributed virtual portgroup
6024
6025 Returns:
6026 vlan ID
6027 """
6028 vlanId = None
6029 try:
6030 dvPort_group = self.get_dvport_group(dvPort_group_name)
6031 if dvPort_group:
6032 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
6033 except vmodl.MethodFault as exp:
6034             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
6035 exp, dvPort_group_name))
6036 return vlanId
6037
6038
6039 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
6040 """
6041         Method to configure the vlan ID of a distributed virtual portgroup
6042
6043         Args:
6044             dvPort_group_name - name of the distributed virtual portgroup
6045
6046 Returns:
6047 None
6048 """
6049 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
6050 if vlanID == 0:
6051 #configure vlanID
6052 vlanID = self.genrate_vlanID(dvPort_group_name)
6053 config = {"vlanID":vlanID}
6054 task = self.reconfig_portgroup(content, dvPort_group_name,
6055 config_info=config)
6056 if task:
6057                 status = self.wait_for_vcenter_task(task, vcenter_conect)
6058 if status:
6059 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
6060 dvPort_group_name,vlanID))
6061 else:
6062                     self.logger.error("Failed to reconfigure portgroup {} for vlan ID {}".format(
6063 dvPort_group_name, vlanID))
6064
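    # Illustrative sketch of the overall vlan workflow ("conn" assumed): when the
    # portgroup still carries the default vlan 0, a free ID is picked from
    # config['vlanID_range'] and applied through reconfig_portgroup:
    #
    #   vcenter_conect, content = conn.get_vcenter_content()
    #   conn.configure_vlanID(content, vcenter_conect, "sriov-net-1234")
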
6065
6066 def genrate_vlanID(self, network_name):
6067 """
6068 Method to get unused vlanID
6069 Args:
6070 network_name - name of network/portgroup
6071 Returns:
6072 vlanID
6073 """
6074 vlan_id = None
6075 used_ids = []
6076         if self.config.get('vlanID_range') is None:
6077             raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
6078                 "in the config before creating an SRIOV network with a vlan tag")
6079 if "used_vlanIDs" not in self.persistent_info:
6080 self.persistent_info["used_vlanIDs"] = {}
6081 else:
6082 used_ids = self.persistent_info["used_vlanIDs"].values()
6083 #For python3
6084 #used_ids = list(self.persistent_info["used_vlanIDs"].values())
6085
6086 for vlanID_range in self.config.get('vlanID_range'):
6087             start_vlanid, end_vlanid = vlanID_range.split("-")
6088             if int(start_vlanid) > int(end_vlanid):
6089 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
6090 vlanID_range))
6091
6092 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
6093 #For python3
6094 #for id in range(int(start_vlanid), int(end_vlanid) + 1):
6095 if id not in used_ids:
6096 vlan_id = id
6097 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
6098 return vlan_id
6099 if vlan_id is None:
6100 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
6101
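    # Illustrative sketch: genrate_vlanID expects config['vlanID_range'] to be a list of
    # "start-end" strings and records the chosen ID per network in persistent_info.
    # Hypothetical VIM config snippet (the range values are assumptions):
    #
    #   config:
    #     vlanID_range: ['3000-3100', '3200-3300']
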
6102
6103 def get_obj(self, content, vimtype, name):
6104 """
6105 Get the vsphere object associated with a given text name
6106 """
6107 obj = None
6108 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
6109 for item in container.view:
6110 if item.name == name:
6111 obj = item
6112 break
6113 return obj
6114
6115
6116 def insert_media_to_vm(self, vapp, image_id):
6117 """
6118 Method to insert media CD-ROM (ISO image) from catalog to vm.
6119 vapp - vapp object to get vm id
6120         image_id - image id of the CD-ROM (ISO) to be inserted into the vm
6121 """
6122 # create connection object
6123 vca = self.connect()
6124 try:
6125 # fetching catalog details
6126 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
6127 if vca._session:
6128 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6129 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6130 response = self.perform_request(req_type='GET',
6131 url=rest_url,
6132 headers=headers)
6133
6134 if response.status_code != 200:
6135                     self.logger.error("REST call {} failed reason : {} "\
6136                                       "status code : {}".format(rest_url,
6137 response.content,
6138 response.status_code))
6139 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
6140 "catalog details")
6141 # searching iso name and id
6142 iso_name,media_id = self.get_media_details(vca, response.content)
6143
6144 if iso_name and media_id:
6145 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6146 <ns6:MediaInsertOrEjectParams
6147 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
6148 <ns6:Media
6149 type="application/vnd.vmware.vcloud.media+xml"
6150 name="{}.iso"
6151 id="urn:vcloud:media:{}"
6152 href="https://{}/api/media/{}"/>
6153 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
6154 self.url,media_id)
6155
6156 for vms in vapp.get_all_vms():
6157 vm_id = vms.get('id').split(':')[-1]
6158
6159 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
6160 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
6161
6162 response = self.perform_request(req_type='POST',
6163 url=rest_url,
6164 data=data,
6165 headers=headers)
6166
6167 if response.status_code != 202:
6168 self.logger.error("Failed to insert CD-ROM to vm")
6169                     raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert "\
6170 "ISO image to vm")
6171 else:
6172 task = self.get_task_from_response(response.content)
6173 result = self.client.get_task_monitor().wait_for_success(task=task)
6174 if result.get('status') == 'success':
6175                             self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
6176 " image to vm {}".format(vm_id))
6177
6178 except Exception as exp:
6179 self.logger.error("insert_media_to_vm() : exception occurred "\
6180 "while inserting media CD-ROM")
6181 raise vimconn.vimconnException(message=exp)
6182
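    # Illustrative usage sketch ("conn", "vapp" and the catalog uuid are assumptions):
    # image_id must identify the catalog that holds the ISO, and the ISO is attached to
    # every VM returned by vapp.get_all_vms():
    #
    #   conn.insert_media_to_vm(vapp, "<catalog-uuid>")
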
6183
6184 def get_media_details(self, vca, content):
6185 """
6186 Method to get catalog item details
6187 vca - connection object
6188 content - Catalog details
6189 Return - Media name, media id
6190 """
6191 cataloghref_list = []
6192 try:
6193 if content:
6194 vm_list_xmlroot = XmlElementTree.fromstring(content)
6195 for child in vm_list_xmlroot.iter():
6196 if 'CatalogItem' in child.tag:
6197 cataloghref_list.append(child.attrib.get('href'))
6198                 if cataloghref_list:
6199 for href in cataloghref_list:
6200 if href:
6201 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6202 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6203 response = self.perform_request(req_type='GET',
6204 url=href,
6205 headers=headers)
6206 if response.status_code != 200:
6207                             self.logger.error("REST call {} failed reason : {} "\
6208 "status code : {}".format(href,
6209 response.content,
6210 response.status_code))
6211 raise vimconn.vimconnException("get_media_details : Failed to get "\
6212 "catalogitem details")
6213 list_xmlroot = XmlElementTree.fromstring(response.content)
6214 for child in list_xmlroot.iter():
6215 if 'Entity' in child.tag:
6216 if 'media' in child.attrib.get('href'):
6217 name = child.attrib.get('name')
6218 media_id = child.attrib.get('href').split('/').pop()
6219 return name,media_id
6220 else:
6221 self.logger.debug("Media name and id not found")
6222 return False,False
6223 except Exception as exp:
6224 self.logger.error("get_media_details : exception occurred "\
6225 "getting media details")
6226 raise vimconn.vimconnException(message=exp)
6227
6228
6229 def retry_rest(self, method, url, add_headers=None, data=None):
6230 """ Method to get Token & retry respective REST request
6231 Args:
6232             method - HTTP method; one of 'GET', 'PUT', 'POST' or 'DELETE'
6233 url - request url to be used
6234 add_headers - Additional headers (optional)
6235 data - Request payload data to be passed in request
6236 Returns:
6237 response - Response of request
6238 """
6239 response = None
6240
6241 #Get token
6242 self.get_token()
6243
6244 if self.client._session:
6245 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6246 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6247
6248 if add_headers:
6249 headers.update(add_headers)
6250
6251 if method == 'GET':
6252 response = self.perform_request(req_type='GET',
6253 url=url,
6254 headers=headers)
6255 elif method == 'PUT':
6256 response = self.perform_request(req_type='PUT',
6257 url=url,
6258 headers=headers,
6259 data=data)
6260 elif method == 'POST':
6261 response = self.perform_request(req_type='POST',
6262 url=url,
6263 headers=headers,
6264 data=data)
6265 elif method == 'DELETE':
6266 response = self.perform_request(req_type='DELETE',
6267 url=url,
6268 headers=headers)
6269 return response
6270
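    # Illustrative sketch ("conn" and the URL are assumptions): retry_rest first refreshes
    # the vCD token and then replays the request with the new x-vcloud-authorization
    # header, so it is intended for requests that failed because the session expired:
    #
    #   response = conn.retry_rest('GET', '{}/api/org'.format(conn.url))
    #   if response is not None and response.status_code == 200:
    #       print(response.content)
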
6271
6272 def get_token(self):
6273 """ Generate a new token if expired
6274
6275 Returns:
6276             None. The refreshed client is stored in self.client and can later be used to connect to vCloud Director as admin for the VDC
6277 """
6278 try:
6279 self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
6280 self.user,
6281 self.org_name))
6282 host = self.url
6283 client = Client(host, verify_ssl_certs=False)
6284 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
6285 # connection object
6286 self.client = client
6287
6288         except Exception:
6289 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
6290 "{} as user: {}".format(self.org_name, self.user))
6291
6292 if not client:
6293 raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
6294
6295
6296 def get_vdc_details(self):
6297 """ Get VDC details using pyVcloud Lib
6298
6299 Returns org and vdc object
6300 """
6301 vdc = None
6302 try:
6303 org = Org(self.client, resource=self.client.get_org())
6304 vdc = org.get_vdc(self.tenant_name)
6305 except Exception as e:
6306             # pyvcloud does not raise a specific exception; refresh the token nevertheless
6307 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
6308
6309 #Retry once, if failed by refreshing token
6310 if vdc is None:
6311 self.get_token()
6312 org = Org(self.client, resource=self.client.get_org())
6313 vdc = org.get_vdc(self.tenant_name)
6314
6315 return org, vdc
6316
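    # Illustrative sketch ("conn" assumed): get_vdc_details transparently refreshes the
    # token once if the cached pyvcloud client has expired:
    #
    #   org, vdc = conn.get_vdc_details()
    #   if vdc is not None:
    #       conn.logger.debug("connected to VDC {}".format(conn.tenant_name))
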
6317
6318 def perform_request(self, req_type, url, headers=None, data=None):
6319 """Perform the POST/PUT/GET/DELETE request."""
6320
6321 #Log REST request details
6322 self.log_request(req_type, url=url, headers=headers, data=data)
6323 # perform request and return its result
6324 if req_type == 'GET':
6325 response = requests.get(url=url,
6326 headers=headers,
6327 verify=False)
6328 elif req_type == 'PUT':
6329 response = requests.put(url=url,
6330 headers=headers,
6331 data=data,
6332 verify=False)
6333 elif req_type == 'POST':
6334 response = requests.post(url=url,
6335 headers=headers,
6336 data=data,
6337 verify=False)
6338 elif req_type == 'DELETE':
6339 response = requests.delete(url=url,
6340 headers=headers,
6341 verify=False)
6342 #Log the REST response
6343 self.log_response(response)
6344
6345 return response
6346
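    # Illustrative sketch (URL and header values are assumptions): perform_request is a
    # thin wrapper around the requests library with SSL verification disabled and
    # request/response logging on both sides:
    #
    #   headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
    #              'x-vcloud-authorization': conn.client._session.headers['x-vcloud-authorization']}
    #   response = conn.perform_request(req_type='GET',
    #                                   url='{}/api/org'.format(conn.url),
    #                                   headers=headers)
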
6347
6348 def log_request(self, req_type, url=None, headers=None, data=None):
6349 """Logs REST request details"""
6350
6351 if req_type is not None:
6352 self.logger.debug("Request type: {}".format(req_type))
6353
6354 if url is not None:
6355 self.logger.debug("Request url: {}".format(url))
6356
6357 if headers is not None:
6358 for header in headers:
6359 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
6360
6361 if data is not None:
6362 self.logger.debug("Request data: {}".format(data))
6363
6364
6365 def log_response(self, response):
6366 """Logs REST response details"""
6367
6368 self.logger.debug("Response status code: {} ".format(response.status_code))
6369
6370
6371 def get_task_from_response(self, content):
6372 """
6373 content - API response content(response.content)
6374 return task object
6375 """
6376 xmlroot = XmlElementTree.fromstring(content)
6377 if xmlroot.tag.split('}')[1] == "Task":
6378 return xmlroot
6379 else:
6380             for ele in xmlroot:
6381                 if ele.tag.split("}")[1] == "Tasks":
6382                     return ele[0]
6383             # no Task element found in the response
6384             return None
6385
6386
6387 def power_on_vapp(self,vapp_id, vapp_name):
6388 """
6389 vapp_id - vApp uuid
6390         vapp_name - vApp name
6391 return - Task object
6392 """
6393 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6394 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6395
6396 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
6397 vapp_id)
6398 response = self.perform_request(req_type='POST',
6399 url=poweron_href,
6400 headers=headers)
6401
6402 if response.status_code != 202:
6403             self.logger.error("REST call {} failed reason : {} "\
6404 "status code : {} ".format(poweron_href,
6405 response.content,
6406 response.status_code))
6407 raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
6408 "vApp {}".format(vapp_name))
6409 else:
6410 poweron_task = self.get_task_from_response(response.content)
6411 return poweron_task
6412
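    # Illustrative sketch ("conn", the uuid and the name are assumptions): the returned
    # Task element can be handed to pyvcloud's task monitor, as done elsewhere in this
    # connector:
    #
    #   task = conn.power_on_vapp("<vapp-uuid>", "my-vapp")
    #   result = conn.client.get_task_monitor().wait_for_success(task=task)
    #   powered_on = (result.get('status') == 'success')
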
6413