1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implements the vimconn abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud.vcd.client import BasicLoginCredentials,Client,VcdTaskException
46 from pyvcloud.vcd.vdc import VDC
47 from pyvcloud.vcd.org import Org
48 import re
49 from pyvcloud.vcd.vapp import VApp
50 from xml.sax.saxutils import escape
51 import logging
52 import json
53 import time
54 import uuid
55 import httplib
56 #For python3
57 #import http.client
58 import hashlib
59 import socket
60 import struct
61 import netaddr
62 import random
63
64 # global variable for vcd connector type
65 STANDALONE = 'standalone'
66
67 # key for flavor dicts
68 FLAVOR_RAM_KEY = 'ram'
69 FLAVOR_VCPUS_KEY = 'vcpus'
70 FLAVOR_DISK_KEY = 'disk'
71 DEFAULT_IP_PROFILE = {'dhcp_count':50,
72 'dhcp_enabled':True,
73 'ip_version':"IPv4"
74 }
75 # global variable for wait time
76 INTERVAL_TIME = 5
77 MAX_WAIT_TIME = 1800
78
79 API_VERSION = '5.9'
80
81 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
82 __date__ = "$09-Mar-2018 11:09:29$"
83 __version__ = '0.2'
84
85 # -1: "Could not be created",
86 # 0: "Unresolved",
87 # 1: "Resolved",
88 # 2: "Deployed",
89 # 3: "Suspended",
90 # 4: "Powered on",
91 # 5: "Waiting for user input",
92 # 6: "Unknown state",
93 # 7: "Unrecognized state",
94 # 8: "Powered off",
95 # 9: "Inconsistent state",
96 # 10: "Children do not all have the same status",
97 # 11: "Upload initiated, OVF descriptor pending",
98 # 12: "Upload initiated, copying contents",
99 # 13: "Upload initiated, disk contents pending",
100 # 14: "Upload has been quarantined",
101 # 15: "Upload quarantine period has expired"
102
103 # mapping vCD status to MANO
104 vcdStatusCode2manoFormat = {4: 'ACTIVE',
105 7: 'PAUSED',
106 3: 'SUSPENDED',
107 8: 'INACTIVE',
108 12: 'BUILD',
109 -1: 'ERROR',
110 14: 'DELETED'}
111
112 #
113 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
114 'ERROR': 'ERROR', 'DELETED': 'DELETED'
115 }
116
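# Illustrative sketch (not part of the original module): how the status maps above are consumed.
# A vCD vApp status code is translated to the MANO status string, for example:
#
#   vcdStatusCode2manoFormat[4]        # -> 'ACTIVE'
#   netStatus2manoFormat['BUILD']      # -> 'BUILD'
#
# Codes that are not in the map (e.g. 0 "Unresolved") have no MANO equivalent here.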
117 class vimconnector(vimconn.vimconnector):
118 # dict used to store flavor in memory
119 flavorlist = {}
120
121 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
122 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
123 """
124         Constructor creates a VMware connector to vCloud director.
125
126         By default the constructor doesn't validate the connection state, so a client can create the object with None arguments.
127         If the client specifies username, password, host and VDC name, the connector initializes the missing attributes:
128
129         a) It initializes the organization UUID.
130         b) It initializes the tenant_id / VDC ID (derived from the tenant name).
131
132         Args:
133             uuid - organization uuid.
134             name - organization name; it must be present in vCloud director.
135             tenant_id - VDC uuid; it must be present in vCloud director.
136             tenant_name - VDC name.
137             url - hostname or IP address of vCloud director.
138             url_admin - same as above.
139             user - user with administrator rights for the organization. The caller must make sure that
140                    the username has the right privileges.
141
142             passwd - password for that user.
143
144             The VMware connector also requires PVDC administrative privileges and a separate account.
145             These credentials must be passed via the config argument, a dict containing the keys
146
147                 config['admin_username']
148                 config['admin_password']
149             config - provides NSX and vCenter information.
150
151 Returns:
152 Nothing.
153 """
154
155 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
156 url_admin, user, passwd, log_level, config)
157
158 self.logger = logging.getLogger('openmano.vim.vmware')
159 self.logger.setLevel(10)
160 self.persistent_info = persistent_info
161
162 self.name = name
163 self.id = uuid
164 self.url = url
165 self.url_admin = url_admin
166 self.tenant_id = tenant_id
167 self.tenant_name = tenant_name
168 self.user = user
169 self.passwd = passwd
170 self.config = config
171 self.admin_password = None
172 self.admin_user = None
173 self.org_name = ""
174 self.nsx_manager = None
175 self.nsx_user = None
176 self.nsx_password = None
177 self.availability_zone = None
178
179 # Disable warnings from self-signed certificates.
180 requests.packages.urllib3.disable_warnings()
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 #Set availability zone for Affinity rules
214 self.availability_zone = self.set_availability_zones()
215
216 # ############# Stub code for SRIOV #################
217 # try:
218 # self.dvs_name = config['dv_switch_name']
219 # except KeyError:
220 #             raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in Config")
221 #
222 # self.vlanID_range = config.get("vlanID_range", None)
223
224 self.org_uuid = None
225 self.client = None
226
227 if not url:
228 raise vimconn.vimconnException('url param can not be NoneType')
229
230 if not self.url_admin: # try to use normal url
231 self.url_admin = self.url
232
233         self.logger.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
234                                                                                    self.tenant_id, self.tenant_name))
235         self.logger.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
236         self.logger.debug("vcd admin username {} vcd admin password {}".format(self.admin_user, self.admin_password))
237
238 # initialize organization
239 if self.user is not None and self.passwd is not None and self.url:
240 self.init_organization()
241
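    # Illustrative sketch (not part of the original module): a minimal constructor call based on the
    # keys read above. All values are placeholders. 'admin_username'/'admin_password' and the 'nsx_*'
    # keys are mandatory in config, the 'vcenter_*' keys are optional, and tenant_name may be given as
    # "<org>:<vdc>" to carry the organization name as well.
    #
    #   conn = vimconnector(uuid='vim-uuid', name='vmware-vim',
    #                       tenant_name='myorg:myvdc',
    #                       url='https://vcd.example.com',
    #                       user='vdcuser', passwd='vdcpass',
    #                       config={'admin_username': 'pvdcadmin', 'admin_password': 'pvdcpass',
    #                               'nsx_manager': 'https://nsx.example.com',
    #                               'nsx_user': 'nsxuser', 'nsx_password': 'nsxpass',
    #                               'vcenter_ip': '10.0.0.5', 'vcenter_port': 443,
    #                               'vcenter_user': 'vcuser', 'vcenter_password': 'vcpass'})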
242 def __getitem__(self, index):
243 if index == 'name':
244 return self.name
245         elif index == 'tenant_id':
246 return self.tenant_id
247         elif index == 'tenant_name':
248 return self.tenant_name
249 elif index == 'id':
250 return self.id
251 elif index == 'org_name':
252 return self.org_name
253 elif index == 'org_uuid':
254 return self.org_uuid
255 elif index == 'user':
256 return self.user
257 elif index == 'passwd':
258 return self.passwd
259 elif index == 'url':
260 return self.url
261 elif index == 'url_admin':
262 return self.url_admin
263 elif index == "config":
264 return self.config
265 else:
266 raise KeyError("Invalid key '%s'" % str(index))
267
268 def __setitem__(self, index, value):
269 if index == 'name':
270 self.name = value
271 if index == 'tenant_id':
272 self.tenant_id = value
273 if index == 'tenant_name':
274 self.tenant_name = value
275 elif index == 'id':
276 self.id = value
277 elif index == 'org_name':
278 self.org_name = value
279 elif index == 'org_uuid':
280 self.org_uuid = value
281 elif index == 'user':
282 self.user = value
283 elif index == 'passwd':
284 self.passwd = value
285 elif index == 'url':
286 self.url = value
287 elif index == 'url_admin':
288 self.url_admin = value
289 else:
290 raise KeyError("Invalid key '%s'" % str(index))
291
292 def connect_as_admin(self):
293         """ Method connects as the PVDC admin user to vCloud director.
294             There are certain actions that can be done only by the provider VDC admin user,
295             e.g. organization creation or provider network creation.
296
297             Returns:
298                 The client object that can later be used to connect to vCloud director as admin for the provider VDC
299 """
300
301 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
302
303 try:
304 host = self.url
305 org = 'System'
306 client_as_admin = Client(host, verify_ssl_certs=False)
307 client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
308 except Exception as e:
309 raise vimconn.vimconnException(
310 "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
311
312 return client_as_admin
313
314 def connect(self):
315         """ Method connects as a normal user to vCloud director.
316
317             Returns:
318                 The client object that can later be used to connect to vCloud director as admin for the VDC
319 """
320
321 try:
322 self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
323 self.user,
324 self.org_name))
325 host = self.url
326 client = Client(host, verify_ssl_certs=False)
327 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
328 except:
329 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
330 "{} as user: {}".format(self.org_name, self.user))
331
332 return client
333
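    # Illustrative sketch (not part of the original module): how the two login helpers are used.
    # connect() logs in with the tenant credentials against self.org_name, while connect_as_admin()
    # logs in against the 'System' org with the PVDC admin account taken from config.
    #
    #   client = self.connect()                  # pyvcloud Client bound to the tenant organization
    #   org_list = client.get_org_list()         # as done in init_organization() below
    #   admin_client = self.connect_as_admin()   # required e.g. for tenant (VDC) deletion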
334 def init_organization(self):
335         """ Method initializes the organization UUID and the VDC parameters.
336
337             At a bare minimum the client must provide an organization name that is present in vCloud director, and a VDC.
338
339             The VDC UUID (tenant_id) will be initialized at run time if the client didn't pass it to the constructor.
340             The Org UUID will be initialized at run time if the data center is present in vCloud director.
341
342             Returns:
343                 None. It sets self.org_uuid and, when possible, self.tenant_id / self.tenant_name.
344 """
345 client = self.connect()
346 if not client:
347 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
348
349 self.client = client
350 try:
351 if self.org_uuid is None:
352 org_list = client.get_org_list()
353 for org in org_list.Org:
354 # we set org UUID at the init phase but we can do it only when we have valid credential.
355 if org.get('name') == self.org_name:
356 self.org_uuid = org.get('href').split('/')[-1]
357 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
358 break
359 else:
360 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
361
362             # if all is well, request the org details
363 org_details_dict = self.get_org(org_uuid=self.org_uuid)
364
365             # there are two cases when initializing the VDC ID or VDC name at run time
366             # case one: tenant_name provided but no tenant id
367 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
368 vdcs_dict = org_details_dict['vdcs']
369 for vdc in vdcs_dict:
370 if vdcs_dict[vdc] == self.tenant_name:
371 self.tenant_id = vdc
372 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
373 self.org_name))
374 break
375 else:
376 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
377 # case two we have tenant_id but we don't have tenant name so we find and set it.
378 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
379 vdcs_dict = org_details_dict['vdcs']
380 for vdc in vdcs_dict:
381 if vdc == self.tenant_id:
382 self.tenant_name = vdcs_dict[vdc]
383 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
384 self.org_name))
385 break
386 else:
387 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
388 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
389 except:
390             self.logger.debug("Failed to initialize organization UUID for org {}".format(self.org_name))
391 self.logger.debug(traceback.format_exc())
392 self.org_uuid = None
393
394 def new_tenant(self, tenant_name=None, tenant_description=None):
395         """ Method adds a new tenant to the VIM with this name.
396             This action requires permission to create a VDC in vCloud director.
397
398             Args:
399                 tenant_name is the name of the tenant to be created.
400                 tenant_description is not used for this call.
401
402             Return:
403                 returns the tenant identifier in UUID format.
404                 If the action fails the method raises vimconn.vimconnException.
405         """
406 vdc_task = self.create_vdc(vdc_name=tenant_name)
407 if vdc_task is not None:
408 vdc_uuid, value = vdc_task.popitem()
409 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
410 return vdc_uuid
411 else:
412 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
413
414 def delete_tenant(self, tenant_id=None):
415         """ Delete a tenant from the VIM.
416             Args:
417                 tenant_id is the id of the tenant to be deleted.
418
419             Return:
420                 returns the tenant identifier in UUID format.
421                 If the action fails the method raises an exception.
422         """
423 vca = self.connect_as_admin()
424 if not vca:
425 raise vimconn.vimconnConnectionException("Failed to connect vCD")
426
427 if tenant_id is not None:
428 if vca._session:
429 #Get OrgVDC
430 url_list = [self.url, '/api/vdc/', tenant_id]
431 orgvdc_herf = ''.join(url_list)
432
433 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
434 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
435 response = self.perform_request(req_type='GET',
436 url=orgvdc_herf,
437 headers=headers)
438
439 if response.status_code != requests.codes.ok:
440 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
441 "Return status code {}".format(orgvdc_herf,
442 response.status_code))
443 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
444
445 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
446 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
447 #For python3
448 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
449 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
450 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
451 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
452
453 response = self.perform_request(req_type='DELETE',
454 url=vdc_remove_href,
455 headers=headers)
456
457 if response.status_code == 202:
458 time.sleep(5)
459 return tenant_id
460 else:
461 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
462 "Return status code {}".format(vdc_remove_href,
463 response.status_code))
464 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
465 else:
466 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
467 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
468
469
470 def get_tenant_list(self, filter_dict={}):
471 """Obtain tenants of VIM
472 filter_dict can contain the following keys:
473 name: filter by tenant name
474 id: filter by tenant uuid/id
475 <other VIM specific>
476 Returns the tenant list of dictionaries:
477             [{'name':'<name>', 'id':'<id>', ...}, ...]
478
479 """
480 org_dict = self.get_org(self.org_uuid)
481 vdcs_dict = org_dict['vdcs']
482
483 vdclist = []
484 try:
485 for k in vdcs_dict:
486 entry = {'name': vdcs_dict[k], 'id': k}
487                 # if the caller passed a filter we return only matching tenants, otherwise all of them.
488 if filter_dict is not None and filter_dict:
489 filtered_entry = entry.copy()
490 filtered_dict = set(entry.keys()) - set(filter_dict)
491 for unwanted_key in filtered_dict: del entry[unwanted_key]
492 if filter_dict == entry:
493 vdclist.append(filtered_entry)
494 else:
495 vdclist.append(entry)
496         except Exception as e:
497             self.logger.debug("Error in get_tenant_list()")
498             self.logger.debug(traceback.format_exc())
499             raise vimconn.vimconnException("Incorrect state. {}".format(e))
500
501 return vdclist
502
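    # Illustrative sketch (not part of the original module): filter semantics of get_tenant_list(),
    # where conn is an already constructed vimconnector. Each entry is reduced to the keys present in
    # filter_dict and must then match it exactly, so filtering by name, id, or both works the same way.
    #
    #   all_vdcs = conn.get_tenant_list({})                    # every VDC of the organization
    #   by_name  = conn.get_tenant_list({'name': 'myvdc'})     # e.g. [{'name': 'myvdc', 'id': '<uuid>'}]
    #   by_id    = conn.get_tenant_list({'id': '<vdc-uuid>'})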
503 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
504 """Adds a tenant network to VIM
505 net_name is the name
506         net_type can be 'bridge', 'data' or 'ptp'.
507 ip_profile is a dict containing the IP parameters of the network
508 shared is a boolean
509 Returns the network identifier"""
510
511 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
512 .format(net_name, net_type, ip_profile, shared))
513
514 isshared = 'false'
515 if shared:
516 isshared = 'true'
517
518 # ############# Stub code for SRIOV #################
519 # if net_type == "data" or net_type == "ptp":
520 # if self.config.get('dv_switch_name') == None:
521 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
522 # network_uuid = self.create_dvPort_group(net_name)
523
524 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
525 ip_profile=ip_profile, isshared=isshared)
526 if network_uuid is not None:
527 return network_uuid
528 else:
529 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
530
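    # Illustrative sketch (not part of the original module): creating a tenant network, where conn is
    # an already constructed vimconnector. The ip_profile keys shown mirror DEFAULT_IP_PROFILE defined
    # at module level; keys left out are assumed to fall back to those defaults in create_network().
    #
    #   net_id = conn.new_network('mgmt-net', 'bridge',
    #                             ip_profile={'ip_version': 'IPv4',
    #                                         'dhcp_enabled': True,
    #                                         'dhcp_count': 50},
    #                             shared=False)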
531 def get_vcd_network_list(self):
532         """ Method lists the networks available in the VDC of the logged-in tenant.
533
534             Returns:
535                 A list of dictionaries, one per organization VDC network.
536 """
537
538 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
539
540 if not self.tenant_name:
541 raise vimconn.vimconnConnectionException("Tenant name is empty.")
542
543 org, vdc = self.get_vdc_details()
544 if vdc is None:
545 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
546
547 vdc_uuid = vdc.get('id').split(":")[3]
548 if self.client._session:
549 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
550 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
551 response = self.perform_request(req_type='GET',
552 url=vdc.get('href'),
553 headers=headers)
554 if response.status_code != 200:
555 self.logger.error("Failed to get vdc content")
556 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
557 else:
558 content = XmlElementTree.fromstring(response.content)
559
560 network_list = []
561 try:
562 for item in content:
563 if item.tag.split('}')[-1] == 'AvailableNetworks':
564 for net in item:
565 response = self.perform_request(req_type='GET',
566 url=net.get('href'),
567 headers=headers)
568
569 if response.status_code != 200:
570 self.logger.error("Failed to get network content")
571 raise vimconn.vimconnNotFoundException("Failed to get network content")
572 else:
573 net_details = XmlElementTree.fromstring(response.content)
574
575 filter_dict = {}
576 net_uuid = net_details.get('id').split(":")
577 if len(net_uuid) != 4:
578 continue
579 else:
580 net_uuid = net_uuid[3]
581 # create dict entry
582 self.logger.debug("get_vcd_network_list(): Adding network {} "
583 "to a list vcd id {} network {}".format(net_uuid,
584 vdc_uuid,
585 net_details.get('name')))
586 filter_dict["name"] = net_details.get('name')
587 filter_dict["id"] = net_uuid
588 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
589 shared = True
590 else:
591 shared = False
592 filter_dict["shared"] = shared
593 filter_dict["tenant_id"] = vdc_uuid
594 if int(net_details.get('status')) == 1:
595 filter_dict["admin_state_up"] = True
596 else:
597 filter_dict["admin_state_up"] = False
598 filter_dict["status"] = "ACTIVE"
599 filter_dict["type"] = "bridge"
600 network_list.append(filter_dict)
601 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
602 except:
603 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
604 pass
605
606 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
607 return network_list
608
609 def get_network_list(self, filter_dict={}):
610 """Obtain tenant networks of VIM
611 Filter_dict can be:
612 name: network name OR/AND
613 id: network uuid OR/AND
614 shared: boolean OR/AND
615 tenant_id: tenant OR/AND
616 admin_state_up: boolean
617 status: 'ACTIVE'
618
619 [{key : value , key : value}]
620
621 Returns the network list of dictionaries:
622 [{<the fields at Filter_dict plus some VIM specific>}, ...]
623 List can be empty
624 """
625
626 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
627
628 if not self.tenant_name:
629 raise vimconn.vimconnConnectionException("Tenant name is empty.")
630
631 org, vdc = self.get_vdc_details()
632 if vdc is None:
633 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
634
635 try:
636 vdcid = vdc.get('id').split(":")[3]
637
638 if self.client._session:
639 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
640 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
641 response = self.perform_request(req_type='GET',
642 url=vdc.get('href'),
643 headers=headers)
644 if response.status_code != 200:
645 self.logger.error("Failed to get vdc content")
646 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
647 else:
648 content = XmlElementTree.fromstring(response.content)
649
650 network_list = []
651 for item in content:
652 if item.tag.split('}')[-1] == 'AvailableNetworks':
653 for net in item:
654 response = self.perform_request(req_type='GET',
655 url=net.get('href'),
656 headers=headers)
657
658 if response.status_code != 200:
659 self.logger.error("Failed to get network content")
660 raise vimconn.vimconnNotFoundException("Failed to get network content")
661 else:
662 net_details = XmlElementTree.fromstring(response.content)
663
664 filter_entry = {}
665 net_uuid = net_details.get('id').split(":")
666 if len(net_uuid) != 4:
667 continue
668 else:
669 net_uuid = net_uuid[3]
670 # create dict entry
671 self.logger.debug("get_network_list(): Adding net {}"
672 " to a list vcd id {} network {}".format(net_uuid,
673 vdcid,
674 net_details.get('name')))
675 filter_entry["name"] = net_details.get('name')
676 filter_entry["id"] = net_uuid
677 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
678 shared = True
679 else:
680 shared = False
681 filter_entry["shared"] = shared
682 filter_entry["tenant_id"] = vdcid
683 if int(net_details.get('status')) == 1:
684 filter_entry["admin_state_up"] = True
685 else:
686 filter_entry["admin_state_up"] = False
687 filter_entry["status"] = "ACTIVE"
688 filter_entry["type"] = "bridge"
689 filtered_entry = filter_entry.copy()
690
691 if filter_dict is not None and filter_dict:
692 # we remove all the key : value we don't care and match only
693 # respected field
694 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
695 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
696 if filter_dict == filter_entry:
697 network_list.append(filtered_entry)
698 else:
699 network_list.append(filtered_entry)
700 except Exception as e:
701 self.logger.debug("Error in get_network_list",exc_info=True)
702 if isinstance(e, vimconn.vimconnException):
703 raise
704 else:
705 raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
706
707 self.logger.debug("Returning {}".format(network_list))
708 return network_list
709
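    # Illustrative sketch (not part of the original module): typical get_network_list() filters, where
    # conn is an already constructed vimconnector. As in get_tenant_list(), each network entry is
    # trimmed to the filter keys and compared for an exact match.
    #
    #   nets      = conn.get_network_list()                              # all networks of the VDC
    #   mgmt_nets = conn.get_network_list({'name': 'mgmt-net'})
    #   active    = conn.get_network_list({'status': 'ACTIVE', 'shared': False})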
710 def get_network(self, net_id):
711         """Method obtains the network details of the net_id VIM network.
712            Returns a dict with the fields described in get_network_list, plus some VIM-specific fields."""
713
714 try:
715 org, vdc = self.get_vdc_details()
716 vdc_id = vdc.get('id').split(":")[3]
717 if self.client._session:
718 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
719 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
720 response = self.perform_request(req_type='GET',
721 url=vdc.get('href'),
722 headers=headers)
723 if response.status_code != 200:
724 self.logger.error("Failed to get vdc content")
725 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
726 else:
727 content = XmlElementTree.fromstring(response.content)
728
729 filter_dict = {}
730
731 for item in content:
732 if item.tag.split('}')[-1] == 'AvailableNetworks':
733 for net in item:
734 response = self.perform_request(req_type='GET',
735 url=net.get('href'),
736 headers=headers)
737
738 if response.status_code != 200:
739 self.logger.error("Failed to get network content")
740 raise vimconn.vimconnNotFoundException("Failed to get network content")
741 else:
742 net_details = XmlElementTree.fromstring(response.content)
743
744 vdc_network_id = net_details.get('id').split(":")
745 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
746 filter_dict["name"] = net_details.get('name')
747 filter_dict["id"] = vdc_network_id[3]
748 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
749 shared = True
750 else:
751 shared = False
752 filter_dict["shared"] = shared
753 filter_dict["tenant_id"] = vdc_id
754 if int(net_details.get('status')) == 1:
755 filter_dict["admin_state_up"] = True
756 else:
757 filter_dict["admin_state_up"] = False
758 filter_dict["status"] = "ACTIVE"
759 filter_dict["type"] = "bridge"
760 self.logger.debug("Returning {}".format(filter_dict))
761 return filter_dict
762 else:
763 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
764 except Exception as e:
765 self.logger.debug("Error in get_network")
766 self.logger.debug(traceback.format_exc())
767 if isinstance(e, vimconn.vimconnException):
768 raise
769 else:
770 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
771
772 return filter_dict
773
774 def delete_network(self, net_id):
775 """
776 Method Deletes a tenant network from VIM, provide the network id.
777
778 Returns the network identifier or raise an exception
779 """
780
781 # ############# Stub code for SRIOV #################
782 # dvport_group = self.get_dvport_group(net_id)
783 # if dvport_group:
784 # #delete portgroup
785 # status = self.destroy_dvport_group(net_id)
786 # if status:
787 # # Remove vlanID from persistent info
788 # if net_id in self.persistent_info["used_vlanIDs"]:
789 # del self.persistent_info["used_vlanIDs"][net_id]
790 #
791 # return net_id
792
793 vcd_network = self.get_vcd_network(network_uuid=net_id)
794 if vcd_network is not None and vcd_network:
795 if self.delete_network_action(network_uuid=net_id):
796 return net_id
797 else:
798 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
799
800 def refresh_nets_status(self, net_list):
801 """Get the status of the networks
802 Params: the list of network identifiers
803 Returns a dictionary with:
804 net_id: #VIM id of this network
805 status: #Mandatory. Text with one of:
806 # DELETED (not found at vim)
807 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
808 # OTHER (Vim reported other status not understood)
809 # ERROR (VIM indicates an ERROR status)
810 # ACTIVE, INACTIVE, DOWN (admin down),
811 # BUILD (on building process)
812 #
813 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
814 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
815
816 """
817
818 dict_entry = {}
819 try:
820 for net in net_list:
821 errormsg = ''
822 vcd_network = self.get_vcd_network(network_uuid=net)
823 if vcd_network is not None and vcd_network:
824 if vcd_network['status'] == '1':
825 status = 'ACTIVE'
826 else:
827 status = 'DOWN'
828 else:
829 status = 'DELETED'
830 errormsg = 'Network not found.'
831
832 dict_entry[net] = {'status': status, 'error_msg': errormsg,
833 'vim_info': yaml.safe_dump(vcd_network)}
834 except:
835 self.logger.debug("Error in refresh_nets_status")
836 self.logger.debug(traceback.format_exc())
837
838 return dict_entry
839
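    # Illustrative sketch (not part of the original module): the shape of the refresh_nets_status()
    # result, where conn is an already constructed vimconnector.
    #
    #   result = conn.refresh_nets_status(['<net-uuid-1>', '<net-uuid-2>'])
    #   # result is a dict keyed by network id, e.g.
    #   # {'<net-uuid-1>': {'status': 'ACTIVE',  'error_msg': '', 'vim_info': '<yaml dump of the vCD net>'},
    #   #  '<net-uuid-2>': {'status': 'DELETED', 'error_msg': 'Network not found.', 'vim_info': '<yaml dump>'}}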
840 def get_flavor(self, flavor_id):
841 """Obtain flavor details from the VIM
842 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
843 """
844 if flavor_id not in vimconnector.flavorlist:
845 raise vimconn.vimconnNotFoundException("Flavor not found.")
846 return vimconnector.flavorlist[flavor_id]
847
848 def new_flavor(self, flavor_data):
849 """Adds a tenant flavor to VIM
850 flavor_data contains a dictionary with information, keys:
851 name: flavor name
852 ram: memory (cloud type) in MBytes
853             vcpus: cpus (cloud type)
854 extended: EPA parameters
855 - numas: #items requested in same NUMA
856 memory: number of 1G huge pages memory
857 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
858 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
859 - name: interface name
860 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
861 bandwidth: X Gbps; requested guarantee bandwidth
862 vpci: requested virtual PCI address
863 disk: disk size
864 is_public:
865 #TODO to concrete
866 Returns the flavor identifier"""
867
868 # generate a new uuid put to internal dict and return it.
869 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
870 new_flavor=flavor_data
871 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
872 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
873 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
874
875 if not isinstance(ram, int):
876 raise vimconn.vimconnException("Non-integer value for ram")
877 elif not isinstance(cpu, int):
878 raise vimconn.vimconnException("Non-integer value for cpu")
879 elif not isinstance(disk, int):
880 raise vimconn.vimconnException("Non-integer value for disk")
881
882 extended_flv = flavor_data.get("extended")
883 if extended_flv:
884 numas=extended_flv.get("numas")
885 if numas:
886 for numa in numas:
887 #overwrite ram and vcpus
888 if 'memory' in numa:
889 ram = numa['memory']*1024
890 if 'paired-threads' in numa:
891 cpu = numa['paired-threads']*2
892 elif 'cores' in numa:
893 cpu = numa['cores']
894 elif 'threads' in numa:
895 cpu = numa['threads']
896
897 new_flavor[FLAVOR_RAM_KEY] = ram
898 new_flavor[FLAVOR_VCPUS_KEY] = cpu
899 new_flavor[FLAVOR_DISK_KEY] = disk
900 # generate a new uuid put to internal dict and return it.
901 flavor_id = uuid.uuid4()
902 vimconnector.flavorlist[str(flavor_id)] = new_flavor
903 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
904
905 return str(flavor_id)
906
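    # Illustrative sketch (not part of the original module): how new_flavor() derives RAM and vCPUs from
    # an EPA 'extended' section, where conn is an already constructed vimconnector. With the data below,
    # ram becomes 4 * 1024 MB and vcpus becomes 2 paired-threads * 2.
    #
    #   flavor_id = conn.new_flavor({'name': 'epa.small',
    #                                'ram': 2048, 'vcpus': 2, 'disk': 10,
    #                                'extended': {'numas': [{'memory': 4,
    #                                                        'paired-threads': 2}]}})
    #   conn.get_flavor(flavor_id)   # -> {'ram': 4096, 'vcpus': 4, 'disk': 10, ...}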
907 def delete_flavor(self, flavor_id):
908         """Deletes a tenant flavor from the VIM, identified by its id.
909
910 Returns the used id or raise an exception
911 """
912 if flavor_id not in vimconnector.flavorlist:
913 raise vimconn.vimconnNotFoundException("Flavor not found.")
914
915 vimconnector.flavorlist.pop(flavor_id, None)
916 return flavor_id
917
918 def new_image(self, image_dict):
919         """
920         Adds a tenant image to VIM.
921         Returns:
922             the image identifier (vCD catalog UUID) if the image is created,
923             otherwise an exception is raised.
924         """
925
926 return self.get_image_id_from_path(image_dict['location'])
927
928 def delete_image(self, image_id):
929 """
930 Deletes a tenant image from VIM
931 Args:
932 image_id is ID of Image to be deleted
933 Return:
934 returns the image identifier in UUID format or raises an exception on error
935 """
936 conn = self.connect_as_admin()
937 if not conn:
938 raise vimconn.vimconnConnectionException("Failed to connect vCD")
939 # Get Catalog details
940 url_list = [self.url, '/api/catalog/', image_id]
941 catalog_herf = ''.join(url_list)
942
943 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
944 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
945
946 response = self.perform_request(req_type='GET',
947 url=catalog_herf,
948 headers=headers)
949
950 if response.status_code != requests.codes.ok:
951 self.logger.debug("delete_image():GET REST API call {} failed. "\
952 "Return status code {}".format(catalog_herf,
953 response.status_code))
954 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
955
956 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
957 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
958 #For python3
959 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
960 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
961
962 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
963 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
964 for catalogItem in catalogItems:
965 catalogItem_href = catalogItem.attrib['href']
966
967 response = self.perform_request(req_type='GET',
968 url=catalogItem_href,
969 headers=headers)
970
971 if response.status_code != requests.codes.ok:
972 self.logger.debug("delete_image():GET REST API call {} failed. "\
973 "Return status code {}".format(catalog_herf,
974 response.status_code))
975 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
976 catalogItem,
977 image_id))
978
979 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
980 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
981 #For python3
982 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
983 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
984 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
985
986 #Remove catalogItem
987 response = self.perform_request(req_type='DELETE',
988 url=catalogitem_remove_href,
989 headers=headers)
990 if response.status_code == requests.codes.no_content:
991 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
992 else:
993 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
994
995 #Remove catalog
996 url_list = [self.url, '/api/admin/catalog/', image_id]
997 catalog_remove_herf = ''.join(url_list)
998 response = self.perform_request(req_type='DELETE',
999 url=catalog_remove_herf,
1000 headers=headers)
1001
1002 if response.status_code == requests.codes.no_content:
1003 self.logger.debug("Deleted Catalog {}".format(image_id))
1004 return image_id
1005 else:
1006 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
1007
1008
1009 def catalog_exists(self, catalog_name, catalogs):
1010         """ Check whether a catalog with the given name exists.
1011
1012         :param catalog_name: catalog name to look for
1013         :param catalogs: list of catalogs to search
1014         :return: True if the catalog exists, False otherwise
1015         """
1016 for catalog in catalogs:
1017 if catalog['name'] == catalog_name:
1018 return True
1019 return False
1020
1021 def create_vimcatalog(self, vca=None, catalog_name=None):
1022 """ Create new catalog entry in vCloud director.
1023
1024 Args
1025                 vca: vCloud director org object.
1026                 catalog_name: catalog that the client wishes to create. Note that no validation is done on the name;
1027                 the client must make sure it provides a valid string representation.
1028
1029             Return (bool) True if the catalog was created.
1030
1031 """
1032 try:
1033 result = vca.create_catalog(catalog_name, catalog_name)
1034 if result is not None:
1035 return True
1036 catalogs = vca.list_catalogs()
1037 except:
1038 return False
1039 return self.catalog_exists(catalog_name, catalogs)
1040
1041 # noinspection PyIncorrectDocstring
1042 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
1043 description='', progress=False, chunk_bytes=128 * 1024):
1044 """
1045         Uploads an OVF file to a vCloud catalog
1046
1047 :param chunk_bytes:
1048 :param progress:
1049 :param description:
1050 :param image_name:
1051 :param vca:
1052 :param catalog_name: (str): The name of the catalog to upload the media.
1053 :param media_file_name: (str): The name of the local media file to upload.
1054 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1055 """
1056 os.path.isfile(media_file_name)
1057 statinfo = os.stat(media_file_name)
1058
1059         # find a catalog entry where we upload the OVF.
1060         # create the vApp Template and check the status: if vCD is able to read the OVF it will respond with the
1061         # appropriate status change.
1062         # if vCD can parse the OVF we upload the VMDK file
1063 try:
1064 for catalog in vca.list_catalogs():
1065 if catalog_name != catalog['name']:
1066 continue
1067 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
1068 data = """
1069 <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
1070 """.format(catalog_name, description)
1071
1072 if self.client:
1073 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1074 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1075 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1076
1077 response = self.perform_request(req_type='POST',
1078 url=catalog_href,
1079 headers=headers,
1080 data=data)
1081
1082 if response.status_code == requests.codes.created:
1083 catalogItem = XmlElementTree.fromstring(response.content)
1084 entity = [child for child in catalogItem if
1085 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1086 href = entity.get('href')
1087 template = href
1088
1089 response = self.perform_request(req_type='GET',
1090 url=href,
1091 headers=headers)
1092
1093 if response.status_code == requests.codes.ok:
1094                         headers['Content-Type'] = 'text/xml'
1095 result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"',response.content)
1096 if result:
1097 transfer_href = result.group(1)
1098
1099 response = self.perform_request(req_type='PUT',
1100 url=transfer_href,
1101 headers=headers,
1102 data=open(media_file_name, 'rb'))
1103 if response.status_code != requests.codes.ok:
1104 self.logger.debug(
1105 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1106 media_file_name))
1107 return False
1108
1109                         # TODO fix this with an async block
1110 time.sleep(5)
1111
1112 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1113
1114 # uploading VMDK file
1115 # check status of OVF upload and upload remaining files.
1116 response = self.perform_request(req_type='GET',
1117 url=template,
1118 headers=headers)
1119
1120 if response.status_code == requests.codes.ok:
1121 result = re.search('rel="upload:default"\s*href="(.*?vmdk)"',response.content)
1122 if result:
1123 link_href = result.group(1)
1124                             # we skip the ovf since it is already uploaded.
1125 if 'ovf' in link_href:
1126 continue
1127                             # The OVF file and the VMDK must be in the same directory
1128 head, tail = os.path.split(media_file_name)
1129 file_vmdk = head + '/' + link_href.split("/")[-1]
1130 if not os.path.isfile(file_vmdk):
1131 return False
1132 statinfo = os.stat(file_vmdk)
1133 if statinfo.st_size == 0:
1134 return False
1135 hrefvmdk = link_href
1136
1137 if progress:
1138 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1139 FileTransferSpeed()]
1140 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1141
1142 bytes_transferred = 0
1143 f = open(file_vmdk, 'rb')
1144 while bytes_transferred < statinfo.st_size:
1145 my_bytes = f.read(chunk_bytes)
1146 if len(my_bytes) <= chunk_bytes:
1147                                     headers['Content-Range'] = 'bytes %s-%s/%s' % (
1148                                         bytes_transferred, bytes_transferred + len(my_bytes) - 1, statinfo.st_size)
1149 headers['Content-Length'] = str(len(my_bytes))
1150 response = requests.put(url=hrefvmdk,
1151 headers=headers,
1152 data=my_bytes,
1153 verify=False)
1154 if response.status_code == requests.codes.ok:
1155 bytes_transferred += len(my_bytes)
1156 if progress:
1157 progress_bar.update(bytes_transferred)
1158 else:
1159 self.logger.debug(
1160 'file upload failed with error: [%s] %s' % (response.status_code,
1161 response.content))
1162
1163 f.close()
1164 return False
1165 f.close()
1166 if progress:
1167 progress_bar.finish()
1168 time.sleep(10)
1169 return True
1170 else:
1171 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1172 format(catalog_name, media_file_name))
1173 return False
1174 except Exception as exp:
1175 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1176 .format(catalog_name,media_file_name, exp))
1177 raise vimconn.vimconnException(
1178 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1179 .format(catalog_name,media_file_name, exp))
1180
1181 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1182 return False
1183
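    # Illustrative sketch (not part of the original module): the chunked PUT pattern used above for the
    # VMDK transfer. For a chunk of n bytes starting at offset bytes_transferred, the request carries:
    #
    #   Content-Range:  bytes <bytes_transferred>-<bytes_transferred + n - 1>/<total file size>
    #   Content-Length: <n>
    #
    # and the offset advances by n after every 200/OK response until the whole file is sent.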
1184 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1185 """Upload media file"""
1186 # TODO add named parameters for readability
1187
1188 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1189 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1190
1191 def validate_uuid4(self, uuid_string=None):
1192 """ Method validate correct format of UUID.
1193
1194 Return: true if string represent valid uuid
1195 """
1196 try:
1197 val = uuid.UUID(uuid_string, version=4)
1198 except ValueError:
1199 return False
1200 return True
1201
1202 def get_catalogid(self, catalog_name=None, catalogs=None):
1203 """ Method check catalog and return catalog ID in UUID format.
1204
1205 Args
1206 catalog_name: catalog name as string
1207 catalogs: list of catalogs.
1208
1209 Return: catalogs uuid
1210 """
1211
1212 for catalog in catalogs:
1213 if catalog['name'] == catalog_name:
1214 catalog_id = catalog['id']
1215 return catalog_id
1216 return None
1217
1218 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1219         """ Method looks up a catalog by its UUID and returns the catalog name.
1220
1221             Args
1222                 catalog_uuid: catalog UUID as string
1223                 catalogs: list of catalogs.
1224
1225             Return: catalog name or None
1226         """
1227
1228 if not self.validate_uuid4(uuid_string=catalog_uuid):
1229 return None
1230
1231 for catalog in catalogs:
1232 catalog_id = catalog.get('id')
1233 if catalog_id == catalog_uuid:
1234 return catalog.get('name')
1235 return None
1236
1237 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1238         """ Method looks up a catalog by its UUID and returns the catalog object.
1239
1240             Args
1241                 catalog_uuid: catalog UUID as string
1242                 catalogs: list of catalogs.
1243
1244             Return: catalog object or None
1245         """
1246
1247 if not self.validate_uuid4(uuid_string=catalog_uuid):
1248 return None
1249
1250 for catalog in catalogs:
1251 catalog_id = catalog.get('id')
1252 if catalog_id == catalog_uuid:
1253 return catalog
1254 return None
1255
1256 def get_image_id_from_path(self, path=None, progress=False):
1257         """ Method uploads an OVF image to vCloud director.
1258
1259             Each OVF image is represented as a single catalog entry in vCloud director.
1260             The method checks for an existing catalog entry; the check is done by file name without the file extension.
1261
1262             If the given catalog name is already present, the method responds with the existing catalog UUID, otherwise
1263             it creates a new catalog entry and uploads the OVF file to the newly created catalog.
1264
1265             If the method can't create a catalog entry or upload a file, it raises an exception.
1266
1267             The method accepts a boolean flag progress that outputs a progress bar. It is useful
1268             for the standalone upload use case, e.g. to test large file uploads.
1269
1270             Args
1271                 path: - valid path to the OVF file.
1272                 progress - boolean flag; show a progress bar during upload.
1273
1274             Return: if the image is uploaded correctly, the method returns the image catalog UUID.
1275         """
1276
1277 if not path:
1278 raise vimconn.vimconnException("Image path can't be None.")
1279
1280 if not os.path.isfile(path):
1281 raise vimconn.vimconnException("Can't read file. File not found.")
1282
1283 if not os.access(path, os.R_OK):
1284 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1285
1286 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1287
1288 dirpath, filename = os.path.split(path)
1289 flname, file_extension = os.path.splitext(path)
1290 if file_extension != '.ovf':
1291 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1292 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1293
1294 catalog_name = os.path.splitext(filename)[0]
1295 catalog_md5_name = hashlib.md5(path).hexdigest()
1296 self.logger.debug("File name {} Catalog Name {} file path {} "
1297 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1298
1299 try:
1300 org,vdc = self.get_vdc_details()
1301 catalogs = org.list_catalogs()
1302 except Exception as exp:
1303 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1304 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1305
1306 if len(catalogs) == 0:
1307 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1308 result = self.create_vimcatalog(org, catalog_md5_name)
1309 if not result:
1310 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1311
1312 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1313 media_name=filename, medial_file_name=path, progress=progress)
1314 if not result:
1315 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1316 return self.get_catalogid(catalog_name, catalogs)
1317 else:
1318 for catalog in catalogs:
1319 # search for existing catalog if we find same name we return ID
1320 # TODO optimize this
1321 if catalog['name'] == catalog_md5_name:
1322 self.logger.debug("Found existing catalog entry for {} "
1323 "catalog id {}".format(catalog_name,
1324 self.get_catalogid(catalog_md5_name, catalogs)))
1325 return self.get_catalogid(catalog_md5_name, catalogs)
1326
1327 # if we didn't find existing catalog we create a new one and upload image.
1328 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1329 result = self.create_vimcatalog(org, catalog_md5_name)
1330 if not result:
1331 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1332
1333 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1334 media_name=filename, medial_file_name=path, progress=progress)
1335 if not result:
1336 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1337
1338 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1339
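    # Illustrative sketch (not part of the original module): the catalog naming used above, where conn
    # is an already constructed vimconnector. The catalog entry is keyed by the MD5 of the OVF path
    # rather than the file name, so re-uploading the same path is effectively idempotent.
    #
    #   image_id = conn.get_image_id_from_path('/opt/images/cirros.ovf')
    #   # catalog name in vCD: hashlib.md5('/opt/images/cirros.ovf').hexdigest()
    #   # a second call with the same path returns the existing catalog UUID without uploading again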
1340 def get_image_list(self, filter_dict={}):
1341 '''Obtain tenant images from VIM
1342 Filter_dict can be:
1343 name: image name
1344 id: image uuid
1345 checksum: image checksum
1346 location: image path
1347 Returns the image list of dictionaries:
1348 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1349 List can be empty
1350 '''
1351
1352 try:
1353 org, vdc = self.get_vdc_details()
1354 image_list = []
1355 catalogs = org.list_catalogs()
1356 if len(catalogs) == 0:
1357 return image_list
1358 else:
1359 for catalog in catalogs:
1360 catalog_uuid = catalog.get('id')
1361 name = catalog.get('name')
1362 filtered_dict = {}
1363 if filter_dict.get("name") and filter_dict["name"] != name:
1364 continue
1365 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1366 continue
1367 filtered_dict ["name"] = name
1368 filtered_dict ["id"] = catalog_uuid
1369 image_list.append(filtered_dict)
1370
1371 self.logger.debug("List of already created catalog items: {}".format(image_list))
1372 return image_list
1373 except Exception as exp:
1374             raise vimconn.vimconnException("Exception occurred while retrieving catalog items {}".format(exp))
1375
1376 def get_vappid(self, vdc=None, vapp_name=None):
1377 """ Method takes vdc object and vApp name and returns vapp uuid or None
1378
1379 Args:
1380 vdc: The VDC object.
1381                 vapp_name: the vApp name identifier
1382
1383             Returns:
1384                 The vApp UUID, otherwise None
1385 """
1386 if vdc is None or vapp_name is None:
1387 return None
1388 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1389 try:
1390 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1391 vdc.ResourceEntities.ResourceEntity)
1392 #For python3
1393 #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1394 # if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1395 if len(refs) == 1:
1396 return refs[0].href.split("vapp")[1][1:]
1397 except Exception as e:
1398 self.logger.exception(e)
1399 return False
1400 return None
1401
1402 def check_vapp(self, vdc=None, vapp_uuid=None):
1403         """ Method returns True or False depending on whether the vApp is deployed in vCloud director
1404
1405 Args:
1406 vca: Connector to VCA
1407 vdc: The VDC object.
1408 vappid: vappid is application identifier
1409
1410 Returns:
1411 The return True if vApp deployed
1412 :param vdc:
1413 :param vapp_uuid:
1414 """
1415 try:
1416 refs = filter(lambda ref:
1417 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1418 vdc.ResourceEntities.ResourceEntity)
1419 #For python3
1420 #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1421 # if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1422 for ref in refs:
1423 vappid = ref.href.split("vapp")[1][1:]
1424 # find vapp with respected vapp uuid
1425 if vappid == vapp_uuid:
1426 return True
1427 except Exception as e:
1428 self.logger.exception(e)
1429 return False
1430 return False
1431
1432 def get_namebyvappid(self, vapp_uuid=None):
1433 """Method returns vApp name from vCD and lookup done by vapp_id.
1434
1435 Args:
1436 vapp_uuid: vappid is application identifier
1437
1438 Returns:
1439 The return vApp name otherwise None
1440 """
1441 try:
1442 if self.client and vapp_uuid:
1443 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1444 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1445 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1446
1447 response = self.perform_request(req_type='GET',
1448 url=vapp_call,
1449 headers=headers)
1450 #Retry login if session expired & retry sending request
1451 if response.status_code == 403:
1452 response = self.retry_rest('GET', vapp_call)
1453
1454 tree = XmlElementTree.fromstring(response.content)
1455 return tree.attrib['name']
1456 except Exception as e:
1457 self.logger.exception(e)
1458 return None
1459 return None
1460
1461 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1462 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1463 """Adds a VM instance to VIM
1464 Params:
1465 'start': (boolean) indicates if VM must start or created in pause mode.
1466 'image_id','flavor_id': image and flavor VIM id to use for the VM
1467 'net_list': list of interfaces, each one is a dictionary with:
1468 'name': (optional) name for the interface.
1469 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1470 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1471                 'model': (optional and only makes sense for type==virtual) interface model: virtio, e1000, ...
1472 'mac_address': (optional) mac address to assign to this interface
1473 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1474 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1475 'type': (mandatory) can be one of:
1476 'virtual', in this case always connected to a network of type 'net_type=bridge'
1477                 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
1478                     can be created unconnected
1479 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1480 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1481 are allocated on the same physical NIC
1482 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1483 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1484 or True, it must apply the default VIM behaviour
1485 After execution the method will add the key:
1486 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1487 interface. 'net_list' is modified
1488 'cloud_config': (optional) dictionary with:
1489 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1490 'users': (optional) list of users to be inserted, each item is a dict with:
1491 'name': (mandatory) user name,
1492 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1493 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1494 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1495 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1496 'dest': (mandatory) string with the destination absolute path
1497 'encoding': (optional, by default text). Can be one of:
1498 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1499 'content' (mandatory): string with the content of the file
1500 'permissions': (optional) string with file permissions, typically octal notation '0644'
1501 'owner': (optional) file owner, string with the format 'owner:group'
1502 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1503 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1504 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1505 'size': (mandatory) string with the size of the disk in GB
1506             availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
1507 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1508 availability_zone_index is None
1509 Returns a tuple with the instance identifier and created_items or raises an exception on error
1510 created_items can be None or a dictionary where this method can include key-values that will be passed to
1511 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1512 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1513 as not present.
1514 """
1515 self.logger.info("Creating new instance for entry {}".format(name))
1516 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
1517 "availability_zone_index {} availability_zone_list {}"\
1518 .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
1519 availability_zone_index, availability_zone_list))
1520
1521 #new vm name = vmname + '-' + uuid
1522 new_vm_name = [name, '-', str(uuid.uuid4())]
1523 vmname_andid = ''.join(new_vm_name)
1524
1525 for net in net_list:
1526 if net['type'] == "PCI-PASSTHROUGH":
1527 raise vimconn.vimconnNotSupportedException(
1528 "Current vCD version does not support type : {}".format(net['type']))
1529
1530 if len(net_list) > 10:
1531 raise vimconn.vimconnNotSupportedException(
1532 "The VM hardware versions 7 and above support upto 10 NICs only")
1533
1534 # if vm already deployed we return existing uuid
1535 # we check for presence of VDC, Catalog entry and Flavor.
1536 org, vdc = self.get_vdc_details()
1537 if vdc is None:
1538 raise vimconn.vimconnNotFoundException(
1539 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1540 catalogs = org.list_catalogs()
1541 if catalogs is None:
1542 #Retry once by refreshing the token, if the first attempt failed
1543 self.get_token()
1544 org = Org(self.client, resource=self.client.get_org())
1545 catalogs = org.list_catalogs()
1546 if catalogs is None:
1547 raise vimconn.vimconnNotFoundException(
1548 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1549
1550 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1551 if catalog_hash_name:
1552 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1553 else:
1554 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1555 "(Failed retrieve catalog information {})".format(name, image_id))
1556
1557 # Set vCPU and Memory based on flavor.
1558 vm_cpus = None
1559 vm_memory = None
1560 vm_disk = None
1561 numas = None
1562
1563 if flavor_id is not None:
1564 if flavor_id not in vimconnector.flavorlist:
1565 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1566 "Failed retrieve flavor information "
1567 "flavor id {}".format(name, flavor_id))
1568 else:
1569 try:
1570 flavor = vimconnector.flavorlist[flavor_id]
1571 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1572 vm_memory = flavor[FLAVOR_RAM_KEY]
1573 vm_disk = flavor[FLAVOR_DISK_KEY]
1574 extended = flavor.get("extended", None)
1575 if extended:
1576 numas=extended.get("numas", None)
1577
1578 except Exception as exp:
1579 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1580
1581 # image upload creates the template name as '<catalog name> Template'.
1582 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1583 power_on = 'false'
1584 if start:
1585 power_on = 'true'
1586
1587 # client must provide at least one entry in net_list; if not, we report an error
1588 #If a net has use == 'mgmt', then configure it as the primary net & use its NIC index as the primary NIC
1589 #If there is no mgmt net, then the 1st net in net_list is considered the primary net (illustrative example below)
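# Illustrative example (hypothetical values): with net_list = [{'use': 'data', ...}, {'use': 'mgmt', ...}]
# the second entry becomes primary_net; if no entry has use == 'mgmt', net_list[0] is used instead.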
1590 primary_net = None
1591 primary_netname = None
1592 primary_net_href = None
1593 network_mode = 'bridged'
1594 if net_list is not None and len(net_list) > 0:
1595 for net in net_list:
1596 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1597 primary_net = net
1598 if primary_net is None:
1599 primary_net = net_list[0]
1600
1601 try:
1602 primary_net_id = primary_net['net_id']
1603 url_list = [self.url, '/api/network/', primary_net_id]
1604 primary_net_href = ''.join(url_list)
1605 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1606 if 'name' in network_dict:
1607 primary_netname = network_dict['name']
1608
1609 except KeyError:
1610 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1611 else:
1612 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create vApp {}: network list is empty.".format(name))
1613
1614 # use: 'data', 'bridge', 'mgmt'
1615 # create vApp. Set vcpu and ram based on flavor id.
1616 try:
1617 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1618 if not vdc_obj:
1619 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
1620
1621 for retry in (1,2):
1622 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1623 catalog_items = [items.attrib]
1624
1625 if len(catalog_items) == 1:
1626 if self.client:
1627 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1628 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1629
1630 response = self.perform_request(req_type='GET',
1631 url=catalog_items[0].get('href'),
1632 headers=headers)
1633 catalogItem = XmlElementTree.fromstring(response.content)
1634 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1635 vapp_tempalte_href = entity.get("href")
1636
1637 response = self.perform_request(req_type='GET',
1638 url=vapp_tempalte_href,
1639 headers=headers)
1640 if response.status_code != requests.codes.ok:
1641 self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
1642 response.status_code))
1643 else:
1644 result = (response.content).replace("\n"," ")
1645
1646 src = re.search('<Vm goldMaster="false"\sstatus="\d+"\sname="(.*?)"\s'
1647 'id="(\w+:\w+:vm:.*?)"\shref="(.*?)"\s'
1648 'type="application/vnd\.vmware\.vcloud\.vm\+xml',result)
1649 if src:
1650 vm_name = src.group(1)
1651 vm_id = src.group(2)
1652 vm_href = src.group(3)
1653
1654 cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1655 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1656 cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
1657
1658 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
1659 vdc_id = vdc.get('id').split(':')[-1]
1660 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
1661 vdc_id)
1662 data = """<?xml version="1.0" encoding="UTF-8"?>
1663 <InstantiateVAppTemplateParams
1664 xmlns="http://www.vmware.com/vcloud/v1.5"
1665 name="{}"
1666 deploy="false"
1667 powerOn="false"
1668 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
1669 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1670 <Description>Vapp instantiation</Description>
1671 <InstantiationParams>
1672 <NetworkConfigSection>
1673 <ovf:Info>Configuration parameters for logical networks</ovf:Info>
1674 <NetworkConfig networkName="{}">
1675 <Configuration>
1676 <ParentNetwork href="{}" />
1677 <FenceMode>bridged</FenceMode>
1678 </Configuration>
1679 </NetworkConfig>
1680 </NetworkConfigSection>
1681 <LeaseSettingsSection
1682 type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
1683 <ovf:Info>Lease Settings</ovf:Info>
1684 <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
1685 <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
1686 </LeaseSettingsSection>
1687 </InstantiationParams>
1688 <Source href="{}"/>
1689 <SourcedItem>
1690 <Source href="{}" id="{}" name="{}"
1691 type="application/vnd.vmware.vcloud.vm+xml"/>
1692 <VmGeneralParams>
1693 <NeedsCustomization>false</NeedsCustomization>
1694 </VmGeneralParams>
1695 <InstantiationParams>
1696 <NetworkConnectionSection>
1697 <ovf:Info>Specifies the available VM network connections</ovf:Info>
1698 <NetworkConnection network="{}">
1699 <NetworkConnectionIndex>0</NetworkConnectionIndex>
1700 <IsConnected>true</IsConnected>
1701 <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
1702 </NetworkConnection>
1703 </NetworkConnectionSection><ovf:VirtualHardwareSection>
1704 <ovf:Info>Virtual hardware requirements</ovf:Info>
1705 <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
1706 xmlns:vmw="http://www.vmware.com/schema/ovf">
1707 <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
1708 <rasd:Description>Number of Virtual CPUs</rasd:Description>
1709 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
1710 <rasd:InstanceID>4</rasd:InstanceID>
1711 <rasd:Reservation>0</rasd:Reservation>
1712 <rasd:ResourceType>3</rasd:ResourceType>
1713 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
1714 <rasd:Weight>0</rasd:Weight>
1715 <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
1716 </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
1717 <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
1718 <rasd:Description>Memory Size</rasd:Description>
1719 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
1720 <rasd:InstanceID>5</rasd:InstanceID>
1721 <rasd:Reservation>0</rasd:Reservation>
1722 <rasd:ResourceType>4</rasd:ResourceType>
1723 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
1724 <rasd:Weight>0</rasd:Weight>
1725 </ovf:Item>
1726 </ovf:VirtualHardwareSection>
1727 </InstantiationParams>
1728 </SourcedItem>
1729 <AllEULAsAccepted>false</AllEULAsAccepted>
1730 </InstantiateVAppTemplateParams>""".format(vmname_andid,
1731 primary_netname,
1732 primary_net_href,
1733 vapp_tempalte_href,
1734 vm_href,
1735 vm_id,
1736 vm_name,
1737 primary_netname,
1738 cpu=cpus,
1739 core=cores,
1740 memory=memory_mb)
1741
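# Descriptive note: the POST below submits the InstantiateVAppTemplateParams document built above;
# vCD is expected to answer with HTTP 201 and a Task element, which is extracted and polled by the
# status-code check and get_task_from_response() handling that follow.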
1742 response = self.perform_request(req_type='POST',
1743 url=instantiate_vapp_href,
1744 headers=headers,
1745 data=data)
1746
1747 if response.status_code != 201:
1748 self.logger.error("REST call {} failed reason : {}"\
1749 "status code : {}".format(instantiate_vapp_href,
1750 response.content,
1751 response.status_code))
1752 raise vimconn.vimconnException("new_vminstance(): Failed to create"\
1753 "vAapp {}".format(vmname_andid))
1754 else:
1755 vapptask = self.get_task_from_response(response.content)
1756
1757 if vapptask is None and retry==1:
1758 self.get_token() # Retry getting token
1759 continue
1760 else:
1761 break
1762
1763 if vapptask is None or vapptask is False:
1764 raise vimconn.vimconnUnexpectedResponse(
1765 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1766
1767 # wait for task to complete
1768 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
1769
1770 if result.get('status') == 'success':
1771 self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
1772 else:
1773 raise vimconn.vimconnUnexpectedResponse(
1774 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1775
1776 except Exception as exp:
1777 raise vimconn.vimconnUnexpectedResponse(
1778 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1779
1780 # we should have now vapp in undeployed state.
1781 try:
1782 vdc_obj = VDC(self.client, href=vdc.get('href'))
1783 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1784 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1785 vapp = VApp(self.client, resource=vapp_resource)
1786
1787 except Exception as exp:
1788 raise vimconn.vimconnUnexpectedResponse(
1789 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1790 .format(vmname_andid, exp))
1791
1792 if vapp_uuid is None:
1793 raise vimconn.vimconnUnexpectedResponse(
1794 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1795 vmname_andid))
1796
1797 #Add PCI passthrough/SRIOV configurations
1798 vm_obj = None
1799 pci_devices_info = []
1800 reserve_memory = False
1801
1802 for net in net_list:
1803 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1804 pci_devices_info.append(net)
1805 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
1806 reserve_memory = True
1807
1808 #Add PCI
1809 if len(pci_devices_info) > 0:
1810 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1811 vmname_andid ))
1812 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1813 pci_devices_info,
1814 vmname_andid)
1815 if PCI_devices_status:
1816 self.logger.info("Added PCI devives {} to VM {}".format(
1817 pci_devices_info,
1818 vmname_andid)
1819 )
1820 reserve_memory = True
1821 else:
1822 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1823 pci_devices_info,
1824 vmname_andid)
1825 )
1826
1827 # Modify vm disk
1828 if vm_disk:
1829 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1830 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1831 if result :
1832 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1833
1834 #Add new or existing disks to vApp
1835 if disk_list:
1836 added_existing_disk = False
1837 for disk in disk_list:
1838 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1839 image_id = disk['image_id']
1840 # Adding CD-ROM to VM
1841 # will revisit code once specification ready to support this feature
1842 self.insert_media_to_vm(vapp, image_id)
1843 elif "image_id" in disk and disk["image_id"] is not None:
1844 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1845 disk["image_id"] , vapp_uuid))
1846 self.add_existing_disk(catalogs=catalogs,
1847 image_id=disk["image_id"],
1848 size = disk["size"],
1849 template_name=templateName,
1850 vapp_uuid=vapp_uuid
1851 )
1852 added_existing_disk = True
1853 else:
1854 #Wait till added existing disk gets reflected into vCD database/API
1855 if added_existing_disk:
1856 time.sleep(5)
1857 added_existing_disk = False
1858 self.add_new_disk(vapp_uuid, disk['size'])
1859
1860 if numas:
1861 # Assigning numa affinity setting
1862 for numa in numas:
1863 if 'paired-threads-id' in numa:
1864 paired_threads_id = numa['paired-threads-id']
1865 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1866
1867 # add NICs & connect to networks in netlist
1868 try:
1869 vdc_obj = VDC(self.client, href=vdc.get('href'))
1870 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1871 vapp = VApp(self.client, resource=vapp_resource)
1872
1873 self.logger.info("Removing primary NIC: ")
1874 # First remove all NICs so that NIC properties can be adjusted as needed
1875 self.remove_primary_network_adapter_from_all_vms(vapp)
1876
1877 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1878 primary_nic_index = 0
1879 nicIndex = 0
1880 for net in net_list:
1881 # openmano uses network id in UUID format.
1882 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up a name
1883 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1884 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1885
1886 if 'net_id' not in net:
1887 continue
1888
1889 #Using net_id as vim_id, i.e. the vim interface id, as we do not have a separate vim interface id
1890 #Same will be returned in refresh_vms_status() as vim_interface_id
1891 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1892
1893 interface_net_id = net['net_id']
1894 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1895 interface_network_mode = net['use']
1896
1897 if interface_network_mode == 'mgmt':
1898 primary_nic_index = nicIndex
1899
1900 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1901 - DHCP (The IP address is obtained from a DHCP service.)
1902 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1903 - NONE (No IP addressing mode specified.)"""
1904
1905 if primary_netname is not None:
1906 self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
1907 nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
1908 #For python3
1909 #nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
1910 if len(nets) == 1:
1911 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
1912
1913 if interface_net_name != primary_netname:
1914 # connect network to VM - with all DHCP by default
1915 self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
1916 task = vapp.connect_org_vdc_network(nets[0].get('name'))
1917 self.client.get_task_monitor().wait_for_success(task=task)
1918
1919 type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
1920 if 'type' in net and net['type'] not in type_list:
1921 # fetching nic type from vnf
1922 if 'model' in net:
1923 if net['model'] is not None and net['model'].lower() == 'virtio':
1924 nic_type = 'VMXNET3'
1925 else:
1926 nic_type = net['model']
1927
1928 self.logger.info("new_vminstance(): adding network adapter "\
1929 "to a network {}".format(nets[0].get('name')))
1930 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1931 primary_nic_index,
1932 nicIndex,
1933 net,
1934 nic_type=nic_type)
1935 else:
1936 self.logger.info("new_vminstance(): adding network adapter "\
1937 "to a network {}".format(nets[0].get('name')))
1938 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1939 primary_nic_index,
1940 nicIndex,
1941 net)
1942 nicIndex += 1
1943
1944 # cloud-init for ssh-key injection
1945 if cloud_config:
1946 self.cloud_init(vapp,cloud_config)
1947
1948 # If VM has PCI devices or SRIOV reserve memory for VM
1949 if reserve_memory:
1950 self.reserve_memory_for_all_vms(vapp, memory_mb)
1951
1952 self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
1953
1954 vapp_id = vapp_resource.get('id').split(':')[-1]
1955 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
1956 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
1957 if result.get('status') == 'success':
1958 self.logger.info("new_vminstance(): Successfully power on "\
1959 "vApp {}".format(vmname_andid))
1960 else:
1961 self.logger.error("new_vminstance(): failed to power on vApp "\
1962 "{}".format(vmname_andid))
1963
1964 except Exception as exp :
1965 # it might be the case that a specific mandatory entry in the dict is empty, or some other pyvcloud exception
1966 self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
1967 .format(name, exp))
1968 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
1969 .format(name, exp))
1970
1971 # check if the vApp is deployed and, if that is the case, return the vApp UUID, otherwise -1
1972 wait_time = 0
1973 vapp_uuid = None
1974 while wait_time <= MAX_WAIT_TIME:
1975 try:
1976 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1977 vapp = VApp(self.client, resource=vapp_resource)
1978 except Exception as exp:
1979 raise vimconn.vimconnUnexpectedResponse(
1980 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1981 .format(vmname_andid, exp))
1982
1983 #if vapp and vapp.me.deployed:
1984 if vapp and vapp_resource.get('deployed') == 'true':
1985 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1986 break
1987 else:
1988 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1989 time.sleep(INTERVAL_TIME)
1990
1991 wait_time +=INTERVAL_TIME
1992
1993 #SET Affinity Rule for VM
1994 #Pre-requisites: User has created Host Groups in vCenter with the respective Hosts to be used
1995 #While creating VIM account user has to pass the Host Group names in availability_zone list
1996 #"availability_zone" is a part of VIM "config" parameters
1997 #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
1998 #Host groups are referred to as availability zones
1999 #With the following procedure, the deployed VM will be added into a VM group.
2000 #Then a VM to Host Affinity rule will be created using the VM group & Host group.
2001 if(availability_zone_list):
2002 self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
2003 #Admin access required for creating Affinity rules
2004 client = self.connect_as_admin()
2005 if not client:
2006 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
2007 else:
2008 self.client = client
2009 if self.client:
2010 headers = {'Accept':'application/*+xml;version=27.0',
2011 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2012 #Step1: Get provider vdc details from organization
2013 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2014 if pvdc_href is not None:
2015 #Step2: Found required pvdc, now get resource pool information
2016 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2017 if respool_href is None:
2018 #Raise error if respool_href not found
2019 msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
2020 .format(pvdc_href)
2021 self.log_message(msg)
2022
2023 #Step3: Verify requested availability zone(hostGroup) is present in vCD
2024 # get availability Zone
2025 vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
2026 # check if provided av zone(hostGroup) is present in vCD VIM
2027 status = self.check_availibility_zone(vm_az, respool_href, headers)
2028 if status is False:
2029 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
2030 "resource pool {} status: {}".format(vm_az,respool_href,status)
2031 self.log_message(msg)
2032 else:
2033 self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
2034
2035 #Step4: Find VM group references to create vm group
2036 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2037 if vmgrp_href == None:
2038 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2039 self.log_message(msg)
2040
2041 #Step5: Create a VmGroup with name az_VmGroup
2042 vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
2043 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2044 if status is not True:
2045 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
2046 self.log_message(msg)
2047
2048 #VM Group url to add vms to vm group
2049 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
2050
2051 #Step6: Add VM to VM Group
2052 #Find VM uuid from vapp_uuid
2053 vm_details = self.get_vapp_details_rest(vapp_uuid)
2054 vm_uuid = vm_details['vmuuid']
2055
2056 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2057 if status is not True:
2058 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
2059 self.log_message(msg)
2060
2061 #Step7: Create VM to Host affinity rule
2062 addrule_href = self.get_add_rule_reference (respool_href, headers)
2063 if addrule_href is None:
2064 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
2065 .format(respool_href)
2066 self.log_message(msg)
2067
2068 status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
2069 if status is False:
2070 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
2071 .format(name, vm_az)
2072 self.log_message(msg)
2073 else:
2074 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
2075 .format(name, vm_az))
2076 #Reset token to a normal user to perform other operations
2077 self.get_token()
2078
2079 if vapp_uuid is not None:
2080 return vapp_uuid, None
2081 else:
2082 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
2083
2084
2085 def get_vcd_availibility_zones(self,respool_href, headers):
2086 """ Method to find presence of av zone is VIM resource pool
2087
2088 Args:
2089 respool_href - resource pool href
2090 headers - header information
2091
2092 Returns:
2093 vcd_az - list of availability zones present in vCD
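Example of a returned list (illustrative only, following the host group naming used in the
VIM config example above): ['HG_170', 'HG_174', 'HG_175']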
2094 """
2095 vcd_az = []
2096 url=respool_href
2097 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2098
2099 if resp.status_code != requests.codes.ok:
2100 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2101 else:
2102 #Get the href to hostGroups and find provided hostGroup is present in it
2103 resp_xml = XmlElementTree.fromstring(resp.content)
2104 for child in resp_xml:
2105 if 'VMWProviderVdcResourcePool' in child.tag:
2106 for schild in child:
2107 if 'Link' in schild.tag:
2108 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2109 hostGroup = schild.attrib.get('href')
2110 hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
2111 if hg_resp.status_code != requests.codes.ok:
2112 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
2113 else:
2114 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2115 for hostGroup in hg_resp_xml:
2116 if 'HostGroup' in hostGroup.tag:
2117 #append host group name to the list
2118 vcd_az.append(hostGroup.attrib.get("name"))
2119 return vcd_az
2120
2121
2122 def set_availability_zones(self):
2123 """
2124 Set vim availability zone
2125 """
2126
2127 vim_availability_zones = None
2128 availability_zone = None
2129 if 'availability_zone' in self.config:
2130 vim_availability_zones = self.config.get('availability_zone')
2131 if isinstance(vim_availability_zones, str):
2132 availability_zone = [vim_availability_zones]
2133 elif isinstance(vim_availability_zones, list):
2134 availability_zone = vim_availability_zones
2135 else:
2136 return availability_zone
2137
2138 return availability_zone
2139
2140
2141 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2142 """
2143 Return the availability zone to be used by the created VM.
2144 returns: The VIM availability zone to be used or None
2145 """
2146 if availability_zone_index is None:
2147 if not self.config.get('availability_zone'):
2148 return None
2149 elif isinstance(self.config.get('availability_zone'), str):
2150 return self.config['availability_zone']
2151 else:
2152 return self.config['availability_zone'][0]
2153
2154 vim_availability_zones = self.availability_zone
2155
2156 # check if the VIM offers enough availability zones as described in the VNFD
2157 if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
2158 # check if all the names of NFV AV match VIM AV names
2159 match_by_index = False
2160 for av in availability_zone_list:
2161 if av not in vim_availability_zones:
2162 match_by_index = True
2163 break
2164 if match_by_index:
2165 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
2166 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
2167 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
2168 self.logger.debug("VIM Availability zones will be used by index")
2169 return vim_availability_zones[availability_zone_index]
2170 else:
2171 return availability_zone_list[availability_zone_index]
2172 else:
2173 raise vimconn.vimconnConflictException("Not enough availability zones at VIM for this deployment")
2174
2175
2176 def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
2177 """ Method to create VM to Host Affinity rule in vCD
2178
2179 Args:
2180 addrule_href - href to make a POST request
2181 vmgrpname - name of the VM group created
2182 hostgrpname - name of the host group created earlier
2183 polarity - Affinity or Anti-affinity (default: Affinity)
2184 headers - headers to make REST call
2185
2186 Returns:
2187 True- if rule is created
2188 False- Failed to create rule due to some error
2189
2190 """
2191 task_status = False
2192 rule_name = polarity + "_" + vmgrpname
2193 payload = """<?xml version="1.0" encoding="UTF-8"?>
2194 <vmext:VMWVmHostAffinityRule
2195 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2196 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2197 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2198 <vcloud:Name>{}</vcloud:Name>
2199 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2200 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2201 <vcloud:Polarity>{}</vcloud:Polarity>
2202 <vmext:HostGroupName>{}</vmext:HostGroupName>
2203 <vmext:VmGroupName>{}</vmext:VmGroupName>
2204 </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
2205
2206 resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
2207
2208 if resp.status_code != requests.codes.accepted:
2209 self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
2210 task_status = False
2211 return task_status
2212 else:
2213 affinity_task = self.get_task_from_response(resp.content)
2214 self.logger.debug ("affinity_task: {}".format(affinity_task))
2215 if affinity_task is None or affinity_task is False:
2216 raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
2217 # wait for task to complete
2218 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2219 if result.get('status') == 'success':
2220 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
2221 return True
2222 else:
2223 raise vimconn.vimconnUnexpectedResponse(
2224 "failed to create affinity rule {}".format(rule_name))
2225
2226
2227 def get_add_rule_reference (self, respool_href, headers):
2228 """ This method finds href to add vm to host affinity rule to vCD
2229
2230 Args:
2231 respool_href- href to resource pool
2232 headers- header information to make REST call
2233
2234 Returns:
2235 None - if no valid href to add rule found or
2236 addrule_href - href to add vm to host affinity rule of resource pool
2237 """
2238 addrule_href = None
2239 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2240
2241 if resp.status_code != requests.codes.ok:
2242 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2243 else:
2244
2245 resp_xml = XmlElementTree.fromstring(resp.content)
2246 for child in resp_xml:
2247 if 'VMWProviderVdcResourcePool' in child.tag:
2248 for schild in child:
2249 if 'Link' in schild.tag:
2250 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
2251 schild.attrib.get('rel') == "add":
2252 addrule_href = schild.attrib.get('href')
2253 break
2254
2255 return addrule_href
2256
2257
2258 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2259 """ Method to add deployed VM to newly created VM Group.
2260 This is required to create VM to Host affinity in vCD
2261
2262 Args:
2263 vm_uuid- newly created vm uuid
2264 vmGroupNameURL- URL to VM Group name
2265 vmGroup_name- Name of VM group created
2266 headers- Headers for REST request
2267
2268 Returns:
2269 True- if VM added to VM group successfully
2270 False- if any error is encountered
2271 """
2272
2273 addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
2274
2275 if addvm_resp.status_code != requests.codes.ok:
2276 self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
2277 .format(vmGroupNameURL, addvm_resp.status_code))
2278 return False
2279 else:
2280 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2281 for child in resp_xml:
2282 if child.tag.split('}')[1] == 'Link':
2283 if child.attrib.get("rel") == "addVms":
2284 addvmtogrpURL = child.attrib.get("href")
2285
2286 #Get vm details
2287 url_list = [self.url, '/api/vApp/vm-',vm_uuid]
2288 vmdetailsURL = ''.join(url_list)
2289
2290 resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
2291
2292 if resp.status_code != requests.codes.ok:
2293 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
2294 return False
2295
2296 #Parse VM details
2297 resp_xml = XmlElementTree.fromstring(resp.content)
2298 if resp_xml.tag.split('}')[1] == "Vm":
2299 vm_id = resp_xml.attrib.get("id")
2300 vm_name = resp_xml.attrib.get("name")
2301 vm_href = resp_xml.attrib.get("href")
2302 #print vm_id, vm_name, vm_href
2303 #Add VM into VMgroup
2304 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2305 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2306 xmlns="http://www.vmware.com/vcloud/versions" \
2307 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2308 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2309 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2310 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2311 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2312 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2313 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2314 <ns2:VmReference href="{}" id="{}" name="{}" \
2315 type="application/vnd.vmware.vcloud.vm+xml" />\
2316 </ns2:Vms>""".format(vm_href, vm_id, vm_name)
2317
2318 addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
2319
2320 if addvmtogrp_resp.status_code != requests.codes.accepted:
2321 self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
2322 return False
2323 else:
2324 self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
2325 return True
2326
2327
2328 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2329 """Method to create a VM group in vCD
2330
2331 Args:
2332 vmgroup_name : Name of VM group to be created
2333 vmgroup_href : href for vmgroup
2334 headers- Headers for REST request
2335 """
2336 #POST to add URL with required data
2337 vmgroup_status = False
2338 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2339 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2340 <vmCount>1</vmCount>\
2341 </VMWVmGroup>""".format(vmgroup_name)
2342 resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
2343
2344 if resp.status_code != requests.codes.accepted:
2345 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
2346 return vmgroup_status
2347 else:
2348 vmgroup_task = self.get_task_from_response(resp.content)
2349 if vmgroup_task is None or vmgroup_task is False:
2350 raise vimconn.vimconnUnexpectedResponse(
2351 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2352
2353 # wait for task to complete
2354 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2355
2356 if result.get('status') == 'success':
2357 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
2358 #time.sleep(10)
2359 vmgroup_status = True
2360 return vmgroup_status
2361 else:
2362 raise vimconn.vimconnUnexpectedResponse(\
2363 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2364
2365
2366 def find_vmgroup_reference(self, url, headers):
2367 """ Method to create a new VMGroup which is required to add created VM
2368 Args:
2369 url- resource pool href
2370 headers- header information
2371
2372 Returns:
2373 vmgrp_href - href used to create a VM group, or None if not found
2374 """
2375 #Perform GET on resource pool to find 'add' link to create VMGroup
2376 #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2377 vmgrp_href = None
2378 resp = self.perform_request(req_type='GET',url=url, headers=headers)
2379
2380 if resp.status_code != requests.codes.ok:
2381 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2382 else:
2383 #Get the href to add vmGroup to vCD
2384 resp_xml = XmlElementTree.fromstring(resp.content)
2385 for child in resp_xml:
2386 if 'VMWProviderVdcResourcePool' in child.tag:
2387 for schild in child:
2388 if 'Link' in schild.tag:
2389 #Find href with type VMGroup and rel with add
2390 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
2391 and schild.attrib.get('rel') == "add":
2392 vmgrp_href = schild.attrib.get('href')
2393 return vmgrp_href
2394
2395
2396 def check_availibility_zone(self, az, respool_href, headers):
2397 """ Method to verify requested av zone is present or not in provided
2398 resource pool
2399
2400 Args:
2401 az - name of hostgroup (availability_zone)
2402 respool_href - Resource Pool href
2403 headers - Headers to make REST call
2404 Returns:
2405 az_found - True if availability_zone is found else False
2406 """
2407 az_found = False
2408 headers['Accept']='application/*+xml;version=27.0'
2409 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2410
2411 if resp.status_code != requests.codes.ok:
2412 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2413 else:
2414 #Get the href to hostGroups and find provided hostGroup is present in it
2415 resp_xml = XmlElementTree.fromstring(resp.content)
2416
2417 for child in resp_xml:
2418 if 'VMWProviderVdcResourcePool' in child.tag:
2419 for schild in child:
2420 if 'Link' in schild.tag:
2421 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2422 hostGroup_href = schild.attrib.get('href')
2423 hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
2424 if hg_resp.status_code != requests.codes.ok:
2425 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
2426 else:
2427 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2428 for hostGroup in hg_resp_xml:
2429 if 'HostGroup' in hostGroup.tag:
2430 if hostGroup.attrib.get("name") == az:
2431 az_found = True
2432 break
2433 return az_found
2434
2435
2436 def get_pvdc_for_org(self, org_vdc, headers):
2437 """ This method gets provider vdc references from organisation
2438
2439 Args:
2440 org_vdc - name of the organisation VDC to find pvdc
2441 headers - headers to make REST call
2442
2443 Returns:
2444 None - if no pvdc href found else
2445 pvdc_href - href to pvdc
2446 """
2447
2448 #Get provider VDC references from vCD
2449 pvdc_href = None
2450 #url = '<vcd url>/api/admin/extension/providerVdcReferences'
2451 url_list = [self.url, '/api/admin/extension/providerVdcReferences']
2452 url = ''.join(url_list)
2453
2454 response = self.perform_request(req_type='GET',url=url, headers=headers)
2455 if response.status_code != requests.codes.ok:
2456 self.logger.debug ("REST API call {} failed. Return status code {}"\
2457 .format(url, response.status_code))
2458 else:
2459 xmlroot_response = XmlElementTree.fromstring(response.content)
2460 for child in xmlroot_response:
2461 if 'ProviderVdcReference' in child.tag:
2462 pvdc_href = child.attrib.get('href')
2463 #Get vdcReferences to find org
2464 pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2465 if pvdc_resp.status_code != requests.codes.ok:
2466 raise vimconn.vimconnException("REST API call {} failed. "\
2467 "Return status code {}"\
2468 .format(url, pvdc_resp.status_code))
2469
2470 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
2471 for child in pvdc_resp_xml:
2472 if 'Link' in child.tag:
2473 if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
2474 vdc_href = child.attrib.get('href')
2475
2476 #Check if provided org is present in vdc
2477 vdc_resp = self.perform_request(req_type='GET',
2478 url=vdc_href,
2479 headers=headers)
2480 if vdc_resp.status_code != requests.codes.ok:
2481 raise vimconn.vimconnException("REST API call {} failed. "\
2482 "Return status code {}"\
2483 .format(url, vdc_resp.status_code))
2484 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
2485 for child in vdc_resp_xml:
2486 if 'VdcReference' in child.tag:
2487 if child.attrib.get('name') == org_vdc:
2488 return pvdc_href
2489
2490
2491 def get_resource_pool_details(self, pvdc_href, headers):
2492 """ Method to get resource pool information.
2493 Host groups are a property of the resource pool.
2494 To get host groups, we need to GET details of resource pool.
2495
2496 Args:
2497 pvdc_href: href to pvdc details
2498 headers: headers
2499
2500 Returns:
2501 respool_href - Returns href link reference to resource pool
2502 """
2503 respool_href = None
2504 resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2505
2506 if resp.status_code != requests.codes.ok:
2507 self.logger.debug ("REST API call {} failed. Return status code {}"\
2508 .format(pvdc_href, resp.status_code))
2509 else:
2510 respool_resp_xml = XmlElementTree.fromstring(resp.content)
2511 for child in respool_resp_xml:
2512 if 'Link' in child.tag:
2513 if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
2514 respool_href = child.attrib.get("href")
2515 break
2516 return respool_href
2517
2518
2519 def log_message(self, msg):
2520 """
2521 Method to log error messages related to Affinity rule creation
2522 in new_vminstance & raise Exception
2523 Args :
2524 msg - Error message to be logged
2525
2526 """
2527 #get token to connect vCD as a normal user
2528 self.get_token()
2529 self.logger.debug(msg)
2530 raise vimconn.vimconnException(msg)
2531
2532
2533 ##
2534 ##
2535 ## based on current discussion
2536 ##
2537 ##
2538 ## server:
2539 # created: '2016-09-08T11:51:58'
2540 # description: simple-instance.linux1.1
2541 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
2542 # hostId: e836c036-74e7-11e6-b249-0800273e724c
2543 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
2544 # status: ACTIVE
2545 # error_msg:
2546 # interfaces: …
2547 #
2548 def get_vminstance(self, vim_vm_uuid=None):
2549 """Returns the VM instance information from VIM"""
2550
2551 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
2552
2553 org, vdc = self.get_vdc_details()
2554 if vdc is None:
2555 raise vimconn.vimconnConnectionException(
2556 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2557
2558 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
2559 if not vm_info_dict:
2560 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2561 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2562
2563 status_key = vm_info_dict['status']
2564 error = ''
2565 try:
2566 vm_dict = {'created': vm_info_dict['created'],
2567 'description': vm_info_dict['name'],
2568 'status': vcdStatusCode2manoFormat[int(status_key)],
2569 'hostId': vm_info_dict['vmuuid'],
2570 'error_msg': error,
2571 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2572
2573 if 'interfaces' in vm_info_dict:
2574 vm_dict['interfaces'] = vm_info_dict['interfaces']
2575 else:
2576 vm_dict['interfaces'] = []
2577 except KeyError:
2578 vm_dict = {'created': '',
2579 'description': '',
2580 'status': vcdStatusCode2manoFormat[int(-1)],
2581 'hostId': vm_info_dict['vmuuid'],
2582 'error_msg': "Inconsistent state",
2583 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2584
2585 return vm_dict
2586
2587 def delete_vminstance(self, vm__vim_uuid, created_items=None):
2588 """Method poweroff and remove VM instance from vcloud director network.
2589
2590 Args:
2591 vm__vim_uuid: VM UUID
2592
2593 Returns:
2594 Returns the instance identifier
2595 """
2596
2597 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
2598
2599 org, vdc = self.get_vdc_details()
2600 vdc_obj = VDC(self.client, href=vdc.get('href'))
2601 if vdc_obj is None:
2602 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
2603 self.tenant_name))
2604 raise vimconn.vimconnException(
2605 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2606
2607 try:
2608 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2609 vapp_resource = vdc_obj.get_vapp(vapp_name)
2610 vapp = VApp(self.client, resource=vapp_resource)
2611 if vapp_name is None:
2612 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2613 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2614 else:
2615 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2616
2617 # Delete vApp and wait for status change if task executed and vApp is None.
2618
2619 if vapp:
2620 if vapp_resource.get('deployed') == 'true':
2621 self.logger.info("Powering off vApp {}".format(vapp_name))
2622 #Power off vApp
2623 powered_off = False
2624 wait_time = 0
2625 while wait_time <= MAX_WAIT_TIME:
2626 power_off_task = vapp.power_off()
2627 result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
2628
2629 if result.get('status') == 'success':
2630 powered_off = True
2631 break
2632 else:
2633 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
2634 time.sleep(INTERVAL_TIME)
2635
2636 wait_time +=INTERVAL_TIME
2637 if not powered_off:
2638 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
2639 else:
2640 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
2641
2642 #Undeploy vApp
2643 self.logger.info("Undeploy vApp {}".format(vapp_name))
2644 wait_time = 0
2645 undeployed = False
2646 while wait_time <= MAX_WAIT_TIME:
2647 vapp = VApp(self.client, resource=vapp_resource)
2648 if not vapp:
2649 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2650 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2651 undeploy_task = vapp.undeploy()
2652
2653 result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
2654 if result.get('status') == 'success':
2655 undeployed = True
2656 break
2657 else:
2658 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
2659 time.sleep(INTERVAL_TIME)
2660
2661 wait_time +=INTERVAL_TIME
2662
2663 if not undeployed:
2664 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
2665
2666 # delete vapp
2667 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
2668
2669 if vapp is not None:
2670 wait_time = 0
2671 result = False
2672
2673 while wait_time <= MAX_WAIT_TIME:
2674 vapp = VApp(self.client, resource=vapp_resource)
2675 if not vapp:
2676 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2677 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2678
2679 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
2680
2681 result = self.client.get_task_monitor().wait_for_success(task=delete_task)
2682 if result.get('status') == 'success':
2683 break
2684 else:
2685 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
2686 time.sleep(INTERVAL_TIME)
2687
2688 wait_time +=INTERVAL_TIME
2689
2690 if result is None:
2691 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
2692 else:
2693 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
2694 return vm__vim_uuid
2695 except:
2696 self.logger.debug(traceback.format_exc())
2697 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
2698
2699
2700 def refresh_vms_status(self, vm_list):
2701 """Get the status of the virtual machines and their interfaces/ports
2702 Params: the list of VM identifiers
2703 Returns a dictionary with:
2704 vm_id: #VIM id of this Virtual Machine
2705 status: #Mandatory. Text with one of:
2706 # DELETED (not found at vim)
2707 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2708 # OTHER (Vim reported other status not understood)
2709 # ERROR (VIM indicates an ERROR status)
2710 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2711 # CREATING (on building process), ERROR
2712 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
2713 #
2714 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2715 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2716 interfaces:
2717 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2718 mac_address: #Text format XX:XX:XX:XX:XX:XX
2719 vim_net_id: #network id where this interface is connected
2720 vim_interface_id: #interface/port VIM id
2721 ip_address: #null, or text with IPv4, IPv6 address
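Illustrative example of a returned entry (hypothetical values):
    {'<vm uuid>': {'status': 'ACTIVE', 'error_msg': '', 'vim_info': '...',
                   'interfaces': [{'mac_address': '00:50:56:aa:bb:cc',
                                   'vim_net_id': '<net uuid>',
                                   'vim_interface_id': '<net uuid>',
                                   'ip_address': '10.0.0.5'}]}}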
2722 """
2723
2724 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
2725
2726 org,vdc = self.get_vdc_details()
2727 if vdc is None:
2728 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2729
2730 vms_dict = {}
2731 nsx_edge_list = []
2732 for vmuuid in vm_list:
2733 vapp_name = self.get_namebyvappid(vmuuid)
2734 if vapp_name is not None:
2735
2736 try:
2737 vm_pci_details = self.get_vm_pci_details(vmuuid)
2738 vdc_obj = VDC(self.client, href=vdc.get('href'))
2739 vapp_resource = vdc_obj.get_vapp(vapp_name)
2740 the_vapp = VApp(self.client, resource=vapp_resource)
2741
2742 vm_details = {}
2743 for vm in the_vapp.get_all_vms():
2744 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2745 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2746 response = self.perform_request(req_type='GET',
2747 url=vm.get('href'),
2748 headers=headers)
2749
2750 if response.status_code != 200:
2751 self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
2752 "status code : {}".format(vm.get('href'),
2753 response.content,
2754 response.status_code))
2755 raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
2756 "VM details")
2757 xmlroot = XmlElementTree.fromstring(response.content)
2758
2759
2760 result = response.content.replace("\n"," ")
2761 hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
2762 if hdd_match:
2763 hdd_mb = hdd_match.group(1)
2764 vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
2765 cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
2766 if cpus_match:
2767 cpus = cpus_match.group(1)
2768 vm_details['cpus'] = int(cpus) if cpus else None
2769 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
2770 vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
2771 vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
2772 vm_details['id'] = xmlroot.get('id')
2773 vm_details['name'] = xmlroot.get('name')
2774 vm_info = [vm_details]
2775 if vm_pci_details:
2776 vm_info[0].update(vm_pci_details)
2777
2778 vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2779 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2780 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2781
2782 # get networks
2783 vm_ip = None
2784 vm_mac = None
2785 networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
2786 for network in networks:
2787 mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
2788 vm_mac = mac_s.group(1) if mac_s else None
2789 ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
2790 vm_ip = ip_s.group(1) if ip_s else None
2791
2792 if vm_ip is None:
2793 if not nsx_edge_list:
2794 nsx_edge_list = self.get_edge_details()
2795 if nsx_edge_list is None:
2796 raise vimconn.vimconnException("refresh_vms_status:"\
2797 "Failed to get edge details from NSX Manager")
2798 if vm_mac is not None:
2799 vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
2800
2801 net_s = re.search('network="(.*?)"',network)
2802 network_name = net_s.group(1) if net_s else None
2803
2804 vm_net_id = self.get_network_id_by_name(network_name)
2805 interface = {"mac_address": vm_mac,
2806 "vim_net_id": vm_net_id,
2807 "vim_interface_id": vm_net_id,
2808 "ip_address": vm_ip}
2809
2810 vm_dict["interfaces"].append(interface)
2811
2812 # add a vm to vm dict
2813 vms_dict.setdefault(vmuuid, vm_dict)
2814 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
2815 except Exception as exp:
2816 self.logger.debug("Error in response {}".format(exp))
2817 self.logger.debug(traceback.format_exc())
2818
2819 return vms_dict
2820
2821
2822 def get_edge_details(self):
2823 """Get the NSX edge list from NSX Manager
2824 Returns list of NSX edges
2825 """
2826 edge_list = []
2827 rheaders = {'Content-Type': 'application/xml'}
2828 nsx_api_url = '/api/4.0/edges'
2829
2830 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2831
2832 try:
2833 resp = requests.get(self.nsx_manager + nsx_api_url,
2834 auth = (self.nsx_user, self.nsx_password),
2835 verify = False, headers = rheaders)
2836 if resp.status_code == requests.codes.ok:
2837 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2838 for edge_pages in paged_Edge_List:
2839 if edge_pages.tag == 'edgePage':
2840 for edge_summary in edge_pages:
2841 if edge_summary.tag == 'pagingInfo':
2842 for element in edge_summary:
2843 if element.tag == 'totalCount' and element.text == '0':
2844 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2845 .format(self.nsx_manager))
2846
2847 if edge_summary.tag == 'edgeSummary':
2848 for element in edge_summary:
2849 if element.tag == 'id':
2850 edge_list.append(element.text)
2851 else:
2852 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2853 .format(self.nsx_manager))
2854
2855 if not edge_list:
2856 raise vimconn.vimconnException("get_edge_details: "\
2857 "No NSX edge details found: {}"
2858 .format(self.nsx_manager))
2859 else:
2860 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2861 return edge_list
2862 else:
2863 self.logger.debug("get_edge_details: "
2864 "Failed to get NSX edge details from NSX Manager: {}"
2865 .format(resp.content))
2866 return None
2867
2868 except Exception as exp:
2869 self.logger.debug("get_edge_details: "\
2870 "Failed to get NSX edge details from NSX Manager: {}"
2871 .format(exp))
2872 raise vimconn.vimconnException("get_edge_details: "\
2873 "Failed to get NSX edge details from NSX Manager: {}"
2874 .format(exp))
2875
2876
2877 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2878 """Get IP address details from NSX edges, using the MAC address
2879 PARAMS: nsx_edges : List of NSX edges
2880 mac_address : Find IP address corresponding to this MAC address
2881 Returns: IP address corresponding to the provided MAC address
2882 """
2883
2884 ip_addr = None
2885 rheaders = {'Content-Type': 'application/xml'}
2886
2887 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2888
2889 try:
2890 for edge in nsx_edges:
2891 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2892
2893 resp = requests.get(self.nsx_manager + nsx_api_url,
2894 auth = (self.nsx_user, self.nsx_password),
2895 verify = False, headers = rheaders)
2896
2897 if resp.status_code == requests.codes.ok:
2898 dhcp_leases = XmlElementTree.fromstring(resp.text)
2899 for child in dhcp_leases:
2900 if child.tag == 'dhcpLeaseInfo':
2901 dhcpLeaseInfo = child
2902 for leaseInfo in dhcpLeaseInfo:
2903 for elem in leaseInfo:
2904 if (elem.tag)=='macAddress':
2905 edge_mac_addr = elem.text
2906 if (elem.tag)=='ipAddress':
2907 ip_addr = elem.text
2908 if edge_mac_addr is not None:
2909 if edge_mac_addr == mac_address:
2910 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2911 .format(ip_addr, mac_address,edge))
2912 return ip_addr
2913 else:
2914 self.logger.debug("get_ipaddr_from_NSXedge: "\
2915 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2916 .format(resp.content))
2917
2918 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2919 return None
2920
2921 except XmlElementTree.ParseError as Err:
2922 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2923
2924
2925 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
2926 """Send and action over a VM instance from VIM
2927 Returns the vm_id if the action was successfully sent to the VIM"""
2928
2929 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2930 if vm__vim_uuid is None or action_dict is None:
2931 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2932
2933 org, vdc = self.get_vdc_details()
2934 if vdc is None:
2935 raise vimconn.vimconnException("Failed to get a reference to the VDC for tenant {}".format(self.tenant_name))
2936
2937 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2938 if vapp_name is None:
2939 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2940 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2941 else:
2942 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2943
2944 try:
2945 vdc_obj = VDC(self.client, href=vdc.get('href'))
2946 vapp_resource = vdc_obj.get_vapp(vapp_name)
2947 vapp = VApp(self.client, resource=vapp_resource)
2948 if "start" in action_dict:
2949 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2950 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
2951 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2952 self.instance_actions_result("start", result, vapp_name)
2953 elif "rebuild" in action_dict:
2954 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2955 rebuild_task = vapp.deploy(power_on=True)
2956 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
2957 self.instance_actions_result("rebuild", result, vapp_name)
2958 elif "pause" in action_dict:
2959 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2960 pause_task = vapp.undeploy(action='suspend')
2961 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
2962 self.instance_actions_result("pause", result, vapp_name)
2963 elif "resume" in action_dict:
2964 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2965 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
2966 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2967 self.instance_actions_result("resume", result, vapp_name)
2968 elif "shutoff" in action_dict or "shutdown" in action_dict:
2969 action_name , value = action_dict.items()[0]
2970 #For python3
2971 #action_name , value = list(action_dict.items())[0]
2972 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2973 shutdown_task = vapp.shutdown()
2974 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
2975 if action_name == "shutdown":
2976 self.instance_actions_result("shutdown", result, vapp_name)
2977 else:
2978 self.instance_actions_result("shutoff", result, vapp_name)
2979 elif "forceOff" in action_dict:
2980 result = vapp.undeploy(action='powerOff')
2981 self.instance_actions_result("forceOff", result, vapp_name)
2982 elif "reboot" in action_dict:
2983 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2984 reboot_task = vapp.reboot()
2985 self.client.get_task_monitor().wait_for_success(task=reboot_task)
2986 else:
2987 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2988 return vm__vim_uuid
2989 except Exception as exp :
2990 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2991 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
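# Illustrative usage sketch (comment only, not executed); 'vim_conn' and the UUID are
# hypothetical placeholders for an initialized vimconnector and an existing vApp.
#
#   vm_uuid = "11111111-2222-3333-4444-555555555555"
#   vim_conn.action_vminstance(vm__vim_uuid=vm_uuid, action_dict={"shutdown": None})
#   vim_conn.action_vminstance(vm__vim_uuid=vm_uuid, action_dict={"start": None})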
2992
2993 def instance_actions_result(self, action, result, vapp_name):
2994 if result.get('status') == 'success':
2995 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2996 else:
2997 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2998
2999 def get_vminstance_console(self, vm_id, console_type="vnc"):
3000 """
3001 Get a console for the virtual machine
3002 Params:
3003 vm_id: uuid of the VM
3004 console_type, can be:
3005 "novnc" (by default), "xvpvnc" for VNC types,
3006 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3007 Returns dict with the console parameters:
3008 protocol: ssh, ftp, http, https, ...
3009 server: usually ip address
3010 port: the http, ssh, ... port
3011 suffix: extra text, e.g. the http path and query string
3012 """
3013 raise vimconn.vimconnNotImplemented("Should have implemented this")
3014
3015 # NOT USED METHODS in current version
3016
3017 def host_vim2gui(self, host, server_dict):
3018 """Transform host dictionary from VIM format to GUI format,
3019 and append to the server_dict
3020 """
3021 raise vimconn.vimconnNotImplemented("Should have implemented this")
3022
3023 def get_hosts_info(self):
3024 """Get the information of deployed hosts
3025 Returns the hosts content"""
3026 raise vimconn.vimconnNotImplemented("Should have implemented this")
3027
3028 def get_hosts(self, vim_tenant):
3029 """Get the hosts and deployed instances
3030 Returns the hosts content"""
3031 raise vimconn.vimconnNotImplemented("Should have implemented this")
3032
3033 def get_processor_rankings(self):
3034 """Get the processor rankings in the VIM database"""
3035 raise vimconn.vimconnNotImplemented("Should have implemented this")
3036
3037 def new_host(self, host_data):
3038 """Adds a new host to VIM"""
3039 '''Returns status code of the VIM response'''
3040 raise vimconn.vimconnNotImplemented("Should have implemented this")
3041
3042 def new_external_port(self, port_data):
3043 """Adds a external port to VIM"""
3044 '''Returns the port identifier'''
3045 raise vimconn.vimconnNotImplemented("Should have implemented this")
3046
3047 def new_external_network(self, net_name, net_type):
3048 """Adds a external network to VIM (shared)"""
3049 '''Returns the network identifier'''
3050 raise vimconn.vimconnNotImplemented("Should have implemented this")
3051
3052 def connect_port_network(self, port_id, network_id, admin=False):
3053 """Connects a external port to a network"""
3054 '''Returns status code of the VIM response'''
3055 raise vimconn.vimconnNotImplemented("Should have implemented this")
3056
3057 def new_vminstancefromJSON(self, vm_data):
3058 """Adds a VM instance to VIM"""
3059 '''Returns the instance identifier'''
3060 raise vimconn.vimconnNotImplemented("Should have implemented this")
3061
3062 def get_network_name_by_id(self, network_uuid=None):
3063 """Method gets vcloud director network named based on supplied uuid.
3064
3065 Args:
3066 network_uuid: network_id
3067
3068 Returns:
3069 The network name, or None if not found.
3070 """
3071
3072 if not network_uuid:
3073 return None
3074
3075 try:
3076 org_dict = self.get_org(self.org_uuid)
3077 if 'networks' in org_dict:
3078 org_network_dict = org_dict['networks']
3079 for net_uuid in org_network_dict:
3080 if net_uuid == network_uuid:
3081 return org_network_dict[net_uuid]
3082 except:
3083 self.logger.debug("Exception in get_network_name_by_id")
3084 self.logger.debug(traceback.format_exc())
3085
3086 return None
3087
3088 def get_network_id_by_name(self, network_name=None):
3089 """Method gets vcloud director network uuid based on supplied name.
3090
3091 Args:
3092 network_name: network_name
3093 Returns:
3094 The network uuid (network_id), or None if not found.
3096 """
3097
3098 if not network_name:
3099 self.logger.debug("get_network_id_by_name() : Network name is empty")
3100 return None
3101
3102 try:
3103 org_dict = self.get_org(self.org_uuid)
3104 if org_dict and 'networks' in org_dict:
3105 org_network_dict = org_dict['networks']
3106 for net_uuid,net_name in org_network_dict.iteritems():
3107 #For python3
3108 #for net_uuid,net_name in org_network_dict.items():
3109 if net_name == network_name:
3110 return net_uuid
3111
3112 except KeyError as exp:
3113 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
3114
3115 return None
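# Illustrative usage sketch (comment only, not executed); 'vim_conn' is a hypothetical
# initialized vimconnector. The two helpers above are inverse lookups over the org networks.
#
#   net_uuid = vim_conn.get_network_id_by_name("mgmt-net")    # -> uuid string or None
#   net_name = vim_conn.get_network_name_by_id(net_uuid)      # -> "mgmt-net" or None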
3116
3117 def list_org_action(self):
3118 """
3119 Method leverages vCloud director to query the available organizations for a particular user
3120
3121 Args:
3122 None (uses the active client session)
3124
3125 Returns:
3126 The XML response content, or None
3127 """
3128 url_list = [self.url, '/api/org']
3129 vm_list_rest_call = ''.join(url_list)
3130
3131 if self.client._session:
3132 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3133 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3134
3135 response = self.perform_request(req_type='GET',
3136 url=vm_list_rest_call,
3137 headers=headers)
3138
3139 if response.status_code == 403:
3140 response = self.retry_rest('GET', vm_list_rest_call)
3141
3142 if response.status_code == requests.codes.ok:
3143 return response.content
3144
3145 return None
3146
3147 def get_org_action(self, org_uuid=None):
3148 """
3149 Method leverages vCloud director to retrieve the available objects for an organization.
3150
3151 Args:
3152 org_uuid - vCD organization uuid
3153 self.client - is active connection.
3154
3155 Returns:
3156 The XML response content, or None
3157 """
3158
3159 if org_uuid is None:
3160 return None
3161
3162 url_list = [self.url, '/api/org/', org_uuid]
3163 vm_list_rest_call = ''.join(url_list)
3164
3165 if self.client._session:
3166 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3167 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3168
3169 #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
3170 response = self.perform_request(req_type='GET',
3171 url=vm_list_rest_call,
3172 headers=headers)
3173 if response.status_code == 403:
3174 response = self.retry_rest('GET', vm_list_rest_call)
3175
3176 if response.status_code == requests.codes.ok:
3177 return response.content
3178 return None
3179
3180 def get_org(self, org_uuid=None):
3181 """
3182 Method retrieves the given organization from vCloud Director
3183
3184 Args:
3185 org_uuid - is an organization uuid.
3186
3187 Returns:
3188 A dictionary with the following keys:
3189 "networks" - network list under the org
3190 "catalogs" - catalog list under the org
3191 "vdcs" - vdc list under the org
3192 """
3193
3194 org_dict = {}
3195
3196 if org_uuid is None:
3197 return org_dict
3198
3199 content = self.get_org_action(org_uuid=org_uuid)
3200 try:
3201 vdc_list = {}
3202 network_list = {}
3203 catalog_list = {}
3204 vm_list_xmlroot = XmlElementTree.fromstring(content)
3205 for child in vm_list_xmlroot:
3206 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
3207 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3208 org_dict['vdcs'] = vdc_list
3209 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
3210 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3211 org_dict['networks'] = network_list
3212 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
3213 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3214 org_dict['catalogs'] = catalog_list
3215 except:
3216 pass
3217
3218 return org_dict
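# Illustrative usage sketch (comment only, not executed); 'vim_conn' is hypothetical.
#
#   org = vim_conn.get_org(org_uuid=vim_conn.org_uuid)
#   vdcs     = org.get('vdcs', {})        # {vdc_uuid: vdc_name}
#   networks = org.get('networks', {})    # {network_uuid: network_name}
#   catalogs = org.get('catalogs', {})    # {catalog_uuid: catalog_name}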
3219
3220 def get_org_list(self):
3221 """
3222 Method retrieves the available organizations in vCloud Director
3223
3224 Args:
3225 vca - is active VCA connection.
3226
3227 Returns:
3228 A dictionary keyed by organization UUID, with the organization name as value
3229 """
3230
3231 org_dict = {}
3232
3233 content = self.list_org_action()
3234 try:
3235 vm_list_xmlroot = XmlElementTree.fromstring(content)
3236 for vm_xml in vm_list_xmlroot:
3237 if vm_xml.tag.split("}")[1] == 'Org':
3238 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
3239 org_dict[org_uuid[0]] = vm_xml.attrib['name']
3240 except:
3241 pass
3242
3243 return org_dict
3244
3245 def vms_view_action(self, vdc_name=None):
3246 """ Method leverages vCloud director vms query call
3247
3248 Args:
3249 vca - is active VCA connection.
3250 vdc_name - is a vdc name that will be used to query vms action
3251
3252 Returns:
3253 The XML response content, or None
3254 """
3255 vca = self.connect()
3256 if vdc_name is None:
3257 return None
3258
3259 url_list = [vca.host, '/api/vms/query']
3260 vm_list_rest_call = ''.join(url_list)
3261
3262 if vca.vcloud_session and vca.vcloud_session.organization:
3263 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
3264 vca.vcloud_session.organization.Link)
3265 #For python3
3266 #refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and\
3267 # ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
3268 if len(refs) == 1:
3269 response = Http.get(url=vm_list_rest_call,
3270 headers=vca.vcloud_session.get_vcloud_headers(),
3271 verify=vca.verify,
3272 logger=vca.logger)
3273 if response.status_code == requests.codes.ok:
3274 return response.content
3275
3276 return None
3277
3278 def get_vapp_list(self, vdc_name=None):
3279 """
3280 Method retrieves the vApp list deployed in vCloud director and returns a dictionary
3281 containing all vApps deployed for the queried VDC.
3282 The dictionary key is the vApp UUID
3283
3284
3285 Args:
3286 vca - is active VCA connection.
3287 vdc_name - is a vdc name that will be used to query vms action
3288
3289 Returns:
3290 A dictionary keyed by vApp UUID
3291 """
3292
3293 vapp_dict = {}
3294 if vdc_name is None:
3295 return vapp_dict
3296
3297 content = self.vms_view_action(vdc_name=vdc_name)
3298 try:
3299 vm_list_xmlroot = XmlElementTree.fromstring(content)
3300 for vm_xml in vm_list_xmlroot:
3301 if vm_xml.tag.split("}")[1] == 'VMRecord':
3302 if vm_xml.attrib['isVAppTemplate'] == 'true':
3303 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
3304 if 'vappTemplate-' in rawuuid[0]:
3305 # container in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5; we strip
3306 # the 'vappTemplate-' prefix and use the raw UUID as key
3307 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
3308 except:
3309 pass
3310
3311 return vapp_dict
3312
3313 def get_vm_list(self, vdc_name=None):
3314 """
3315 Method retrieves the list of VMs deployed in vCloud director. It returns a dictionary
3316 containing all VMs deployed for the queried VDC.
3317 The dictionary key is the VM UUID
3318
3319
3320 Args:
3321 vca - is active VCA connection.
3322 vdc_name - is a vdc name that will be used to query vms action
3323
3324 Returns:
3325 A dictionary keyed by VM UUID
3326 """
3327 vm_dict = {}
3328
3329 if vdc_name is None:
3330 return vm_dict
3331
3332 content = self.vms_view_action(vdc_name=vdc_name)
3333 try:
3334 vm_list_xmlroot = XmlElementTree.fromstring(content)
3335 for vm_xml in vm_list_xmlroot:
3336 if vm_xml.tag.split("}")[1] == 'VMRecord':
3337 if vm_xml.attrib['isVAppTemplate'] == 'false':
3338 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3339 if 'vm-' in rawuuid[0]:
3340 # href in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5; we strip the 'vm-'
3341 # prefix and use the raw UUID as key
3342 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3343 except:
3344 pass
3345
3346 return vm_dict
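# Illustrative usage sketch (comment only, not executed); 'vim_conn' is hypothetical and
# the VDC name is assumed to be the tenant name configured for this connector.
#
#   vms = vim_conn.get_vm_list(vdc_name=vim_conn.tenant_name)
#   for vm_uuid, attrs in vms.items():
#       print(vm_uuid, attrs.get('name'), attrs.get('status'))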
3347
3348 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
3349 """
3350 Method retrieves a VM deployed in vCloud director. It returns the VM attributes as a
3351 dictionary, looked up either by UUID or by name within the queried VDC.
3352 The dictionary key is the VM UUID
3353
3354
3355 Args:
3356 vca - is active VCA connection.
3357 vdc_name - is a vdc name that will be used to query vms action
3358
3359 Returns:
3360 A dictionary keyed by VM UUID
3361 """
3362 vm_dict = {}
3363 vca = self.connect()
3364 if not vca:
3365 raise vimconn.vimconnConnectionException("self.connect() is failed")
3366
3367 if vdc_name is None:
3368 return vm_dict
3369
3370 content = self.vms_view_action(vdc_name=vdc_name)
3371 try:
3372 vm_list_xmlroot = XmlElementTree.fromstring(content)
3373 for vm_xml in vm_list_xmlroot:
3374 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
3375 # lookup done by UUID
3376 if isuuid:
3377 if vapp_name in vm_xml.attrib['container']:
3378 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3379 if 'vm-' in rawuuid[0]:
3380 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3381 break
3382 # lookup done by Name
3383 else:
3384 if vapp_name in vm_xml.attrib['name']:
3385 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3386 if 'vm-' in rawuuid[0]:
3387 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3388 break
3389 except:
3390 pass
3391
3392 return vm_dict
3393
3394 def get_network_action(self, network_uuid=None):
3395 """
3396 Method leverages vCloud director to query a network based on its uuid
3397
3398 Args:
3399 vca - is active VCA connection.
3400 network_uuid - is a network uuid
3401
3402 Returns:
3403 The XML response content, or None
3404 """
3405
3406 if network_uuid is None:
3407 return None
3408
3409 url_list = [self.url, '/api/network/', network_uuid]
3410 vm_list_rest_call = ''.join(url_list)
3411
3412 if self.client._session:
3413 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3414 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3415
3416 response = self.perform_request(req_type='GET',
3417 url=vm_list_rest_call,
3418 headers=headers)
3419 #Retry login if session expired & retry sending request
3420 if response.status_code == 403:
3421 response = self.retry_rest('GET', vm_list_rest_call)
3422
3423 if response.status_code == requests.codes.ok:
3424 return response.content
3425
3426 return None
3427
3428 def get_vcd_network(self, network_uuid=None):
3429 """
3430 Method retrieves available network from vCloud Director
3431
3432 Args:
3433 network_uuid - is VCD network UUID
3434
3435 Each element serialized as key : value pair
3436
3437 The following keys are available for access, e.g. network_configuration['Gateway']
3438 <Configuration>
3439 <IpScopes>
3440 <IpScope>
3441 <IsInherited>true</IsInherited>
3442 <Gateway>172.16.252.100</Gateway>
3443 <Netmask>255.255.255.0</Netmask>
3444 <Dns1>172.16.254.201</Dns1>
3445 <Dns2>172.16.254.202</Dns2>
3446 <DnsSuffix>vmwarelab.edu</DnsSuffix>
3447 <IsEnabled>true</IsEnabled>
3448 <IpRanges>
3449 <IpRange>
3450 <StartAddress>172.16.252.1</StartAddress>
3451 <EndAddress>172.16.252.99</EndAddress>
3452 </IpRange>
3453 </IpRanges>
3454 </IpScope>
3455 </IpScopes>
3456 <FenceMode>bridged</FenceMode>
3457
3458 Returns:
3459 A dictionary with the network configuration (status, name, uuid, IP scope fields, ...)
3460 """
3461
3462 network_configuration = {}
3463 if network_uuid is None:
3464 return network_uuid
3465
3466 try:
3467 content = self.get_network_action(network_uuid=network_uuid)
3468 vm_list_xmlroot = XmlElementTree.fromstring(content)
3469
3470 network_configuration['status'] = vm_list_xmlroot.get("status")
3471 network_configuration['name'] = vm_list_xmlroot.get("name")
3472 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
3473
3474 for child in vm_list_xmlroot:
3475 if child.tag.split("}")[1] == 'IsShared':
3476 network_configuration['isShared'] = child.text.strip()
3477 if child.tag.split("}")[1] == 'Configuration':
3478 for configuration in child.iter():
3479 tagKey = configuration.tag.split("}")[1].strip()
3480 if tagKey != "":
3481 network_configuration[tagKey] = configuration.text.strip()
3482 return network_configuration
3483 except Exception as exp :
3484 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
3485 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
3486
3487 return network_configuration
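# Illustrative usage sketch (comment only, not executed); the uuid value is hypothetical.
#
#   net_cfg = vim_conn.get_vcd_network(network_uuid="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
#   gateway = net_cfg.get('Gateway')       # e.g. '172.16.252.100'
#   netmask = net_cfg.get('Netmask')       # e.g. '255.255.255.0'
#   shared  = net_cfg.get('isShared')      # 'true' / 'false'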
3488
3489 def delete_network_action(self, network_uuid=None):
3490 """
3491 Method deletes the given network from vCloud director
3492
3493 Args:
3494 network_uuid - is the network uuid that the client wishes to delete
3495
3496 Returns:
3497 True if the delete request was accepted, otherwise False
3498 """
3499 client = self.connect_as_admin()
3500 if not client:
3501 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
3502 if network_uuid is None:
3503 return False
3504
3505 url_list = [self.url, '/api/admin/network/', network_uuid]
3506 vm_list_rest_call = ''.join(url_list)
3507
3508 if client._session:
3509 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3510 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
3511 response = self.perform_request(req_type='DELETE',
3512 url=vm_list_rest_call,
3513 headers=headers)
3514 if response.status_code == 202:
3515 return True
3516
3517 return False
3518
3519 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3520 ip_profile=None, isshared='true'):
3521 """
3522 Method create network in vCloud director
3523
3524 Args:
3525 network_name - is network name to be created.
3526 net_type - can be 'bridge','data','ptp','mgmt'.
3527 ip_profile is a dict containing the IP parameters of the network
3528 isshared - is a boolean
3529 parent_network_uuid - parent provider vdc network that will be used for mapping.
3530 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
3531
3532 Returns:
3533 The new network uuid, or None on failure
3534 """
3535
3536 new_network_name = [network_name, '-', str(uuid.uuid4())]
3537 content = self.create_network_rest(network_name=''.join(new_network_name),
3538 ip_profile=ip_profile,
3539 net_type=net_type,
3540 parent_network_uuid=parent_network_uuid,
3541 isshared=isshared)
3542 if content is None:
3543 self.logger.debug("Failed create network {}.".format(network_name))
3544 return None
3545
3546 try:
3547 vm_list_xmlroot = XmlElementTree.fromstring(content)
3548 vcd_uuid = vm_list_xmlroot.get('id').split(":")
3549 if len(vcd_uuid) == 4:
3550 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
3551 return vcd_uuid[3]
3552 except:
3553 self.logger.debug("Failed create network {}".format(network_name))
3554 return None
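# Illustrative usage sketch (comment only, not executed); all values are hypothetical.
# Fields missing from ip_profile are given defaults later by create_network_rest().
#
#   ip_profile = {'subnet_address': '10.10.10.0/24', 'gateway_address': '10.10.10.1',
#                 'dhcp_enabled': True, 'dhcp_count': 50,
#                 'dhcp_start_address': '10.10.10.3', 'ip_version': 'IPv4',
#                 'dns_address': '10.10.10.2'}
#   net_uuid = vim_conn.create_network(network_name="vnf-data", net_type='bridge',
#                                      ip_profile=ip_profile, isshared='true')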
3555
3556 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3557 ip_profile=None, isshared='true'):
3558 """
3559 Method create network in vCloud director
3560
3561 Args:
3562 network_name - is network name to be created.
3563 net_type - can be 'bridge','data','ptp','mgmt'.
3564 ip_profile is a dict containing the IP parameters of the network
3565 isshared - is a boolean
3566 parent_network_uuid - parent provider vdc network that will be used for mapping.
3567 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
3568
3569 Returns:
3570 The XML response content of the created network, or None on failure
3571 """
3572 client_as_admin = self.connect_as_admin()
3573 if not client_as_admin:
3574 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3575 if network_name is None:
3576 return None
3577
3578 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3579 vm_list_rest_call = ''.join(url_list)
3580
3581 if client_as_admin._session:
3582 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3583 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3584
3585 response = self.perform_request(req_type='GET',
3586 url=vm_list_rest_call,
3587 headers=headers)
3588
3589 provider_network = None
3590 available_networks = None
3591 add_vdc_rest_url = None
3592
3593 if response.status_code != requests.codes.ok:
3594 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3595 response.status_code))
3596 return None
3597 else:
3598 try:
3599 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3600 for child in vm_list_xmlroot:
3601 if child.tag.split("}")[1] == 'ProviderVdcReference':
3602 provider_network = child.attrib.get('href')
3603 # application/vnd.vmware.admin.providervdc+xml
3604 if child.tag.split("}")[1] == 'Link':
3605 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3606 and child.attrib.get('rel') == 'add':
3607 add_vdc_rest_url = child.attrib.get('href')
3608 except:
3609 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3610 self.logger.debug("Respond body {}".format(response.content))
3611 return None
3612
3613 # find pvdc provided available network
3614 response = self.perform_request(req_type='GET',
3615 url=provider_network,
3616 headers=headers)
3617 if response.status_code != requests.codes.ok:
3618 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3619 response.status_code))
3620 return None
3621
3622 if parent_network_uuid is None:
3623 try:
3624 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3625 for child in vm_list_xmlroot.iter():
3626 if child.tag.split("}")[1] == 'AvailableNetworks':
3627 for networks in child.iter():
3628 # application/vnd.vmware.admin.network+xml
3629 if networks.attrib.get('href') is not None:
3630 available_networks = networks.attrib.get('href')
3631 break
3632 except:
3633 return None
3634
3635 try:
3636 #Configure IP profile of the network
3637 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
3638
3639 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
3640 subnet_rand = random.randint(0, 255)
3641 ip_base = "192.168.{}.".format(subnet_rand)
3642 ip_profile['subnet_address'] = ip_base + "0/24"
3643 else:
3644 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
3645
3646 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
3647 ip_profile['gateway_address']=ip_base + "1"
3648 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
3649 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
3650 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
3651 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
3652 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
3653 ip_profile['dhcp_start_address']=ip_base + "3"
3654 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
3655 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
3656 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
3657 ip_profile['dns_address']=ip_base + "2"
3658
3659 gateway_address=ip_profile['gateway_address']
3660 dhcp_count=int(ip_profile['dhcp_count'])
3661 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
3662
3663 if ip_profile['dhcp_enabled']==True:
3664 dhcp_enabled='true'
3665 else:
3666 dhcp_enabled='false'
3667 dhcp_start_address=ip_profile['dhcp_start_address']
3668
3669 #derive dhcp_end_address from dhcp_start_address & dhcp_count
3670 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
3671 end_ip_int += dhcp_count - 1
3672 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
3673
3674 ip_version=ip_profile['ip_version']
3675 dns_address=ip_profile['dns_address']
3676 except KeyError as exp:
3677 self.logger.debug("Create Network REST: Key error {}".format(exp))
3678 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
3679
3680 # either use client provided UUID or search for a first available
3681 # if both are not defined we return none
3682 if parent_network_uuid is not None:
3683 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
3684 add_vdc_rest_url = ''.join(url_list)
3685
3686 #Creating all networks as Direct Org VDC type networks.
3687 #Unused in case of Underlay (data/ptp) network interface.
3688 fence_mode="bridged"
3689 is_inherited='false'
3690 dns_list = dns_address.split(";")
3691 dns1 = dns_list[0]
3692 dns2_text = ""
3693 if len(dns_list) >= 2:
3694 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
3695 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3696 <Description>Openmano created</Description>
3697 <Configuration>
3698 <IpScopes>
3699 <IpScope>
3700 <IsInherited>{1:s}</IsInherited>
3701 <Gateway>{2:s}</Gateway>
3702 <Netmask>{3:s}</Netmask>
3703 <Dns1>{4:s}</Dns1>{5:s}
3704 <IsEnabled>{6:s}</IsEnabled>
3705 <IpRanges>
3706 <IpRange>
3707 <StartAddress>{7:s}</StartAddress>
3708 <EndAddress>{8:s}</EndAddress>
3709 </IpRange>
3710 </IpRanges>
3711 </IpScope>
3712 </IpScopes>
3713 <ParentNetwork href="{9:s}"/>
3714 <FenceMode>{10:s}</FenceMode>
3715 </Configuration>
3716 <IsShared>{11:s}</IsShared>
3717 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3718 subnet_address, dns1, dns2_text, dhcp_enabled,
3719 dhcp_start_address, dhcp_end_address, available_networks,
3720 fence_mode, isshared)
3721
3722 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
3723 try:
3724 response = self.perform_request(req_type='POST',
3725 url=add_vdc_rest_url,
3726 headers=headers,
3727 data=data)
3728
3729 if response.status_code != 201:
3730 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
3731 .format(response.status_code,response.content))
3732 else:
3733 network_task = self.get_task_from_response(response.content)
3734 self.logger.debug("Create Network REST : Waiting for Network creation complete")
3735 time.sleep(5)
3736 result = self.client.get_task_monitor().wait_for_success(task=network_task)
3737 if result.get('status') == 'success':
3738 return response.content
3739 else:
3740 self.logger.debug("create_network_rest task failed. Network Create response : {}"
3741 .format(response.content))
3742 except Exception as exp:
3743 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
3744
3745 return None
3746
3747 def convert_cidr_to_netmask(self, cidr_ip=None):
3748 """
3749 Method converts a CIDR prefix length to a dotted-decimal netmask
3750 Args:
3751 cidr_ip : CIDR IP address
3752 Returns:
3753 netmask : Converted netmask
3754 """
3755 if cidr_ip is not None:
3756 if '/' in cidr_ip:
3757 network, net_bits = cidr_ip.split('/')
3758 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
3759 else:
3760 netmask = cidr_ip
3761 return netmask
3762 return None
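# Worked example for the conversion above (comment only, not executed):
# for "10.0.0.0/24" the prefix length is 24, so the mask integer is
# (0xffffffff << (32 - 24)) & 0xffffffff = 0xffffff00, i.e. "255.255.255.0".
#
#   vim_conn.convert_cidr_to_netmask("10.0.0.0/24")   # -> "255.255.255.0"
#   vim_conn.convert_cidr_to_netmask("255.255.0.0")   # no '/' -> returned unchanged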
3763
3764 def get_provider_rest(self, vca=None):
3765 """
3766 Method gets provider vdc view from vcloud director
3767
3768 Args:
3769 vca - active admin connection to vCloud director.
3772
3773 Returns:
3774 The XML content of the response, or None
3775 """
3776
3777 url_list = [self.url, '/api/admin']
3778 if vca:
3779 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3780 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3781 response = self.perform_request(req_type='GET',
3782 url=''.join(url_list),
3783 headers=headers)
3784
3785 if response.status_code == requests.codes.ok:
3786 return response.content
3787 return None
3788
3789 def create_vdc(self, vdc_name=None):
3790
3791 vdc_dict = {}
3792
3793 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
3794 if xml_content is not None:
3795 try:
3796 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
3797 for child in task_resp_xmlroot:
3798 if child.tag.split("}")[1] == 'Owner':
3799 vdc_id = child.attrib.get('href').split("/")[-1]
3800 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
3801 return vdc_dict
3802 except:
3803 self.logger.debug("Respond body {}".format(xml_content))
3804
3805 return None
3806
3807 def create_vdc_from_tmpl_rest(self, vdc_name=None):
3808 """
3809 Method creates a vdc in vCloud director based on a VDC template.
3810 It uses a pre-defined template.
3811
3812 Args:
3813 vdc_name - name of a new vdc.
3814
3815 Returns:
3816 The XML content of the response, or None
3817 """
3818 # prerequisite: at least one vdc template should be available in vCD
3819 self.logger.info("Creating new vdc {}".format(vdc_name))
3820 vca = self.connect_as_admin()
3821 if not vca:
3822 raise vimconn.vimconnConnectionException("Failed to connect vCD")
3823 if vdc_name is None:
3824 return None
3825
3826 url_list = [self.url, '/api/vdcTemplates']
3827 vm_list_rest_call = ''.join(url_list)
3828
3829 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3830 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
3831 response = self.perform_request(req_type='GET',
3832 url=vm_list_rest_call,
3833 headers=headers)
3834
3835 # container url to a template
3836 vdc_template_ref = None
3837 try:
3838 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3839 for child in vm_list_xmlroot:
3840 # application/vnd.vmware.admin.providervdc+xml
3841 # we need to find a template from which we instantiate the VDC
3842 if child.tag.split("}")[1] == 'VdcTemplate':
3843 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
3844 vdc_template_ref = child.attrib.get('href')
3845 except:
3846 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3847 self.logger.debug("Respond body {}".format(response.content))
3848 return None
3849
3850 # if we didn't find the required pre-defined template we return None
3851 if vdc_template_ref is None:
3852 return None
3853
3854 try:
3855 # instantiate vdc
3856 url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
3857 vm_list_rest_call = ''.join(url_list)
3858 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3859 <Source href="{1:s}"></Source>
3860 <Description>openmano</Description>
3861 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
3862
3863 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
3864
3865 response = self.perform_request(req_type='POST',
3866 url=vm_list_rest_call,
3867 headers=headers,
3868 data=data)
3869
3870 vdc_task = self.get_task_from_response(response.content)
3871 self.client.get_task_monitor().wait_for_success(task=vdc_task)
3872
3873 # if all is ok we respond with the content, otherwise by default None
3874 if response.status_code >= 200 and response.status_code < 300:
3875 return response.content
3876 return None
3877 except:
3878 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3879 self.logger.debug("Respond body {}".format(response.content))
3880
3881 return None
3882
3883 def create_vdc_rest(self, vdc_name=None):
3884 """
3885 Method creates a vdc in vCloud director
3886
3887 Args:
3888 vdc_name - vdc name to be created
3889 Returns:
3890 The return response
3891 """
3892
3893 self.logger.info("Creating new vdc {}".format(vdc_name))
3894
3895 vca = self.connect_as_admin()
3896 if not vca:
3897 raise vimconn.vimconnConnectionException("Failed to connect vCD")
3898 if vdc_name is None:
3899 return None
3900
3901 url_list = [self.url, '/api/admin/org/', self.org_uuid]
3902 vm_list_rest_call = ''.join(url_list)
3903
3904 if vca._session:
3905 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3906 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3907 response = self.perform_request(req_type='GET',
3908 url=vm_list_rest_call,
3909 headers=headers)
3910
3911 provider_vdc_ref = None
3912 add_vdc_rest_url = None
3913 available_networks = None
3914
3915 if response.status_code != requests.codes.ok:
3916 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3917 response.status_code))
3918 return None
3919 else:
3920 try:
3921 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3922 for child in vm_list_xmlroot:
3923 # application/vnd.vmware.admin.providervdc+xml
3924 if child.tag.split("}")[1] == 'Link':
3925 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
3926 and child.attrib.get('rel') == 'add':
3927 add_vdc_rest_url = child.attrib.get('href')
3928 except:
3929 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3930 self.logger.debug("Respond body {}".format(response.content))
3931 return None
3932
3933 response = self.get_provider_rest(vca=vca)
3934 try:
3935 vm_list_xmlroot = XmlElementTree.fromstring(response)
3936 for child in vm_list_xmlroot:
3937 if child.tag.split("}")[1] == 'ProviderVdcReferences':
3938 for sub_child in child:
3939 provider_vdc_ref = sub_child.attrib.get('href')
3940 except:
3941 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3942 self.logger.debug("Respond body {}".format(response))
3943 return None
3944
3945 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
3946 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
3947 <AllocationModel>ReservationPool</AllocationModel>
3948 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
3949 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
3950 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
3951 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
3952 <ProviderVdcReference
3953 name="Main Provider"
3954 href="{2:s}" />
3955 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
3956 escape(vdc_name),
3957 provider_vdc_ref)
3958
3959 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
3960
3961 response = self.perform_request(req_type='POST',
3962 url=add_vdc_rest_url,
3963 headers=headers,
3964 data=data)
3965
3966 # if all is ok we respond with the content, otherwise by default None
3967 if response.status_code == 201:
3968 return response.content
3969 return None
3970
3971 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
3972 """
3973 Method retrieves vApp details from vCloud director
3974
3975 Args:
3976 vapp_uuid - is vapp identifier.
3977
3978 Returns:
3979 A dictionary with the parsed vApp details (may be empty on failure)
3980 """
3981
3982 parsed_respond = {}
3983 vca = None
3984
3985 if need_admin_access:
3986 vca = self.connect_as_admin()
3987 else:
3988 vca = self.client
3989
3990 if not vca:
3991 raise vimconn.vimconnConnectionException("Failed to connect vCD")
3992 if vapp_uuid is None:
3993 return None
3994
3995 url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
3996 get_vapp_restcall = ''.join(url_list)
3997
3998 if vca._session:
3999 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4000 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4001 response = self.perform_request(req_type='GET',
4002 url=get_vapp_restcall,
4003 headers=headers)
4004
4005 if response.status_code == 403:
4006 if need_admin_access == False:
4007 response = self.retry_rest('GET', get_vapp_restcall)
4008
4009 if response.status_code != requests.codes.ok:
4010 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
4011 response.status_code))
4012 return parsed_respond
4013
4014 try:
4015 xmlroot_respond = XmlElementTree.fromstring(response.content)
4016 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
4017
4018 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4019 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4020 'vmw': 'http://www.vmware.com/schema/ovf',
4021 'vm': 'http://www.vmware.com/vcloud/v1.5',
4022 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4023 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
4024 "xmlns":"http://www.vmware.com/vcloud/v1.5"
4025 }
4026
4027 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
4028 if created_section is not None:
4029 parsed_respond['created'] = created_section.text
4030
4031 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
4032 if network_section is not None and 'networkName' in network_section.attrib:
4033 parsed_respond['networkname'] = network_section.attrib['networkName']
4034
4035 ipscopes_section = \
4036 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
4037 namespaces)
4038 if ipscopes_section is not None:
4039 for ipscope in ipscopes_section:
4040 for scope in ipscope:
4041 tag_key = scope.tag.split("}")[1]
4042 if tag_key == 'IpRanges':
4043 ip_ranges = scope.getchildren()
4044 for ipblock in ip_ranges:
4045 for block in ipblock:
4046 parsed_respond[block.tag.split("}")[1]] = block.text
4047 else:
4048 parsed_respond[tag_key] = scope.text
4049
4050 # parse children section for other attrib
4051 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4052 if children_section is not None:
4053 parsed_respond['name'] = children_section.attrib['name']
4054 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
4055 if "nestedHypervisorEnabled" in children_section.attrib else None
4056 parsed_respond['deployed'] = children_section.attrib['deployed']
4057 parsed_respond['status'] = children_section.attrib['status']
4058 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
4059 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
4060 nic_list = []
4061 for adapters in network_adapter:
4062 adapter_key = adapters.tag.split("}")[1]
4063 if adapter_key == 'PrimaryNetworkConnectionIndex':
4064 parsed_respond['primarynetwork'] = adapters.text
4065 if adapter_key == 'NetworkConnection':
4066 vnic = {}
4067 if 'network' in adapters.attrib:
4068 vnic['network'] = adapters.attrib['network']
4069 for adapter in adapters:
4070 setting_key = adapter.tag.split("}")[1]
4071 vnic[setting_key] = adapter.text
4072 nic_list.append(vnic)
4073
4074 for link in children_section:
4075 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4076 if link.attrib['rel'] == 'screen:acquireTicket':
4077 parsed_respond['acquireTicket'] = link.attrib
4078 if link.attrib['rel'] == 'screen:acquireMksTicket':
4079 parsed_respond['acquireMksTicket'] = link.attrib
4080
4081 parsed_respond['interfaces'] = nic_list
4082 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4083 if vCloud_extension_section is not None:
4084 vm_vcenter_info = {}
4085 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4086 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4087 if vmext is not None:
4088 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4089 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
4090
4091 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
4092 vm_virtual_hardware_info = {}
4093 if virtual_hardware_section is not None:
4094 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
4095 if item.find("rasd:Description",namespaces).text == "Hard disk":
4096 disk_size = item.find("rasd:HostResource" ,namespaces
4097 ).attrib["{"+namespaces['vm']+"}capacity"]
4098
4099 vm_virtual_hardware_info["disk_size"]= disk_size
4100 break
4101
4102 for link in virtual_hardware_section:
4103 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4104 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
4105 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
4106 break
4107
4108 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
4109 except Exception as exp :
4110 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4111 return parsed_respond
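# Illustrative usage sketch (comment only, not executed); the uuid is hypothetical.
#
#   details = vim_conn.get_vapp_details_rest(vapp_uuid="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
#   vm_moref = details.get('vm_vcenter_info', {}).get('vm_moref_id')
#   disk_mb  = details.get('vm_virtual_hardware', {}).get('disk_size')
#   nics     = details.get('interfaces', [])     # list of per-NIC attribute dicts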
4112
4113 def acquire_console(self, vm_uuid=None):
4114
4115 if vm_uuid is None:
4116 return None
4117 if self.client._session:
4118 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4119 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4120 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
4121 console_dict = vm_dict['acquireTicket']
4122 console_rest_call = console_dict['href']
4123
4124 response = self.perform_request(req_type='POST',
4125 url=console_rest_call,
4126 headers=headers)
4127
4128 if response.status_code == 403:
4129 response = self.retry_rest('POST', console_rest_call)
4130
4131 if response.status_code == requests.codes.ok:
4132 return response.content
4133
4134 return None
4135
4136 def modify_vm_disk(self, vapp_uuid, flavor_disk):
4137 """
4138 Method modifies the VM disk size to match the flavor
4139
4140 Args:
4141 vapp_uuid - is vapp identifier.
4142 flavor_disk - disk size as specified in VNFD (flavor)
4143
4144 Returns:
4145 The status of the modify operation (True on success), or None on error
4146 """
4147 status = None
4148 try:
4149 #Flavor disk is in GB convert it into MB
4150 flavor_disk = int(flavor_disk) * 1024
4151 vm_details = self.get_vapp_details_rest(vapp_uuid)
4152 if vm_details:
4153 vm_name = vm_details["name"]
4154 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
4155
4156 if vm_details and "vm_virtual_hardware" in vm_details:
4157 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
4158 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4159
4160 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
4161
4162 if flavor_disk > vm_disk:
4163 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
4164 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
4165 vm_disk, flavor_disk ))
4166 else:
4167 status = True
4168 self.logger.info("No need to modify disk of VM {}".format(vm_name))
4169
4170 return status
4171 except Exception as exp:
4172 self.logger.info("Error occurred while modifing disk size {}".format(exp))
4173
4174
4175 def modify_vm_disk_rest(self, disk_href , disk_size):
4176 """
4177 Method modifies the VM disk size via the vCD REST API
4178
4179 Args:
4180 disk_href - vCD API URL to GET and PUT disk data
4181 disk_size - disk size as specified in VNFD (flavor)
4182
4183 Returns:
4184 True on success, False on task failure, or None on error
4185 """
4186 if disk_href is None or disk_size is None:
4187 return None
4188
4189 if self.client._session:
4190 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4191 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4192 response = self.perform_request(req_type='GET',
4193 url=disk_href,
4194 headers=headers)
4195
4196 if response.status_code == 403:
4197 response = self.retry_rest('GET', disk_href)
4198
4199 if response.status_code != requests.codes.ok:
4200 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
4201 response.status_code))
4202 return None
4203 try:
4204 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4205 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4206 #For python3
4207 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
4208 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4209
4210 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4211 if item.find("rasd:Description",namespaces).text == "Hard disk":
4212 disk_item = item.find("rasd:HostResource" ,namespaces )
4213 if disk_item is not None:
4214 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
4215 break
4216
4217 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
4218 xml_declaration=True)
4219
4220 #Send PUT request to modify disk size
4221 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4222
4223 response = self.perform_request(req_type='PUT',
4224 url=disk_href,
4225 headers=headers,
4226 data=data)
4227 if response.status_code == 403:
4228 add_headers = {'Content-Type': headers['Content-Type']}
4229 response = self.retry_rest('PUT', disk_href, add_headers, data)
4230
4231 if response.status_code != 202:
4232 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
4233 response.status_code))
4234 else:
4235 modify_disk_task = self.get_task_from_response(response.content)
4236 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
4237 if result.get('status') == 'success':
4238 return True
4239 else:
4240 return False
4241 return None
4242
4243 except Exception as exp :
4244 self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
4245 return None
4246
4247 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
4248 """
4249 Method to attach pci devices to VM
4250
4251 Args:
4252 vapp_uuid - uuid of vApp/VM
4253 pci_devices - pci devices information as specified in VNFD (flavor)
4254
4255 Returns:
4256 The status of add pci device task , vm object and
4257 vcenter_conect object
4258 """
4259 vm_obj = None
4260 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
4261 vcenter_conect, content = self.get_vcenter_content()
4262 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4263
4264 if vm_moref_id:
4265 try:
4266 no_of_pci_devices = len(pci_devices)
4267 if no_of_pci_devices > 0:
4268 #Get VM and its host
4269 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4270 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4271 if host_obj and vm_obj:
4272 #get PCI devices from the host on which the vapp is currently installed
4273 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
4274
4275 if avilable_pci_devices is None:
4276 #find other hosts with active pci devices
4277 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
4278 content,
4279 no_of_pci_devices
4280 )
4281
4282 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4283 #Migrate vm to the host where PCI devices are available
4284 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
4285 task = self.relocate_vm(new_host_obj, vm_obj)
4286 if task is not None:
4287 result = self.wait_for_vcenter_task(task, vcenter_conect)
4288 self.logger.info("Migrate VM status: {}".format(result))
4289 host_obj = new_host_obj
4290 else:
4291 self.logger.info("Fail to migrate VM : {}".format(result))
4292 raise vimconn.vimconnNotFoundException(
4293 "Fail to migrate VM : {} to host {}".format(
4294 vmname_andid,
4295 new_host_obj)
4296 )
4297
4298 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4299 #Add PCI devices one by one
4300 for pci_device in avilable_pci_devices:
4301 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
4302 if task:
4303 status= self.wait_for_vcenter_task(task, vcenter_conect)
4304 if status:
4305 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4306 else:
4307 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4308 return True, vm_obj, vcenter_conect
4309 else:
4310 self.logger.error("Currently there is no host with"\
4311 " {} number of avaialble PCI devices required for VM {}".format(
4312 no_of_pci_devices,
4313 vmname_andid)
4314 )
4315 raise vimconn.vimconnNotFoundException(
4316 "Currently there is no host with {} "\
4317 "number of avaialble PCI devices required for VM {}".format(
4318 no_of_pci_devices,
4319 vmname_andid))
4320 else:
4321 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
4322
4323 except vmodl.MethodFault as error:
4324 self.logger.error("Error occurred while adding PCI devices {} ",error)
4325 return None, vm_obj, vcenter_conect
4326
4327 def get_vm_obj(self, content, mob_id):
4328 """
4329 Method to get the vSphere VM object associated with a given moref ID
4330 Args:
4331 vapp_uuid - uuid of vApp/VM
4332 content - vCenter content object
4333 mob_id - mob_id of VM
4334
4335 Returns:
4336 VM and host object
4337 """
4338 vm_obj = None
4339 host_obj = None
4340 try :
4341 container = content.viewManager.CreateContainerView(content.rootFolder,
4342 [vim.VirtualMachine], True
4343 )
4344 for vm in container.view:
4345 mobID = vm._GetMoId()
4346 if mobID == mob_id:
4347 vm_obj = vm
4348 host_obj = vm_obj.runtime.host
4349 break
4350 except Exception as exp:
4351 self.logger.error("Error occurred while finding VM object : {}".format(exp))
4352 return host_obj, vm_obj
4353
4354 def get_pci_devices(self, host, need_devices):
4355 """
4356 Method to get the details of pci devices on given host
4357 Args:
4358 host - vSphere host object
4359 need_devices - number of pci devices needed on host
4360
4361 Returns:
4362 array of pci devices
4363 """
4364 all_devices = []
4365 all_device_ids = []
4366 used_devices_ids = []
4367
4368 try:
4369 if host:
4370 pciPassthruInfo = host.config.pciPassthruInfo
4371 pciDevies = host.hardware.pciDevice
4372
4373 for pci_status in pciPassthruInfo:
4374 if pci_status.passthruActive:
4375 for device in pciDevies:
4376 if device.id == pci_status.id:
4377 all_device_ids.append(device.id)
4378 all_devices.append(device)
4379
4380 #check if devices are in use
4381 avalible_devices = all_devices
4382 for vm in host.vm:
4383 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
4384 vm_devices = vm.config.hardware.device
4385 for device in vm_devices:
4386 if type(device) is vim.vm.device.VirtualPCIPassthrough:
4387 if device.backing.id in all_device_ids:
4388 for use_device in avalible_devices:
4389 if use_device.id == device.backing.id:
4390 avalible_devices.remove(use_device)
4391 used_devices_ids.append(device.backing.id)
4392 self.logger.debug("Device {} from devices {}"\
4393 "is in use".format(device.backing.id,
4394 device)
4395 )
4396 if len(avalible_devices) < need_devices:
4397 self.logger.debug("Host {} don't have {} number of active devices".format(host,
4398 need_devices))
4399 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
4400 avalible_devices))
4401 return None
4402 else:
4403 required_devices = avalible_devices[:need_devices]
4404 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
4405 len(avalible_devices),
4406 host,
4407 need_devices))
4408 self.logger.info("Retruning {} devices as {}".format(need_devices,
4409 required_devices ))
4410 return required_devices
4411
4412 except Exception as exp:
4413 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
4414
4415 return None
4416
4417 def get_host_and_PCIdevices(self, content, need_devices):
4418 """
4419 Method to get the details of pci devices on all hosts
4420
4421 Args:
4422 content - vSphere host object
4423 need_devices - number of pci devices needed on host
4424
4425 Returns:
4426 array of pci devices and host object
4427 """
4428 host_obj = None
4429 pci_device_objs = None
4430 try:
4431 if content:
4432 container = content.viewManager.CreateContainerView(content.rootFolder,
4433 [vim.HostSystem], True)
4434 for host in container.view:
4435 devices = self.get_pci_devices(host, need_devices)
4436 if devices:
4437 host_obj = host
4438 pci_device_objs = devices
4439 break
4440 except Exception as exp:
4441 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
4442
4443 return host_obj,pci_device_objs
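# Illustrative usage sketch (comment only, not executed); 'content' would come from
# get_vcenter_content() and the required device count is hypothetical.
#
#   host_obj, pci_devs = vim_conn.get_host_and_PCIdevices(content, need_devices=2)
#   if host_obj is None:
#       pass    # no host currently exposes 2 free passthrough devices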
4444
4445 def relocate_vm(self, dest_host, vm) :
4446 """
4447 Method to relocate a VM to a new host
4448
4449 Args:
4450 dest_host - vSphere host object
4451 vm - vSphere VM object
4452
4453 Returns:
4454 task object
4455 """
4456 task = None
4457 try:
4458 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
4459 task = vm.Relocate(relocate_spec)
4460 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
4461 except Exception as exp:
4462 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
4463 dest_host, vm, exp))
4464 return task
4465
4466 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
4467 """
4468 Waits and provides updates on a vSphere task
4469 """
4470 while task.info.state == vim.TaskInfo.State.running:
4471 time.sleep(2)
4472
4473 if task.info.state == vim.TaskInfo.State.success:
4474 if task.info.result is not None and not hideResult:
4475 self.logger.info('{} completed successfully, result: {}'.format(
4476 actionName,
4477 task.info.result))
4478 else:
4479 self.logger.info('Task {} completed successfully.'.format(actionName))
4480 else:
4481 self.logger.error('{} did not complete successfully: {} '.format(
4482 actionName,
4483 task.info.error)
4484 )
4485
4486 return task.info.result
4487
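# Note: the PCI passthrough helpers here are typically used together: get_host_and_PCIdevices()
# finds a host with enough free passthrough devices, relocate_vm() migrates the VM there if
# needed, and add_pci_to_vm() attaches each device through a reconfigure task that is then
# polled with wait_for_vcenter_task() (see add_sriov() below for the analogous SR-IOV flow).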
4488 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
4489 """
4490 Method to add pci device in given VM
4491
4492 Args:
4493 host_object - vSphere host object
4494 vm_object - vSphere VM object
4495 host_pci_dev - host_pci_dev must be one of the devices from the
4496 host_object.hardware.pciDevice list
4497 which is configured as a PCI passthrough device
4498
4499 Returns:
4500 task object
4501 """
4502 task = None
4503 if vm_object and host_object and host_pci_dev:
4504 try :
4505 #Add PCI device to VM
4506 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
4507 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
4508
4509 if host_pci_dev.id not in systemid_by_pciid:
4510 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
4511 return None
4512
4513 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
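# The backing object ties the virtual passthrough device to the host device: 'id' is the
# host PCI address, 'systemId' comes from the host's pciPassthrough config target queried
# above, and 'deviceId' is the 16-bit PCI device ID rendered as hex (hence the modulo 2**16).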
4514 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
4515 id=host_pci_dev.id,
4516 systemId=systemid_by_pciid[host_pci_dev.id],
4517 vendorId=host_pci_dev.vendorId,
4518 deviceName=host_pci_dev.deviceName)
4519
4520 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
4521
4522 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
4523 new_device_config.operation = "add"
4524 vmConfigSpec = vim.vm.ConfigSpec()
4525 vmConfigSpec.deviceChange = [new_device_config]
4526
4527 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
4528 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
4529 host_pci_dev, vm_object, host_object)
4530 )
4531 except Exception as exp:
4532 self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
4533 host_pci_dev,
4534 vm_object,
4535 exp))
4536 return task
4537
4538 def get_vm_vcenter_info(self):
4539 """
4540 Method to get details of vCenter and vm
4541
4542 Args:
4543 None
4544
4545 Returns:
4546 dict with vCenter connection details (IP, port, user, password)
4547 """
4548 vm_vcenter_info = {}
4549
4550 if self.vcenter_ip is not None:
4551 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
4552 else:
4553 raise vimconn.vimconnException(message="vCenter IP is not provided."\
4554 " Please provide vCenter IP while attaching datacenter to tenant in --config")
4555 if self.vcenter_port is not None:
4556 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
4557 else:
4558 raise vimconn.vimconnException(message="vCenter port is not provided."\
4559 " Please provide vCenter port while attaching datacenter to tenant in --config")
4560 if self.vcenter_user is not None:
4561 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
4562 else:
4563 raise vimconn.vimconnException(message="vCenter user is not provided."\
4564 " Please provide vCenter user while attaching datacenter to tenant in --config")
4565
4566 if self.vcenter_password is not None:
4567 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
4568 else:
4569 raise vimconn.vimconnException(message="vCenter user password is not provided."\
4570 " Please provide vCenter user password while attaching datacenter to tenant in --config")
4571
4572 return vm_vcenter_info
4573
4574
4575 def get_vm_pci_details(self, vmuuid):
4576 """
4577 Method to get VM PCI device details from vCenter
4578
4579 Args:
4580 vm_obj - vSphere VM object
4581
4582 Returns:
4583 dict of PCI devices attached to VM
4584
4585 """
4586 vm_pci_devices_info = {}
4587 try:
4588 vcenter_conect, content = self.get_vcenter_content()
4589 vm_moref_id = self.get_vm_moref_id(vmuuid)
4590 if vm_moref_id:
4591 #Get VM and its host
4592 if content:
4593 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4594 if host_obj and vm_obj:
4595 vm_pci_devices_info["host_name"]= host_obj.name
4596 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
4597 for device in vm_obj.config.hardware.device:
4598 if type(device) == vim.vm.device.VirtualPCIPassthrough:
4599 device_details={'devide_id':device.backing.id,
4600 'pciSlotNumber':device.slotInfo.pciSlotNumber,
4601 }
4602 vm_pci_devices_info[device.deviceInfo.label] = device_details
4603 else:
4604 self.logger.error("Can not connect to vCenter while getting "\
4605 "PCI devices infromationn")
4606 return vm_pci_devices_info
4607 except Exception as exp:
4608 self.logger.error("Error occurred while getting VM information"\
4609 " for VM : {}".format(exp))
4610 raise vimconn.vimconnException(message=exp)
4611
4612
4613 def reserve_memory_for_all_vms(self, vapp, memory_mb):
4614 """
4615 Method to reserve memory for all VMs
4616 Args :
4617 vapp - VApp
4618 memory_mb - Memory in MB
4619 Returns:
4620 None
4621 """
4622
4623 self.logger.info("Reserve memory for all VMs")
4624 for vms in vapp.get_all_vms():
4625 vm_id = vms.get('id').split(':')[-1]
4626
4627 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
4628
4629 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4630 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4631 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
4632 response = self.perform_request(req_type='GET',
4633 url=url_rest_call,
4634 headers=headers)
4635
4636 if response.status_code == 403:
4637 response = self.retry_rest('GET', url_rest_call)
4638
4639 if response.status_code != 200:
4640 self.logger.error("REST call {} failed reason : {} "\
4641 "status code : {}".format(url_rest_call,
4642 response.content,
4643 response.status_code))
4644 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
4645 "memory")
4646
4647 bytexml = bytes(bytearray(response.content, encoding='utf-8'))
4648 contentelem = lxmlElementTree.XML(bytexml)
4649 namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.iteritems() if prefix}
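#For python3
#namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.items() if prefix}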
4650 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4651
4652 # Find the reservation element in the response
4653 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
4654 for memelem in memelem_list:
4655 memelem.text = str(memory_mb)
4656
4657 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
4658
4659 response = self.perform_request(req_type='PUT',
4660 url=url_rest_call,
4661 headers=headers,
4662 data=newdata)
4663
4664 if response.status_code == 403:
4665 add_headers = {'Content-Type': headers['Content-Type']}
4666 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4667
4668 if response.status_code != 202:
4669 self.logger.error("REST call {} failed reason : {} "\
4670 "status code : {} ".format(url_rest_call,
4671 response.content,
4672 response.status_code))
4673 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
4674 "virtual hardware memory section")
4675 else:
4676 mem_task = self.get_task_from_response(response.content)
4677 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
4678 if result.get('status') == 'success':
4679 self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
4680 .format(vm_id))
4681 else:
4682 self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
4683 .format(vm_id))
4684
4685 def remove_primary_network_adapter_from_all_vms(self, vapp):
4686 """
4687 Method to remove the primary network adapter from all VMs
4688 Args :
4689 vapp - VApp
4690 Returns:
4691 None
4692 """
4693
4694 self.logger.info("Removing network adapter from all VMs")
4695 for vms in vapp.get_all_vms():
4696 vm_id = vms.get('id').split(':')[-1]
4697
4698 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
4699
4700 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4701 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4702 response = self.perform_request(req_type='GET',
4703 url=url_rest_call,
4704 headers=headers)
4705
4706 if response.status_code == 403:
4707 response = self.retry_rest('GET', url_rest_call)
4708
4709 if response.status_code != 200:
4710 self.logger.error("REST call {} failed reason : {} "\
4711 "status code : {}".format(url_rest_call,
4712 response.content,
4713 response.status_code))
4714 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
4715 "network connection section")
4716
4717 data = response.content
4718 data = data.split('<Link rel="edit"')[0]
4719
4720 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
4721
4722 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
4723 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
4724 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
4725 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
4726 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
4727 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
4728 xmlns:vmw="http://www.vmware.com/schema/ovf"
4729 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
4730 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
4731 xmlns:ns9="http://www.vmware.com/vcloud/versions"
4732 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
4733 <ovf:Info>Specifies the available VM network connections</ovf:Info>
4734 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
4735 <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
4736 </NetworkConnectionSection>""".format(url=url_rest_call)
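# Replacing the section with one that only carries PrimaryNetworkConnectionIndex (and no
# <NetworkConnection> entries) removes the existing NICs from the VM; NICs are expected to
# be (re)added afterwards, typically via add_network_adapter_to_vms().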
4737 response = self.perform_request(req_type='PUT',
4738 url=url_rest_call,
4739 headers=headers,
4740 data=newdata)
4741
4742 if response.status_code == 403:
4743 add_headers = {'Content-Type': headers['Content-Type']}
4744 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4745
4746 if response.status_code != 202:
4747 self.logger.error("REST call {} failed reason : {} "\
4748 "status code : {} ".format(url_rest_call,
4749 response.content,
4750 response.status_code))
4751 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
4752 "network connection section")
4753 else:
4754 nic_task = self.get_task_from_response(response.content)
4755 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
4756 if result.get('status') == 'success':
4757 self.logger.info("remove_primary_network_adapter(): primary network "\
4758 "adapter removed from VM {}".format(vm_id))
4759 else:
4760 self.logger.error("remove_primary_network_adapter(): failed to remove "\
4761 "primary network adapter from VM {}".format(vm_id))
4762
4763 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
4764 """
4765 Method to add network adapter type to vm
4766 Args :
4767 network_name - name of network
4768 primary_nic_index - int value for primary nic index
4769 nicIndex - int value for nic index
4770 nic_type - specify model name to which add to vm
4771 Returns:
4772 None
4773 """
4774
4775 self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
4776 format(network_name, nicIndex, nic_type))
4777 try:
4778 ip_address = None
4779 floating_ip = False
4780 mac_address = None
4781 if 'floating_ip' in net: floating_ip = net['floating_ip']
4782
4783 # Stub for ip_address feature
4784 if 'ip_address' in net: ip_address = net['ip_address']
4785
4786 if 'mac_address' in net: mac_address = net['mac_address']
4787
4788 if floating_ip:
4789 allocation_mode = "POOL"
4790 elif ip_address:
4791 allocation_mode = "MANUAL"
4792 else:
4793 allocation_mode = "DHCP"
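# vCloud Director IP allocation modes: POOL takes an address from the network's static
# pool (used here when a floating IP is requested), MANUAL uses the explicit ip_address,
# and DHCP is the default when neither is given.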
4794
4795 if not nic_type:
4796 for vms in vapp.get_all_vms():
4797 vm_id = vms.get('id').split(':')[-1]
4798
4799 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
4800
4801 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4802 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4803 response = self.perform_request(req_type='GET',
4804 url=url_rest_call,
4805 headers=headers)
4806
4807 if response.status_code == 403:
4808 response = self.retry_rest('GET', url_rest_call)
4809
4810 if response.status_code != 200:
4811 self.logger.error("REST call {} failed reason : {} "\
4812 "status code : {}".format(url_rest_call,
4813 response.content,
4814 response.status_code))
4815 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
4816 "network connection section")
4817
4818 data = response.content
4819 data = data.split('<Link rel="edit"')[0]
4820 if '<PrimaryNetworkConnectionIndex>' not in data:
4821 self.logger.debug("add_network_adapter PrimaryNIC not in data")
4822 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
4823 <NetworkConnection network="{}">
4824 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
4825 <IsConnected>true</IsConnected>
4826 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
4827 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
4828 allocation_mode)
4829 # Stub for ip_address feature
4830 if ip_address:
4831 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
4832 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
4833
4834 if mac_address:
4835 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
4836 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
4837
4838 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
4839 else:
4840 self.logger.debug("add_network_adapter PrimaryNIC in data")
4841 new_item = """<NetworkConnection network="{}">
4842 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
4843 <IsConnected>true</IsConnected>
4844 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
4845 </NetworkConnection>""".format(network_name, nicIndex,
4846 allocation_mode)
4847 # Stub for ip_address feature
4848 if ip_address:
4849 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
4850 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
4851
4852 if mac_address:
4853 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
4854 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
4855
4856 data = data + new_item + '</NetworkConnectionSection>'
4857
4858 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
4859
4860 response = self.perform_request(req_type='PUT',
4861 url=url_rest_call,
4862 headers=headers,
4863 data=data)
4864
4865 if response.status_code == 403:
4866 add_headers = {'Content-Type': headers['Content-Type']}
4867 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
4868
4869 if response.status_code != 202:
4870 self.logger.error("REST call {} failed reason : {} "\
4871 "status code : {} ".format(url_rest_call,
4872 response.content,
4873 response.status_code))
4874 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
4875 "network connection section")
4876 else:
4877 nic_task = self.get_task_from_response(response.content)
4878 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
4879 if result.get('status') == 'success':
4880 self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
4881 "default NIC type".format(vm_id))
4882 else:
4883 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
4884 "connect NIC type".format(vm_id))
4885 else:
4886 for vms in vapp.get_all_vms():
4887 vm_id = vms.get('id').split(':')[-1]
4888
4889 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
4890
4891 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4892 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4893 response = self.perform_request(req_type='GET',
4894 url=url_rest_call,
4895 headers=headers)
4896
4897 if response.status_code == 403:
4898 response = self.retry_rest('GET', url_rest_call)
4899
4900 if response.status_code != 200:
4901 self.logger.error("REST call {} failed reason : {} "\
4902 "status code : {}".format(url_rest_call,
4903 response.content,
4904 response.status_code))
4905 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
4906 "network connection section")
4907 data = response.content
4908 data = data.split('<Link rel="edit"')[0]
4909 vcd_netadapter_type = nic_type
4910 if nic_type in ['SR-IOV', 'VF']:
4911 vcd_netadapter_type = "SRIOVETHERNETCARD"
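# Both 'SR-IOV' and 'VF' interface types from the flavor map to vCD's SRIOVETHERNETCARD
# adapter type; any other value is passed through to <NetworkAdapterType> unchanged.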
4912
4913 if '<PrimaryNetworkConnectionIndex>' not in data:
4914 self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
4915 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
4916 <NetworkConnection network="{}">
4917 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
4918 <IsConnected>true</IsConnected>
4919 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
4920 <NetworkAdapterType>{}</NetworkAdapterType>
4921 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
4922 allocation_mode, vcd_netadapter_type)
4923 # Stub for ip_address feature
4924 if ip_address:
4925 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
4926 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
4927
4928 if mac_address:
4929 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
4930 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
4931
4932 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
4933 else:
4934 self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
4935 new_item = """<NetworkConnection network="{}">
4936 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
4937 <IsConnected>true</IsConnected>
4938 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
4939 <NetworkAdapterType>{}</NetworkAdapterType>
4940 </NetworkConnection>""".format(network_name, nicIndex,
4941 allocation_mode, vcd_netadapter_type)
4942 # Stub for ip_address feature
4943 if ip_address:
4944 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
4945 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
4946
4947 if mac_address:
4948 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
4949 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
4950
4951 data = data + new_item + '</NetworkConnectionSection>'
4952
4953 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
4954
4955 response = self.perform_request(req_type='PUT',
4956 url=url_rest_call,
4957 headers=headers,
4958 data=data)
4959
4960 if response.status_code == 403:
4961 add_headers = {'Content-Type': headers['Content-Type']}
4962 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
4963
4964 if response.status_code != 202:
4965 self.logger.error("REST call {} failed reason : {} "\
4966 "status code : {}".format(url_rest_call,
4967 response.content,
4968 response.status_code))
4969 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
4970 "network connection section")
4971 else:
4972 nic_task = self.get_task_from_response(response.content)
4973 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
4974 if result.get('status') == 'success':
4975 self.logger.info("add_network_adapter_to_vms(): VM {} "\
4976 "conneced to NIC type {}".format(vm_id, nic_type))
4977 else:
4978 self.logger.error("add_network_adapter_to_vms(): VM {} "\
4979 "failed to connect NIC type {}".format(vm_id, nic_type))
4980 except Exception as exp:
4981 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
4982 "while adding Network adapter")
4983 raise vimconn.vimconnException(message=exp)
4984
4985
4986 def set_numa_affinity(self, vmuuid, paired_threads_id):
4987 """
4988 Method to assign numa affinity in vm configuration parameters
4989 Args :
4990 vmuuid - vm uuid
4991 paired_threads_id - one or more virtual processor
4992 numbers
4993 Returns:
4994 return if True
4995 """
4996 try:
4997 vcenter_conect, content = self.get_vcenter_content()
4998 vm_moref_id = self.get_vm_moref_id(vmuuid)
4999
5000 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
5001 if vm_obj:
5002 config_spec = vim.vm.ConfigSpec()
5003 config_spec.extraConfig = []
5004 opt = vim.option.OptionValue()
5005 opt.key = 'numa.nodeAffinity'
5006 opt.value = str(paired_threads_id)
5007 config_spec.extraConfig.append(opt)
5008 task = vm_obj.ReconfigVM_Task(config_spec)
5009 if task:
5010 result = self.wait_for_vcenter_task(task, vcenter_conect)
5011 extra_config = vm_obj.config.extraConfig
5012 flag = False
5013 for opts in extra_config:
5014 if 'numa.nodeAffinity' in opts.key:
5015 flag = True
5016 self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
5017 "value {} for vm {}".format(opt.value, vm_obj))
5018 if flag:
5019 return
5020 else:
5021 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
5022 except Exception as exp:
5023 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
5024 "for VM {} : {}".format(vm_obj, vm_moref_id))
5025 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
5026 "affinity".format(exp))
5027
5028
5029 def cloud_init(self, vapp, cloud_config):
5030 """
5031 Method to inject ssh-key
5032 vapp - vapp object
5033 cloud_config a dictionary with:
5034 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
5035 'users': (optional) list of users to be inserted, each item is a dict with:
5036 'name': (mandatory) user name,
5037 'key-pairs': (optional) list of strings with the public key to be inserted to the user
5038 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
5039 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
5040 'config-files': (optional). List of files to be transferred. Each item is a dict with:
5041 'dest': (mandatory) string with the destination absolute path
5042 'encoding': (optional, by default text). Can be one of:
5043 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
5044 'content' (mandatory): string with the content of the file
5045 'permissions': (optional) string with file permissions, typically octal notation '0644'
5046 'owner': (optional) file owner, string with the format 'owner:group'
5047 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
5048 """
5049 try:
5050 if not isinstance(cloud_config, dict):
5051 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
5052 else:
5053 key_pairs = []
5054 userdata = []
5055 if "key-pairs" in cloud_config:
5056 key_pairs = cloud_config["key-pairs"]
5057
5058 if "users" in cloud_config:
5059 userdata = cloud_config["users"]
5060
5061 self.logger.debug("cloud_init : Guest os customization started..")
5062 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
5063 customize_script = customize_script.replace("&","&amp;")
5064 self.guest_customization(vapp, customize_script)
5065
5066 except Exception as exp:
5067 self.logger.error("cloud_init : exception occurred while injecting "\
5068 "ssh-key")
5069 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
5070 "ssh-key".format(exp))
5071
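# format_script() builds the guest-customization shell script: it appends the given public
# keys to /root/.ssh/authorized_keys and, for each entry in users_list, creates the user and
# installs that user's keys. The body only runs in the "precustomization" phase (the $1 check).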
5072 def format_script(self, key_pairs=[], users_list=[]):
5073 bash_script = """#!/bin/sh
5074 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5075 if [ "$1" = "precustomization" ];then
5076 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5077 """
5078
5079 keys = "\n".join(key_pairs)
5080 if keys:
5081 keys_data = """
5082 if [ ! -d /root/.ssh ];then
5083 mkdir /root/.ssh
5084 chown root:root /root/.ssh
5085 chmod 700 /root/.ssh
5086 touch /root/.ssh/authorized_keys
5087 chown root:root /root/.ssh/authorized_keys
5088 chmod 600 /root/.ssh/authorized_keys
5089 # make centos with selinux happy
5090 which restorecon && restorecon -Rv /root/.ssh
5091 else
5092 touch /root/.ssh/authorized_keys
5093 chown root:root /root/.ssh/authorized_keys
5094 chmod 600 /root/.ssh/authorized_keys
5095 fi
5096 echo '{key}' >> /root/.ssh/authorized_keys
5097 """.format(key=keys)
5098
5099 bash_script+= keys_data
5100
5101 for user in users_list:
5102 if 'name' in user: user_name = user['name']
5103 if 'key-pairs' in user:
5104 user_keys = "\n".join(user['key-pairs'])
5105 else:
5106 user_keys = None
5107
5108 add_user_name = """
5109 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
5110 """.format(user_name=user_name)
5111
5112 bash_script+= add_user_name
5113
5114 if user_keys:
5115 user_keys_data = """
5116 mkdir /home/{user_name}/.ssh
5117 chown {user_name}:{user_name} /home/{user_name}/.ssh
5118 chmod 700 /home/{user_name}/.ssh
5119 touch /home/{user_name}/.ssh/authorized_keys
5120 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
5121 chmod 600 /home/{user_name}/.ssh/authorized_keys
5122 # make centos with selinux happy
5123 which restorecon && restorecon -Rv /home/{user_name}/.ssh
5124 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
5125 """.format(user_name=user_name,user_key=user_keys)
5126
5127 bash_script+= user_keys_data
5128
5129 return bash_script+"\n\tfi"
5130
5131 def guest_customization(self, vapp, customize_script):
5132 """
5133 Method to customize guest os
5134 vapp - Vapp object
5135 customize_script - Customize script to be run at first boot of VM.
5136 """
5137 for vm in vapp.get_all_vms():
5138 vm_id = vm.get('id').split(':')[-1]
5139 vm_name = vm.get('name')
5140 vm_name = vm_name.replace('_','-')
5141
5142 vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
5143 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5144 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5145
5146 headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
5147
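# The GuestCustomizationSection below enables customization and embeds the script built by
# format_script(); vCloud Director invokes CustomizationScript with "precustomization" /
# "postcustomization" as its first argument, which is what that script keys on.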
5148 data = """<GuestCustomizationSection
5149 xmlns="http://www.vmware.com/vcloud/v1.5"
5150 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5151 ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
5152 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
5153 <Enabled>true</Enabled>
5154 <ChangeSid>false</ChangeSid>
5155 <VirtualMachineId>{}</VirtualMachineId>
5156 <JoinDomainEnabled>false</JoinDomainEnabled>
5157 <UseOrgSettings>false</UseOrgSettings>
5158 <AdminPasswordEnabled>false</AdminPasswordEnabled>
5159 <AdminPasswordAuto>true</AdminPasswordAuto>
5160 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
5161 <AdminAutoLogonCount>0</AdminAutoLogonCount>
5162 <ResetPasswordRequired>false</ResetPasswordRequired>
5163 <CustomizationScript>{}</CustomizationScript>
5164 <ComputerName>{}</ComputerName>
5165 <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
5166 </GuestCustomizationSection>
5167 """.format(vm_customization_url,
5168 vm_id,
5169 customize_script,
5170 vm_name,
5171 vm_customization_url)
5172
5173 response = self.perform_request(req_type='PUT',
5174 url=vm_customization_url,
5175 headers=headers,
5176 data=data)
5177 if response.status_code == 202:
5178 guest_task = self.get_task_from_response(response.content)
5179 self.client.get_task_monitor().wait_for_success(task=guest_task)
5180 self.logger.info("guest_customization : customized guest os task "\
5181 "completed for VM {}".format(vm_name))
5182 else:
5183 self.logger.error("guest_customization : task for customized guest os "\
5184 "failed for VM {}".format(vm_name))
5185 raise vimconn.vimconnException("guest_customization : failed to perform "\
5186 "guest os customization on VM {}".format(vm_name))
5187
5188 def add_new_disk(self, vapp_uuid, disk_size):
5189 """
5190 Method to create an empty vm disk
5191
5192 Args:
5193 vapp_uuid - is vapp identifier.
5194 disk_size - size of disk to be created in GB
5195
5196 Returns:
5197 None
5198 """
5199 status = False
5200 vm_details = None
5201 try:
5202 #Disk size in GB, convert it into MB
5203 if disk_size is not None:
5204 disk_size_mb = int(disk_size) * 1024
5205 vm_details = self.get_vapp_details_rest(vapp_uuid)
5206
5207 if vm_details and "vm_virtual_hardware" in vm_details:
5208 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5209 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5210 status = self.add_new_disk_rest(disk_href, disk_size_mb)
5211
5212 except Exception as exp:
5213 msg = "Error occurred while creating new disk {}.".format(exp)
5214 self.rollback_newvm(vapp_uuid, msg)
5215
5216 if status:
5217 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5218 else:
5219 #If failed to add disk, delete VM
5220 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
5221 self.rollback_newvm(vapp_uuid, msg)
5222
5223
5224 def add_new_disk_rest(self, disk_href, disk_size_mb):
5225 """
5226 Retrieves vApp Disks section & adds a new empty disk
5227
5228 Args:
5229 disk_href: Disk section href to add disk
5230 disk_size_mb: Disk size in MB
5231
5232 Returns: Status of add new disk task
5233 """
5234 status = False
5235 if self.client._session:
5236 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5237 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5238 response = self.perform_request(req_type='GET',
5239 url=disk_href,
5240 headers=headers)
5241
5242 if response.status_code == 403:
5243 response = self.retry_rest('GET', disk_href)
5244
5245 if response.status_code != requests.codes.ok:
5246 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
5247 .format(disk_href, response.status_code))
5248 return status
5249 try:
5250 #Find bus type & max instance ID assigned to existing disks
5251 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5252 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
5253 #For python3
5254 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
5255 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
5256 instance_id = 0
5257 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
5258 if item.find("rasd:Description",namespaces).text == "Hard disk":
5259 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
5260 if inst_id > instance_id:
5261 instance_id = inst_id
5262 disk_item = item.find("rasd:HostResource" ,namespaces)
5263 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
5264 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
5265
5266 instance_id = instance_id + 1
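# The new RASD Item reuses the bus type/subtype of the last existing hard disk, takes the
# next free InstanceID and sets ResourceType 17 (hard disk), which should attach it to the
# same controller as the existing disks.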
5267 new_item = """<Item>
5268 <rasd:Description>Hard disk</rasd:Description>
5269 <rasd:ElementName>New disk</rasd:ElementName>
5270 <rasd:HostResource
5271 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
5272 vcloud:capacity="{}"
5273 vcloud:busSubType="{}"
5274 vcloud:busType="{}"></rasd:HostResource>
5275 <rasd:InstanceID>{}</rasd:InstanceID>
5276 <rasd:ResourceType>17</rasd:ResourceType>
5277 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
5278
5279 new_data = response.content
5280 #Add new item at the bottom
5281 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
5282
5283 # Send PUT request to modify virtual hardware section with new disk
5284 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
5285
5286 response = self.perform_request(req_type='PUT',
5287 url=disk_href,
5288 data=new_data,
5289 headers=headers)
5290
5291 if response.status_code == 403:
5292 add_headers = {'Content-Type': headers['Content-Type']}
5293 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
5294
5295 if response.status_code != 202:
5296 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
5297 .format(disk_href, response.status_code, response.content))
5298 else:
5299 add_disk_task = self.get_task_from_response(response.content)
5300 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
5301 if result.get('status') == 'success':
5302 status = True
5303 else:
5304 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
5305
5306 except Exception as exp:
5307 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
5308
5309 return status
5310
5311
5312 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
5313 """
5314 Method to add existing disk to vm
5315 Args :
5316 catalogs - List of VDC catalogs
5317 image_id - Catalog ID
5318 template_name - Name of template in catalog
5319 vapp_uuid - UUID of vApp
5320 Returns:
5321 None
5322 """
5323 disk_info = None
5324 vcenter_conect, content = self.get_vcenter_content()
5325 #find moref-id of vm in image
5326 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
5327 image_id=image_id,
5328 )
5329
5330 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
5331 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
5332 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
5333 if catalog_vm_moref_id:
5334 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
5335 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
5336 if catalog_vm_obj:
5337 #find existing disk
5338 disk_info = self.find_disk(catalog_vm_obj)
5339 else:
5340 exp_msg = "No VM with image id {} found".format(image_id)
5341 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5342 else:
5343 exp_msg = "No Image found with image ID {} ".format(image_id)
5344 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5345
5346 if disk_info:
5347 self.logger.info("Existing disk_info : {}".format(disk_info))
5348 #get VM
5349 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5350 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
5351 if vm_obj:
5352 status = self.add_disk(vcenter_conect=vcenter_conect,
5353 vm=vm_obj,
5354 disk_info=disk_info,
5355 size=size,
5356 vapp_uuid=vapp_uuid
5357 )
5358 if status:
5359 self.logger.info("Disk from image id {} added to {}".format(image_id,
5360 vm_obj.config.name)
5361 )
5362 else:
5363 msg = "No disk found with image id {} to add in VM {}".format(
5364 image_id,
5365 vm_obj.config.name)
5366 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
5367
5368
5369 def find_disk(self, vm_obj):
5370 """
5371 Method to find details of existing disk in VM
5372 Args :
5373 vm_obj - vCenter object of VM
5374 image_id - Catalog ID
5375 Returns:
5376 disk_info : dict of disk details
5377 """
5378 disk_info = {}
5379 if vm_obj:
5380 try:
5381 devices = vm_obj.config.hardware.device
5382 for device in devices:
5383 if type(device) is vim.vm.device.VirtualDisk:
5384 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
5385 disk_info["full_path"] = device.backing.fileName
5386 disk_info["datastore"] = device.backing.datastore
5387 disk_info["capacityKB"] = device.capacityInKB
5388 break
5389 except Exception as exp:
5390 self.logger.error("find_disk() : exception occurred while "\
5391 "getting existing disk details :{}".format(exp))
5392 return disk_info
5393
5394
5395 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
5396 """
5397 Method to add existing disk in VM
5398 Args :
5399 vcenter_conect - vCenter content object
5400 vm - vCenter vm object
5401 disk_info : dict of disk details
5402 Returns:
5403 status : status of add disk task
5404 """
5405 datastore = disk_info["datastore"] if "datastore" in disk_info else None
5406 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
5407 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
5408 if size is not None:
5409 #Convert size from GB to KB
5410 sizeKB = int(size) * 1024 * 1024
5411 #compare size of existing disk and user given size. Assign whichever is greater
5412 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
5413 sizeKB, capacityKB))
5414 if sizeKB > capacityKB:
5415 capacityKB = sizeKB
5416
5417 if datastore and fullpath and capacityKB:
5418 try:
5419 spec = vim.vm.ConfigSpec()
5420 # get all disks on a VM, set unit_number to the next available
5421 unit_number = 0
5422 for dev in vm.config.hardware.device:
5423 if hasattr(dev.backing, 'fileName'):
5424 unit_number = int(dev.unitNumber) + 1
5425 # unit_number 7 reserved for scsi controller
5426 if unit_number == 7:
5427 unit_number += 1
5428 if isinstance(dev, vim.vm.device.VirtualDisk):
5429 #vim.vm.device.VirtualSCSIController
5430 controller_key = dev.controllerKey
5431
5432 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
5433 unit_number, controller_key))
5434 # add disk here
5435 dev_changes = []
5436 disk_spec = vim.vm.device.VirtualDeviceSpec()
5437 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5438 disk_spec.device = vim.vm.device.VirtualDisk()
5439 disk_spec.device.backing = \
5440 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
5441 disk_spec.device.backing.thinProvisioned = True
5442 disk_spec.device.backing.diskMode = 'persistent'
5443 disk_spec.device.backing.datastore = datastore
5444 disk_spec.device.backing.fileName = fullpath
5445
5446 disk_spec.device.unitNumber = unit_number
5447 disk_spec.device.capacityInKB = capacityKB
5448 disk_spec.device.controllerKey = controller_key
5449 dev_changes.append(disk_spec)
5450 spec.deviceChange = dev_changes
5451 task = vm.ReconfigVM_Task(spec=spec)
5452 status = self.wait_for_vcenter_task(task, vcenter_conect)
5453 return status
5454 except Exception as exp:
5455 exp_msg = "add_disk() : exception {} occurred while adding disk "\
5456 "{} to vm {}".format(exp,
5457 fullpath,
5458 vm.config.name)
5459 self.rollback_newvm(vapp_uuid, exp_msg)
5460 else:
5461 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
5462 self.rollback_newvm(vapp_uuid, msg)
5463
5464
5465 def get_vcenter_content(self):
5466 """
5467 Get the vsphere content object
5468 """
5469 try:
5470 vm_vcenter_info = self.get_vm_vcenter_info()
5471 except Exception as exp:
5472 self.logger.error("Error occurred while getting vCenter information"\
5473 " for VM : {}".format(exp))
5474 raise vimconn.vimconnException(message=exp)
5475
5476 context = None
5477 if hasattr(ssl, '_create_unverified_context'):
5478 context = ssl._create_unverified_context()
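# Note: certificate verification is skipped for the vCenter connection; an unverified SSL
# context is used whenever the Python ssl module provides one.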
5479
5480 vcenter_conect = SmartConnect(
5481 host=vm_vcenter_info["vm_vcenter_ip"],
5482 user=vm_vcenter_info["vm_vcenter_user"],
5483 pwd=vm_vcenter_info["vm_vcenter_password"],
5484 port=int(vm_vcenter_info["vm_vcenter_port"]),
5485 sslContext=context
5486 )
5487 atexit.register(Disconnect, vcenter_conect)
5488 content = vcenter_conect.RetrieveContent()
5489 return vcenter_conect, content
5490
5491
5492 def get_vm_moref_id(self, vapp_uuid):
5493 """
5494 Get the moref_id of given VM
5495 """
5496 try:
5497 if vapp_uuid:
5498 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
5499 if vm_details and "vm_vcenter_info" in vm_details:
5500 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
5501 return vm_moref_id
5502
5503 except Exception as exp:
5504 self.logger.error("Error occurred while getting VM moref ID "\
5505 " for VM : {}".format(exp))
5506 return None
5507
5508
5509 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
5510 """
5511 Method to get vApp template details
5512 Args :
5513 catalogs - list of VDC catalogs
5514 image_id - Catalog ID to find
5515 template_name : template name in catalog
5516 Returns:
5517 parsed_response : dict of vApp template details
5518 """
5519 parsed_response = {}
5520
5521 vca = self.connect_as_admin()
5522 if not vca:
5523 raise vimconn.vimconnConnectionException("Failed to connect vCD")
5524
5525 try:
5526 org, vdc = self.get_vdc_details()
5527 catalog = self.get_catalog_obj(image_id, catalogs)
5528 if catalog:
5529 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
5530 catalog_items = [items.attrib]
5531
5532 if len(catalog_items) == 1:
5533 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5534 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5535
5536 response = self.perform_request(req_type='GET',
5537 url=catalog_items[0].get('href'),
5538 headers=headers)
5539 catalogItem = XmlElementTree.fromstring(response.content)
5540 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
5541 vapp_tempalte_href = entity.get("href")
5542 #get vapp details and parse moref id
5543
5544 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
5545 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
5546 'vmw': 'http://www.vmware.com/schema/ovf',
5547 'vm': 'http://www.vmware.com/vcloud/v1.5',
5548 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5549 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
5550 'xmlns':"http://www.vmware.com/vcloud/v1.5"
5551 }
5552
5553 if vca._session:
5554 response = self.perform_request(req_type='GET',
5555 url=vapp_tempalte_href,
5556 headers=headers)
5557
5558 if response.status_code != requests.codes.ok:
5559 self.logger.debug("REST API call {} failed. Return status code {}".format(
5560 vapp_tempalte_href, response.status_code))
5561
5562 else:
5563 xmlroot_respond = XmlElementTree.fromstring(response.content)
5564 children_section = xmlroot_respond.find('vm:Children/', namespaces)
5565 if children_section is not None:
5566 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
5567 if vCloud_extension_section is not None:
5568 vm_vcenter_info = {}
5569 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
5570 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
5571 if vmext is not None:
5572 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
5573 parsed_response["vm_vcenter_info"]= vm_vcenter_info
5574
5575 except Exception as exp :
5576 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
5577
5578 return parsed_response
5579
5580
5581 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
5582 """
5583 Method to delete vApp
5584 Args :
5585 vapp_uuid - vApp UUID
5586 msg - Error message to be logged
5587 exp_type : Exception type
5588 Returns:
5589 None
5590 """
5591 if vapp_uuid:
5592 status = self.delete_vminstance(vapp_uuid)
5593 else:
5594 msg = "No vApp ID"
5595 self.logger.error(msg)
5596 if exp_type == "Genric":
5597 raise vimconn.vimconnException(msg)
5598 elif exp_type == "NotFound":
5599 raise vimconn.vimconnNotFoundException(message=msg)
5600
5601 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
5602 """
5603 Method to attach SRIOV adapters to VM
5604
5605 Args:
5606 vapp_uuid - uuid of vApp/VM
5607 sriov_nets - SRIOV devices information as specified in VNFD (flavor)
5608 vmname_andid - vmname
5609
5610 Returns:
5611 The status of add SRIOV adapter task , vm object and
5612 vcenter_conect object
5613 """
5614 vm_obj = None
5615 vcenter_conect, content = self.get_vcenter_content()
5616 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5617
5618 if vm_moref_id:
5619 try:
5620 no_of_sriov_devices = len(sriov_nets)
5621 if no_of_sriov_devices > 0:
5622 #Get VM and its host
5623 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5624 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
5625 if host_obj and vm_obj:
5626 #get SRIOV devices from host on which vapp is currently installed
5627 avilable_sriov_devices = self.get_sriov_devices(host_obj,
5628 no_of_sriov_devices,
5629 )
5630
5631 if len(avilable_sriov_devices) == 0:
5632 #find other hosts with active pci devices
5633 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
5634 content,
5635 no_of_sriov_devices,
5636 )
5637
5638 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
5639 #Migrate vm to the host where SRIOV devices are available
5640 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
5641 new_host_obj))
5642 task = self.relocate_vm(new_host_obj, vm_obj)
5643 if task is not None:
5644 result = self.wait_for_vcenter_task(task, vcenter_conect)
5645 self.logger.info("Migrate VM status: {}".format(result))
5646 host_obj = new_host_obj
5647 else:
5648 self.logger.error("Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj))
5649 raise vimconn.vimconnNotFoundException(
5650 "Fail to migrate VM : {} to host {}".format(
5651 vmname_andid,
5652 new_host_obj)
5653 )
5654
5655 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
5656 #Add SRIOV devices one by one
5657 for sriov_net in sriov_nets:
5658 network_name = sriov_net.get('net_id')
5659 dvs_portgr_name = self.create_dvPort_group(network_name)
5660 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
5661 #add vlan ID ,Modify portgroup for vlan ID
5662 self.configure_vlanID(content, vcenter_conect, network_name)
5663
5664 task = self.add_sriov_to_vm(content,
5665 vm_obj,
5666 host_obj,
5667 network_name,
5668 avilable_sriov_devices[0]
5669 )
5670 if task:
5671 status= self.wait_for_vcenter_task(task, vcenter_conect)
5672 if status:
5673 self.logger.info("Added SRIOV {} to VM {}".format(
5674 no_of_sriov_devices,
5675 str(vm_obj)))
5676 else:
5677 self.logger.error("Fail to add SRIOV {} to VM {}".format(
5678 no_of_sriov_devices,
5679 str(vm_obj)))
5680 raise vimconn.vimconnUnexpectedResponse(
5681 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
5682 )
5683 return True, vm_obj, vcenter_conect
5684 else:
5685 self.logger.error("Currently there is no host with"\
5686 " {} number of avaialble SRIOV "\
5687 "VFs required for VM {}".format(
5688 no_of_sriov_devices,
5689 vmname_andid)
5690 )
5691 raise vimconn.vimconnNotFoundException(
5692 "Currently there is no host with {} "\
5693 "number of avaialble SRIOV devices required for VM {}".format(
5694 no_of_sriov_devices,
5695 vmname_andid))
5696 else:
5697 self.logger.debug("No information about SRIOV devices {}".format(sriov_nets))
5698
5699 except vmodl.MethodFault as error:
5700 self.logger.error("Error occurred while adding SRIOV: {}".format(error))
5701 return None, vm_obj, vcenter_conect
5702
5703
5704 def get_sriov_devices(self,host, no_of_vfs):
5705 """
5706 Method to get the details of SRIOV devices on given host
5707 Args:
5708 host - vSphere host object
5709 no_of_vfs - number of VFs needed on host
5710
5711 Returns:
5712 array of SRIOV devices
5713 """
5714 sriovInfo=[]
5715 if host:
5716 for device in host.config.pciPassthruInfo:
5717 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
5718 if device.numVirtualFunction >= no_of_vfs:
5719 sriovInfo.append(device)
5720 break
5721 return sriovInfo
5722
5723
5724 def get_host_and_sriov_devices(self, content, no_of_vfs):
5725 """
5726 Method to get the details of SRIOV devices on all hosts
5727
5728 Args:
5729 content - vSphere host object
5730 no_of_vfs - number of pci VFs needed on host
5731
5732 Returns:
5733 array of SRIOV devices and host object
5734 """
5735 host_obj = None
5736 sriov_device_objs = None
5737 try:
5738 if content:
5739 container = content.viewManager.CreateContainerView(content.rootFolder,
5740 [vim.HostSystem], True)
5741 for host in container.view:
5742 devices = self.get_sriov_devices(host, no_of_vfs)
5743 if devices:
5744 host_obj = host
5745 sriov_device_objs = devices
5746 break
5747 except Exception as exp:
5748 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
5749
5750 return host_obj,sriov_device_objs
5751
5752
5753 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
5754 """
5755 Method to add SRIOV adapter to vm
5756
5757 Args:
5758 host_obj - vSphere host object
5759 vm_obj - vSphere vm object
5760 content - vCenter content object
5761 network_name - name of distributed virtual portgroup
5762 sriov_device - SRIOV device info
5763
5764 Returns:
5765 task object
5766 """
5767 devices = []
5768 vnic_label = "sriov nic"
5769 try:
5770 dvs_portgr = self.get_dvport_group(network_name)
5771 network_name = dvs_portgr.name
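# Build a VirtualSriovEthernetCard spec: the vNIC connects to the distributed portgroup
# looked up above, and sriovBacking.physicalFunctionBacking points the virtual function
# at the host physical function's PCI id.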
5772 nic = vim.vm.device.VirtualDeviceSpec()
5773 # VM device
5774 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5775 nic.device = vim.vm.device.VirtualSriovEthernetCard()
5776 nic.device.addressType = 'assigned'
5777 #nic.device.key = 13016
5778 nic.device.deviceInfo = vim.Description()
5779 nic.device.deviceInfo.label = vnic_label
5780 nic.device.deviceInfo.summary = network_name
5781 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
5782
5783 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
5784 nic.device.backing.deviceName = network_name
5785 nic.device.backing.useAutoDetect = False
5786 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
5787 nic.device.connectable.startConnected = True
5788 nic.device.connectable.allowGuestControl = True
5789
5790 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
5791 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
5792 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
5793
5794 devices.append(nic)
5795 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
5796 task = vm_obj.ReconfigVM_Task(vmconf)
5797 return task
5798 except Exception as exp:
5799 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
5800 return None
5801
5802
5803 def create_dvPort_group(self, network_name):
5804 """
5805 Method to create distributed virtual portgroup
5806
5807 Args:
5808 network_name - name of network/portgroup
5809
5810 Returns:
5811 portgroup key
5812 """
5813 try:
5814 new_network_name = [network_name, '-', str(uuid.uuid4())]
5815 network_name=''.join(new_network_name)
5816 vcenter_conect, content = self.get_vcenter_content()
5817
5818 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
5819 if dv_switch:
5820 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
5821 dv_pg_spec.name = network_name
5822
5823 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
5824 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
5825 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
5826 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
5827 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
5828 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
5829
5830 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
5831 self.wait_for_vcenter_task(task, vcenter_conect)
5832
5833 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
5834 if dvPort_group:
5835 self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
5836 return dvPort_group.key
5837 else:
5838 self.logger.debug("No distributed virtual switch found with name {}".format(network_name))
5839
5840 except Exception as exp:
5841 self.logger.error("Error occurred while creating distributed virtual port group {}"\
5842 " : {}".format(network_name, exp))
5843 return None
5844
5845 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
5846 """
5847 Method to reconfigure distributed virtual portgroup
5848
5849 Args:
5850 dvPort_group_name - name of distributed virtual portgroup
5851 content - vCenter content object
5852 config_info - distributed virtual portgroup configuration
5853
5854 Returns:
5855 task object
5856 """
5857 try:
5858 dvPort_group = self.get_dvport_group(dvPort_group_name)
5859 if dvPort_group:
5860 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
5861 dv_pg_spec.configVersion = dvPort_group.config.configVersion
5862 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
5863 if "vlanID" in config_info:
5864 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
5865 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
5866
5867 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
5868 return task
5869 else:
5870 return None
5871 except Exception as exp:
5872 self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
5873 " : {}".format(dvPort_group_name, exp))
5874 return None
5875
5876
5877 def destroy_dvport_group(self , dvPort_group_name):
5878 """
5879 Method to destroy distributed virtual portgroup
5880
5881 Args:
5882 network_name - name of network/portgroup
5883
5884 Returns:
5885 True if portgroup successfully got deleted else false
5886 """
5887 vcenter_conect, content = self.get_vcenter_content()
5888 try:
5889 status = None
5890 dvPort_group = self.get_dvport_group(dvPort_group_name)
5891 if dvPort_group:
5892 task = dvPort_group.Destroy_Task()
5893 status = self.wait_for_vcenter_task(task, vcenter_conect)
5894 return status
5895 except vmodl.MethodFault as exp:
5896 self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
5897 exp, dvPort_group_name))
5898 return None
5899
5900
5901 def get_dvport_group(self, dvPort_group_name):
5902 """
5903 Method to get distributed virtual portgroup
5904
5905 Args:
5906 network_name - name of network/portgroup
5907
5908 Returns:
5909 portgroup object
5910 """
5911 vcenter_conect, content = self.get_vcenter_content()
5912 dvPort_group = None
5913 try:
5914 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
5915 for item in container.view:
5916 if item.key == dvPort_group_name:
5917 dvPort_group = item
5918 break
5919 return dvPort_group
5920 except vmodl.MethodFault as exp:
5921 self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
5922 exp, dvPort_group_name))
5923 return None
5924
5925 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
5926 """
5927 Method to get distributed virtual portgroup vlanID
5928
5929 Args:
5930 network_name - name of network/portgroup
5931
5932 Returns:
5933 vlan ID
5934 """
5935 vlanId = None
5936 try:
5937 dvPort_group = self.get_dvport_group(dvPort_group_name)
5938 if dvPort_group:
5939 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
5940 except vmodl.MethodFault as exp:
5941 self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
5942 exp, dvPort_group_name))
5943 return vlanId
5944
5945
5946 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
5947 """
5948 Method to configure vlanID in distributed virtual portgroup
5949
5950 Args:
5951 network_name - name of network/portgroup
5952
5953 Returns:
5954 None
5955 """
5956 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
5957 if vlanID == 0:
5958 #configure vlanID
5959 vlanID = self.genrate_vlanID(dvPort_group_name)
5960 config = {"vlanID":vlanID}
5961 task = self.reconfig_portgroup(content, dvPort_group_name,
5962 config_info=config)
5963 if task:
5964 status= self.wait_for_vcenter_task(task, vcenter_conect)
5965 if status:
5966 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
5967 dvPort_group_name,vlanID))
5968 else:
5969 self.logger.error("Failed to reconfigure portgroup {} for vlanID {}".format(
5970 dvPort_group_name, vlanID))
5971
5972
5973 def genrate_vlanID(self, network_name):
5974 """
5975 Method to allocate an unused vlan ID from the configured 'vlanID_range'
5976 Args:
5977 network_name - name of network/portgroup the vlan ID is allocated for
5978 Returns:
5979 vlan ID (raises vimconnConflictException when all IDs are in use)
5980 """
5981 vlan_id = None
5982 used_ids = []
5983 if self.config.get('vlanID_range') is None:
5984 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
5985 "config value before creating an SRIOV network with a vlan tag")
5986 if "used_vlanIDs" not in self.persistent_info:
5987 self.persistent_info["used_vlanIDs"] = {}
5988 else:
5989 used_ids = self.persistent_info["used_vlanIDs"].values()
5990 #For python3
5991 #used_ids = list(self.persistent_info["used_vlanIDs"].values())
5992
5993 for vlanID_range in self.config.get('vlanID_range'):
5994 start_vlanid , end_vlanid = vlanID_range.split("-")
5995 if start_vlanid > end_vlanid:
5996 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
5997 vlanID_range))
5998
5999 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
6000 #For python3
6001 #for id in range(int(start_vlanid), int(end_vlanid) + 1):
6002 if id not in used_ids:
6003 vlan_id = id
6004 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
6005 return vlan_id
6006 if vlan_id is None:
6007 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
6008
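# Illustrative sketch (assumption): 'vlanID_range' is expected to be a list of
# "start-end" strings; allocated IDs are recorded per network name in
# self.persistent_info["used_vlanIDs"]. Values below are hypothetical.
#
#   vim_conn.config['vlanID_range'] = ["3000-3100", "3200-3250"]
#   vlan = vim_conn.genrate_vlanID("sriov-net-01")   # returns the first free ID, e.g. 3000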
6009
6010 def get_obj(self, content, vimtype, name):
6011 """
6012 Get the vsphere object associated with a given text name
6013 """
6014 obj = None
6015 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
6016 for item in container.view:
6017 if item.name == name:
6018 obj = item
6019 break
6020 return obj
6021
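# Illustrative sketch (assumption): looking up a VM managed object by its name.
#
#   vcenter_conect, content = vim_conn.get_vcenter_content()
#   vm_obj = vim_conn.get_obj(content, [vim.VirtualMachine], "my-vm-name")
#   if vm_obj is None:
#       vim_conn.logger.debug("VM my-vm-name not found in vCenter inventory")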
6022
6023 def insert_media_to_vm(self, vapp, image_id):
6024 """
6025 Method to insert media CD-ROM (ISO image) from catalog to vm.
6026 vapp - vapp object to get vm id
6027 image_id - image id of the CD-ROM (ISO) to be inserted into the vm
6028 """
6029 # create connection object
6030 vca = self.connect()
6031 try:
6032 # fetching catalog details
6033 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
6034 if vca._session:
6035 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6036 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6037 response = self.perform_request(req_type='GET',
6038 url=rest_url,
6039 headers=headers)
6040
6041 if response.status_code != 200:
6042 self.logger.error("REST call {} failed reason : {}"\
6043 "status code : {}".format(url_rest_call,
6044 response.content,
6045 response.status_code))
6046 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
6047 "catalog details")
6048 # searching iso name and id
6049 iso_name,media_id = self.get_media_details(vca, response.content)
6050
6051 if iso_name and media_id:
6052 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6053 <ns6:MediaInsertOrEjectParams
6054 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
6055 <ns6:Media
6056 type="application/vnd.vmware.vcloud.media+xml"
6057 name="{}.iso"
6058 id="urn:vcloud:media:{}"
6059 href="https://{}/api/media/{}"/>
6060 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
6061 self.url,media_id)
6062
6063 for vms in vapp.get_all_vms():
6064 vm_id = vms.get('id').split(':')[-1]
6065
6066 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
6067 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
6068
6069 response = self.perform_request(req_type='POST',
6070 url=rest_url,
6071 data=data,
6072 headers=headers)
6073
6074 if response.status_code != 202:
6075 self.logger.error("Failed to insert CD-ROM to vm")
6076 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
6077 "ISO image to vm")
6078 else:
6079 task = self.get_task_from_response(response.content)
6080 result = self.client.get_task_monitor().wait_for_success(task=task)
6081 if result.get('status') == 'success':
6082 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
6083 " image to vm {}".format(vm_id))
6084
6085 except Exception as exp:
6086 self.logger.error("insert_media_to_vm() : exception occurred "\
6087 "while inserting media CD-ROM")
6088 raise vimconn.vimconnException(message=exp)
6089
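# Illustrative sketch (assumption, not original code): attaching a catalog ISO
# (identified by its catalog id) to the VMs of a deployed vApp. The vApp name and
# catalog id are hypothetical.
#
#   org, vdc = vim_conn.get_vdc_details()
#   vapp = VApp(vim_conn.client, resource=vdc.get_vapp("my-vapp"))
#   vim_conn.insert_media_to_vm(vapp, catalog_image_id)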
6090
6091 def get_media_details(self, vca, content):
6092 """
6093 Method to get catalog item details
6094 vca - connection object
6095 content - Catalog details
6096 Return - Media name, media id
6097 """
6098 cataloghref_list = []
6099 try:
6100 if content:
6101 vm_list_xmlroot = XmlElementTree.fromstring(content)
6102 for child in vm_list_xmlroot.iter():
6103 if 'CatalogItem' in child.tag:
6104 cataloghref_list.append(child.attrib.get('href'))
6105 if cataloghref_list:
6106 for href in cataloghref_list:
6107 if href:
6108 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6109 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6110 response = self.perform_request(req_type='GET',
6111 url=href,
6112 headers=headers)
6113 if response.status_code != 200:
6114 self.logger.error("REST call {} failed reason : {}"\
6115 "status code : {}".format(href,
6116 response.content,
6117 response.status_code))
6118 raise vimconn.vimconnException("get_media_details : Failed to get "\
6119 "catalogitem details")
6120 list_xmlroot = XmlElementTree.fromstring(response.content)
6121 for child in list_xmlroot.iter():
6122 if 'Entity' in child.tag:
6123 if 'media' in child.attrib.get('href'):
6124 name = child.attrib.get('name')
6125 media_id = child.attrib.get('href').split('/').pop()
6126 return name,media_id
6127 else:
6128 self.logger.debug("Media name and id not found")
6129 return False,False
6130 except Exception as exp:
6131 self.logger.error("get_media_details : exception occurred "\
6132 "getting media details")
6133 raise vimconn.vimconnException(message=exp)
6134
6135
6136 def retry_rest(self, method, url, add_headers=None, data=None):
6137 """ Method to get Token & retry respective REST request
6138 Args:
6139 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
6140 url - request url to be used
6141 add_headers - Additional headers (optional)
6142 data - Request payload data to be passed in request
6143 Returns:
6144 response - Response of request
6145 """
6146 response = None
6147
6148 #Get token
6149 self.get_token()
6150
6151 if self.client._session:
6152 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6153 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6154
6155 if add_headers:
6156 headers.update(add_headers)
6157
6158 if method == 'GET':
6159 response = self.perform_request(req_type='GET',
6160 url=url,
6161 headers=headers)
6162 elif method == 'PUT':
6163 response = self.perform_request(req_type='PUT',
6164 url=url,
6165 headers=headers,
6166 data=data)
6167 elif method == 'POST':
6168 response = self.perform_request(req_type='POST',
6169 url=url,
6170 headers=headers,
6171 data=data)
6172 elif method == 'DELETE':
6173 response = self.perform_request(req_type='DELETE',
6174 url=url,
6175 headers=headers)
6176 return response
6177
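# Illustrative sketch (assumption): retrying a GET after a 403 (expired token),
# which is the intended use of retry_rest() together with get_token().
#
#   response = vim_conn.perform_request(req_type='GET', url=some_url, headers=headers)
#   if response.status_code == 403:
#       response = vim_conn.retry_rest('GET', some_url)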
6178
6179 def get_token(self):
6180 """ Generate a new token if expired
6181
6182 Returns:
6183 The return client object that letter can be used to connect to vCloud director as admin for VDC
6184 """
6185 try:
6186 self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
6187 self.user,
6188 self.org_name))
6189 host = self.url
6190 client = Client(host, verify_ssl_certs=False)
6191 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
6192 # connection object
6193 self.client = client
6194
6195 except Exception:
6196 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
6197 "{} as user: {}".format(self.org_name, self.user))
6198
6199 if not client:
6200 raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
6201
6202
6203 def get_vdc_details(self):
6204 """ Get VDC details using pyVcloud Lib
6205
6206 Returns org and vdc object
6207 """
6208 org = Org(self.client, resource=self.client.get_org())
6209 vdc = org.get_vdc(self.tenant_name)
6210
6211 #Retry once, if failed by refreshing token
6212 if vdc is None:
6213 self.get_token()
6214 vdc = org.get_vdc(self.tenant_name)
6215
6216 return org, vdc
6217
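# Illustrative sketch (assumption): refreshing the login token and fetching the
# tenant VDC; get_vdc_details() already retries once internally with a fresh token.
#
#   vim_conn.get_token()
#   org, vdc = vim_conn.get_vdc_details()
#   if vdc is None:
#       raise vimconn.vimconnConnectionException("VDC {} not found".format(vim_conn.tenant_name))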
6218
6219 def perform_request(self, req_type, url, headers=None, data=None):
6220 """Perform the POST/PUT/GET/DELETE request."""
6221
6222 #Log REST request details
6223 self.log_request(req_type, url=url, headers=headers, data=data)
6224 # perform request and return its result
6225 if req_type == 'GET':
6226 response = requests.get(url=url,
6227 headers=headers,
6228 verify=False)
6229 elif req_type == 'PUT':
6230 response = requests.put(url=url,
6231 headers=headers,
6232 data=data,
6233 verify=False)
6234 elif req_type == 'POST':
6235 response = requests.post(url=url,
6236 headers=headers,
6237 data=data,
6238 verify=False)
6239 elif req_type == 'DELETE':
6240 response = requests.delete(url=url,
6241 headers=headers,
6242 verify=False)
6243 #Log the REST response
6244 self.log_response(response)
6245
6246 return response
6247
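# Illustrative sketch (assumption): a minimal authenticated GET against the vCD API
# using the session token held by the pyvcloud client, mirroring the calls above.
#
#   headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
#              'x-vcloud-authorization': vim_conn.client._session.headers['x-vcloud-authorization']}
#   response = vim_conn.perform_request(req_type='GET',
#                                       url="{}/api/org".format(vim_conn.url),
#                                       headers=headers)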
6248
6249 def log_request(self, req_type, url=None, headers=None, data=None):
6250 """Logs REST request details"""
6251
6252 if req_type is not None:
6253 self.logger.debug("Request type: {}".format(req_type))
6254
6255 if url is not None:
6256 self.logger.debug("Request url: {}".format(url))
6257
6258 if headers is not None:
6259 for header in headers:
6260 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
6261
6262 if data is not None:
6263 self.logger.debug("Request data: {}".format(data))
6264
6265
6266 def log_response(self, response):
6267 """Logs REST response details"""
6268
6269 self.logger.debug("Response status code: {} ".format(response.status_code))
6270
6271
6272 def get_task_from_response(self, content):
6273 """
6274 content - API response content(response.content)
6275 return task object
6276 """
6277 xmlroot = XmlElementTree.fromstring(content)
6278 if xmlroot.tag.split('}')[1] == "Task":
6279 return xmlroot
6280 else:
6281 for ele in xmlroot:
6282 if ele.tag.split("}")[1] == "Tasks":
6283 task = ele[0]
6284 break
6285 return task
6286
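# Illustrative sketch (assumption): extracting the Task element from an asynchronous
# vCD REST response and waiting for it with the pyvcloud task monitor.
#
#   task = vim_conn.get_task_from_response(response.content)
#   result = vim_conn.client.get_task_monitor().wait_for_success(task=task)
#   if result.get('status') != 'success':
#       vim_conn.logger.error("vCD task did not complete successfully")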
6287
6288 def power_on_vapp(self,vapp_id, vapp_name):
6289 """
6290 vapp_id - vApp uuid
6291 vapp_name - vApp name
6292 return - Task object
6293 """
6294 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6295 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6296
6297 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
6298 vapp_id)
6299 response = self.perform_request(req_type='POST',
6300 url=poweron_href,
6301 headers=headers)
6302
6303 if response.status_code != 202:
6304 self.logger.error("REST call {} failed reason : {}"\
6305 "status code : {} ".format(poweron_href,
6306 response.content,
6307 response.status_code))
6308 raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
6309 "vApp {}".format(vapp_name))
6310 else:
6311 poweron_task = self.get_task_from_response(response.content)
6312 return poweron_task
6313
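# Illustrative sketch (assumption): powering on an already deployed vApp and waiting
# for the asynchronous task to complete; vapp_id and vapp_name are hypothetical.
#
#   task = vim_conn.power_on_vapp(vapp_id, vapp_name)
#   result = vim_conn.client.get_task_monitor().wait_for_success(task=task)
#   if result.get('status') == 'success':
#       vim_conn.logger.info("vApp {} powered on".format(vapp_name))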
6314