1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware: implementation of an abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
68 # global variable for vcd connector type
69 STANDALONE = 'standalone'
70
71 # key for flavor dicts
72 FLAVOR_RAM_KEY = 'ram'
73 FLAVOR_VCPUS_KEY = 'vcpus'
74 FLAVOR_DISK_KEY = 'disk'
75 DEFAULT_IP_PROFILE = {'dhcp_count':50,
76 'dhcp_enabled':True,
77 'ip_version':"IPv4"
78 }
79 # global variable for wait time
80 INTERVAL_TIME = 5
81 MAX_WAIT_TIME = 1800
82
83 VCAVERSION = '5.9'
84
85 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
86 __date__ = "$12-Jan-2017 11:09:29$"
87 __version__ = '0.1'
88
89 # -1: "Could not be created",
90 # 0: "Unresolved",
91 # 1: "Resolved",
92 # 2: "Deployed",
93 # 3: "Suspended",
94 # 4: "Powered on",
95 # 5: "Waiting for user input",
96 # 6: "Unknown state",
97 # 7: "Unrecognized state",
98 # 8: "Powered off",
99 # 9: "Inconsistent state",
100 # 10: "Children do not all have the same status",
101 # 11: "Upload initiated, OVF descriptor pending",
102 # 12: "Upload initiated, copying contents",
103 # 13: "Upload initiated , disk contents pending",
104 # 14: "Upload has been quarantined",
105 # 15: "Upload quarantine period has expired"
106
107 # mapping vCD status to MANO
108 vcdStatusCode2manoFormat = {4: 'ACTIVE',
109 7: 'PAUSED',
110 3: 'SUSPENDED',
111 8: 'INACTIVE',
112 12: 'BUILD',
113 -1: 'ERROR',
114 14: 'DELETED'}
115
116 #
117 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
118 'ERROR': 'ERROR', 'DELETED': 'DELETED'
119 }
120
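# Note (illustrative, not part of the original logic): the two maps above translate raw vCD status
# values into the MANO vocabulary, e.g. vcdStatusCode2manoFormat.get(4) -> 'ACTIVE' and
# netStatus2manoFormat.get('BUILD') -> 'BUILD'; codes missing from the maps simply yield None.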
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
128 Constructor creates a VMware connector to vCloud director.
129
130 By default the constructor doesn't validate the connection state, so a client can create the object with None arguments.
131 If the client specifies username, password, host and VDC name, the connector initializes the other missing attributes:
132
133 a) It initializes the organization UUID
134 b) It initializes tenant_id/VDC ID (this information is derived from the tenant name)
135
136 Args:
137 uuid - organization UUID.
138 name - organization name; it must be present in vCloud director.
139 tenant_id - VDC UUID; it must be present in vCloud director.
140 tenant_name - VDC name.
141 url - hostname or IP address of vCloud director.
142 url_admin - same as above.
143 user - user that is administrator for the organization. Caller must make sure that
144 the username has the right privileges.
145
146 passwd - password for the user.
147
148 The VMware connector also requires PVDC administrative privileges and a separate account.
149 These values must be passed via the config argument, a dict that contains the keys
150
151 dict['admin_username']
152 dict['admin_password']
153 config - Provide NSX and vCenter information
154
155 Returns:
156 Nothing.
157 """
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
217 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
233 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
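# Usage sketch (illustrative only, all values below are placeholders): credentials are passed as
# constructor arguments and the administrative/NSX/vCenter data goes in the 'config' dict, e.g.
#   conn = vimconnector(name='vcd', tenant_name='myorg:myvdc', url='https://vcd.example.com',
#                       user='user', passwd='pass',
#                       config={'admin_username': 'system_admin', 'admin_password': 'admin_pass',
#                               'nsx_manager': 'https://nsx.example.com',
#                               'nsx_user': 'nsx_admin', 'nsx_password': 'nsx_pass'})
# Optional config keys: 'orgname', 'vcenter_ip', 'vcenter_port', 'vcenter_user', 'vcenter_password'.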
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 if index == 'tenant_id':
243 return self.tenant_id
244 if index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 if index == 'tenant_id':
269 self.tenant_id = value
270 if index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
289 def connect_as_admin(self):
290 """ Method connect as pvdc admin user to vCloud director.
291 There are certain action that can be done only by provider vdc admin user.
292 Organization creation / provider network creation etc.
293
294 Returns:
295 The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
296 """
297
298 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
299
300 vca_admin = VCA(host=self.url,
301 username=self.admin_user,
302 service_type=STANDALONE,
303 version=VCAVERSION,
304 verify=False,
305 log=False)
306 result = vca_admin.login(password=self.admin_password, org='System')
307 if not result:
308 raise vimconn.vimconnConnectionException(
309 "Can't connect to a vCloud director as: {}".format(self.admin_user))
310 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
311 if result is True:
312 self.logger.info(
313 "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
314
315 return vca_admin
316
317 def connect(self):
318 """ Method connect as normal user to vCloud director.
319
320 Returns:
321 The return vca object that letter can be used to connect to vCloud director as admin for VDC
322 """
323
324 try:
325 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
326 self.user,
327 self.org_name))
328 vca = VCA(host=self.url,
329 username=self.user,
330 service_type=STANDALONE,
331 version=VCAVERSION,
332 verify=False,
333 log=False)
334
335 result = vca.login(password=self.passwd, org=self.org_name)
336 if not result:
337 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
338 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
339 if result is True:
340 self.logger.info(
341 "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
342
343 except:
344 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
345 "{} as user: {}".format(self.org_name, self.user))
346
347 return vca
348
349 def init_organization(self):
350 """ Method initialize organization UUID and VDC parameters.
351
352 At bare minimum client must provide organization name that present in vCloud director and VDC.
353
354 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
355 The Org - UUID will be initialized at the run time if data center present in vCloud director.
356
357 Returns:
358 The return vca object that letter can be used to connect to vcloud direct as admin
359 """
360 vca = self.connect()
361 if not vca:
362 raise vimconn.vimconnConnectionException("self.connect() failed.")
363
364 self.vca = vca
365 try:
366 if self.org_uuid is None:
367 org_dict = self.get_org_list()
368 for org in org_dict:
369 # we set org UUID at the init phase but we can do it only when we have valid credential.
370 if org_dict[org] == self.org_name:
371 self.org_uuid = org
372 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
373 break
374 else:
375 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
376
377 # if all went well, request the org details
378 org_details_dict = self.get_org(org_uuid=self.org_uuid)
379
380 # there are two cases when initializing the VDC ID or VDC name at run time
381 # tenant_name provided but no tenant id
382 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
383 vdcs_dict = org_details_dict['vdcs']
384 for vdc in vdcs_dict:
385 if vdcs_dict[vdc] == self.tenant_name:
386 self.tenant_id = vdc
387 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
388 self.org_name))
389 break
390 else:
391 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
392 # case two: we have tenant_id but no tenant name, so we find and set it.
393 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
394 vdcs_dict = org_details_dict['vdcs']
395 for vdc in vdcs_dict:
396 if vdc == self.tenant_id:
397 self.tenant_name = vdcs_dict[vdc]
398 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
399 self.org_name))
400 break
401 else:
402 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
403 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
404 except:
405 self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
406 self.logger.debug(traceback.format_exc())
407 self.org_uuid = None
408
409 def new_tenant(self, tenant_name=None, tenant_description=None):
410 """ Method adds a new tenant to VIM with this name.
411 This action requires permission to create a VDC in vCloud director.
412
413 Args:
414 tenant_name is the name of the tenant to be created.
415 tenant_description not used for this call
416
417 Return:
418 returns the tenant identifier in UUID format.
419 If the action fails, the method raises vimconn.vimconnException.
420 """
421 vdc_task = self.create_vdc(vdc_name=tenant_name)
422 if vdc_task is not None:
423 vdc_uuid, value = vdc_task.popitem()
424 self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
425 return vdc_uuid
426 else:
427 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
428
429 def delete_tenant(self, tenant_id=None):
430 """ Delete a tenant from VIM
431 Args:
432 tenant_id is tenant_id to be deleted.
433
434 Return:
435 returns the tenant identifier in UUID format.
436 If the action fails, the method raises an exception
437 """
438 vca = self.connect_as_admin()
439 if not vca:
440 raise vimconn.vimconnConnectionException("self.connect() failed")
441
442 if tenant_id is not None:
443 if vca.vcloud_session and vca.vcloud_session.organization:
444 #Get OrgVDC
445 url_list = [self.vca.host, '/api/vdc/', tenant_id]
446 orgvdc_herf = ''.join(url_list)
447 response = Http.get(url=orgvdc_herf,
448 headers=vca.vcloud_session.get_vcloud_headers(),
449 verify=vca.verify,
450 logger=vca.logger)
451
452 if response.status_code != requests.codes.ok:
453 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
454 "Return status code {}".format(orgvdc_herf,
455 response.status_code))
456 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
457
458 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
459 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
460 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
461 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
462 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
463
464 #Delete OrgVDC
465 response = Http.delete(url=vdc_remove_href,
466 headers=vca.vcloud_session.get_vcloud_headers(),
467 verify=vca.verify,
468 logger=vca.logger)
469
470 if response.status_code == 202:
471 delete_vdc_task = taskType.parseString(response.content, True)
472 if type(delete_vdc_task) is GenericTask:
473 self.vca.block_until_completed(delete_vdc_task)
474 self.logger.info("Deleted tenant with ID {}".format(tenant_id))
475 return tenant_id
476 else:
477 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
478 "Return status code {}".format(vdc_remove_href,
479 response.status_code))
480 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
481 else:
482 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
483 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
484
485
486 def get_tenant_list(self, filter_dict={}):
487 """Obtain tenants of VIM
488 filter_dict can contain the following keys:
489 name: filter by tenant name
490 id: filter by tenant uuid/id
491 <other VIM specific>
492 Returns the tenant list of dictionaries:
493 [{'name': '<name>', 'id': '<id>', ...}, ...]
494
495 """
496 org_dict = self.get_org(self.org_uuid)
497 vdcs_dict = org_dict['vdcs']
498
499 vdclist = []
500 try:
501 for k in vdcs_dict:
502 entry = {'name': vdcs_dict[k], 'id': k}
503 # if caller didn't specify dictionary we return all tenants.
504 if filter_dict is not None and filter_dict:
505 filtered_entry = entry.copy()
506 filtered_dict = set(entry.keys()) - set(filter_dict)
507 for unwanted_key in filtered_dict: del entry[unwanted_key]
508 if filter_dict == entry:
509 vdclist.append(filtered_entry)
510 else:
511 vdclist.append(entry)
512 except:
513 self.logger.debug("Error in get_tenant_list()")
514 self.logger.debug(traceback.format_exc())
515 raise vimconn.vimconnException("Incorrect state. {}")
516
517 return vdclist
518
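# Example (illustrative): filtering is an exact match on the keys supplied in filter_dict, so
#   self.get_tenant_list({'name': 'my-vdc'})
# returns only the VDC entries whose 'name' equals 'my-vdc', while an empty filter returns all VDCs.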
519 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
520 """Adds a tenant network to VIM
521 net_name is the name
522 net_type can be 'bridge', 'data' or 'ptp'.
523 ip_profile is a dict containing the IP parameters of the network
524 shared is a boolean
525 Returns the network identifier"""
526
527 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
528 .format(net_name, net_type, ip_profile, shared))
529
530 isshared = 'false'
531 if shared:
532 isshared = 'true'
533
534 # ############# Stub code for SRIOV #################
535 # if net_type == "data" or net_type == "ptp":
536 # if self.config.get('dv_switch_name') == None:
537 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
538 # network_uuid = self.create_dvPort_group(net_name)
539
540 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
541 ip_profile=ip_profile, isshared=isshared)
542 if network_uuid is not None:
543 return network_uuid
544 else:
545 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
546
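# Illustrative note: DEFAULT_IP_PROFILE (module level) hints at the expected ip_profile keys, e.g.
#   self.new_network('net0', 'bridge', ip_profile={'dhcp_enabled': True, 'dhcp_count': 10, 'ip_version': 'IPv4'})
# (assumption: create_network(), defined later in this module, falls back to DEFAULT_IP_PROFILE when
# no ip_profile is supplied)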
547 def get_vcd_network_list(self):
548 """ Method available organization for a logged in tenant
549
550 Returns:
551 The return vca object that letter can be used to connect to vcloud direct as admin
552 """
553
554 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
555
556 if not self.tenant_name:
557 raise vimconn.vimconnConnectionException("Tenant name is empty.")
558
559 vdc = self.get_vdc_details()
560 if vdc is None:
561 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
562
563 vdc_uuid = vdc.get_id().split(":")[3]
564 networks = self.vca.get_networks(vdc.get_name())
565 network_list = []
566 try:
567 for network in networks:
568 filter_dict = {}
569 netid = network.get_id().split(":")
570 if len(netid) != 4:
571 continue
572
573 filter_dict["name"] = network.get_name()
574 filter_dict["id"] = netid[3]
575 filter_dict["shared"] = network.get_IsShared()
576 filter_dict["tenant_id"] = vdc_uuid
577 if network.get_status() == 1:
578 filter_dict["admin_state_up"] = True
579 else:
580 filter_dict["admin_state_up"] = False
581 filter_dict["status"] = "ACTIVE"
582 filter_dict["type"] = "bridge"
583 network_list.append(filter_dict)
584 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
585 except:
586 self.logger.debug("Error in get_vcd_network_list")
587 self.logger.debug(traceback.format_exc())
588 pass
589
590 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
591 return network_list
592
593 def get_network_list(self, filter_dict={}):
594 """Obtain tenant networks of VIM
595 Filter_dict can be:
596 name: network name OR/AND
597 id: network uuid OR/AND
598 shared: boolean OR/AND
599 tenant_id: tenant OR/AND
600 admin_state_up: boolean
601 status: 'ACTIVE'
602
603 [{key : value , key : value}]
604
605 Returns the network list of dictionaries:
606 [{<the fields at Filter_dict plus some VIM specific>}, ...]
607 List can be empty
608 """
609
610 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
611
612 if not self.tenant_name:
613 raise vimconn.vimconnConnectionException("Tenant name is empty.")
614
615 vdc = self.get_vdc_details()
616 if vdc is None:
617 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
618
619 try:
620 vdcid = vdc.get_id().split(":")[3]
621 networks = self.vca.get_networks(vdc.get_name())
622 network_list = []
623
624 for network in networks:
625 filter_entry = {}
626 net_uuid = network.get_id().split(":")
627 if len(net_uuid) != 4:
628 continue
629 else:
630 net_uuid = net_uuid[3]
631 # create dict entry
632 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
633 vdcid,
634 network.get_name()))
635 filter_entry["name"] = network.get_name()
636 filter_entry["id"] = net_uuid
637 filter_entry["shared"] = network.get_IsShared()
638 filter_entry["tenant_id"] = vdcid
639 if network.get_status() == 1:
640 filter_entry["admin_state_up"] = True
641 else:
642 filter_entry["admin_state_up"] = False
643 filter_entry["status"] = "ACTIVE"
644 filter_entry["type"] = "bridge"
645 filtered_entry = filter_entry.copy()
646
647 if filter_dict is not None and filter_dict:
648 # we remove all the key : value we don't care and match only
649 # respected field
650 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
651 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
652 if filter_dict == filter_entry:
653 network_list.append(filtered_entry)
654 else:
655 network_list.append(filtered_entry)
656 except:
657 self.logger.debug("Error in get_vcd_network_list")
658 self.logger.debug(traceback.format_exc())
659
660 self.logger.debug("Returning {}".format(network_list))
661 return network_list
662
663 def get_network(self, net_id):
664 """Method obtains network details of net_id VIM network
665 Returns a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields"""
666
667 try:
668 vdc = self.get_vdc_details()
669 vdc_id = vdc.get_id().split(":")[3]
670
671 networks = self.vca.get_networks(vdc.get_name())
672 filter_dict = {}
673
674 for network in networks:
675 vdc_network_id = network.get_id().split(":")
676 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
677 filter_dict["name"] = network.get_name()
678 filter_dict["id"] = vdc_network_id[3]
679 filter_dict["shared"] = network.get_IsShared()
680 filter_dict["tenant_id"] = vdc_id
681 if network.get_status() == 1:
682 filter_dict["admin_state_up"] = True
683 else:
684 filter_dict["admin_state_up"] = False
685 filter_dict["status"] = "ACTIVE"
686 filter_dict["type"] = "bridge"
687 self.logger.debug("Returning {}".format(filter_dict))
688 return filter_dict
689 except:
690 self.logger.debug("Error in get_network")
691 self.logger.debug(traceback.format_exc())
692
693 return filter_dict
694
695 def delete_network(self, net_id):
696 """
697 Method deletes a tenant network from VIM given the network id.
698
699 Returns the network identifier or raise an exception
700 """
701
702 # ############# Stub code for SRIOV #################
703 # dvport_group = self.get_dvport_group(net_id)
704 # if dvport_group:
705 # #delete portgroup
706 # status = self.destroy_dvport_group(net_id)
707 # if status:
708 # # Remove vlanID from persistent info
709 # if net_id in self.persistent_info["used_vlanIDs"]:
710 # del self.persistent_info["used_vlanIDs"][net_id]
711 #
712 # return net_id
713
714 vcd_network = self.get_vcd_network(network_uuid=net_id)
715 if vcd_network is not None and vcd_network:
716 if self.delete_network_action(network_uuid=net_id):
717 return net_id
718 else:
719 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
720
721 def refresh_nets_status(self, net_list):
722 """Get the status of the networks
723 Params: the list of network identifiers
724 Returns a dictionary with:
725 net_id: #VIM id of this network
726 status: #Mandatory. Text with one of:
727 # DELETED (not found at vim)
728 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
729 # OTHER (Vim reported other status not understood)
730 # ERROR (VIM indicates an ERROR status)
731 # ACTIVE, INACTIVE, DOWN (admin down),
732 # BUILD (on building process)
733 #
734 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
735 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
736
737 """
738
739 dict_entry = {}
740 try:
741 for net in net_list:
742 errormsg = ''
743 vcd_network = self.get_vcd_network(network_uuid=net)
744 if vcd_network is not None and vcd_network:
745 if vcd_network['status'] == '1':
746 status = 'ACTIVE'
747 else:
748 status = 'DOWN'
749 else:
750 status = 'DELETED'
751 errormsg = 'Network not found.'
752
753 dict_entry[net] = {'status': status, 'error_msg': errormsg,
754 'vim_info': yaml.safe_dump(vcd_network)}
755 except:
756 self.logger.debug("Error in refresh_nets_status")
757 self.logger.debug(traceback.format_exc())
758
759 return dict_entry
760
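# Shape of the dictionary returned by refresh_nets_status() (values illustrative):
#   {'<net-uuid>': {'status': 'ACTIVE', 'error_msg': '', 'vim_info': '<yaml dump of the vCD network>'}}
# A network no longer present in vCD is reported with status 'DELETED' and an error message.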
761 def get_flavor(self, flavor_id):
762 """Obtain flavor details from the VIM
763 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
764 """
765 if flavor_id not in vimconnector.flavorlist:
766 raise vimconn.vimconnNotFoundException("Flavor not found.")
767 return vimconnector.flavorlist[flavor_id]
768
769 def new_flavor(self, flavor_data):
770 """Adds a tenant flavor to VIM
771 flavor_data contains a dictionary with information, keys:
772 name: flavor name
773 ram: memory (cloud type) in MBytes
774 vcpus: cpus (cloud type)
775 extended: EPA parameters
776 - numas: #items requested in same NUMA
777 memory: number of 1G huge pages memory
778 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
779 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
780 - name: interface name
781 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
782 bandwidth: X Gbps; requested guarantee bandwidth
783 vpci: requested virtual PCI address
784 disk: disk size
785 is_public:
786 #TODO to concrete
787 Returns the flavor identifier"""
788
789 # generate a new uuid put to internal dict and return it.
790 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
791 new_flavor=flavor_data
792 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
793 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
794 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
795
796 if not isinstance(ram, int):
797 raise vimconn.vimconnException("Non-integer value for ram")
798 elif not isinstance(cpu, int):
799 raise vimconn.vimconnException("Non-integer value for cpu")
800 elif not isinstance(disk, int):
801 raise vimconn.vimconnException("Non-integer value for disk")
802
803 extended_flv = flavor_data.get("extended")
804 if extended_flv:
805 numas=extended_flv.get("numas")
806 if numas:
807 for numa in numas:
808 #overwrite ram and vcpus
809 ram = numa['memory']*1024
810 if 'paired-threads' in numa:
811 cpu = numa['paired-threads']*2
812 elif 'cores' in numa:
813 cpu = numa['cores']
814 elif 'threads' in numa:
815 cpu = numa['threads']
816
817 new_flavor[FLAVOR_RAM_KEY] = ram
818 new_flavor[FLAVOR_VCPUS_KEY] = cpu
819 new_flavor[FLAVOR_DISK_KEY] = disk
820 # generate a new uuid put to internal dict and return it.
821 flavor_id = uuid.uuid4()
822 vimconnector.flavorlist[str(flavor_id)] = new_flavor
823 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
824
825 return str(flavor_id)
826
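# Example flavor_data accepted by new_flavor() (illustrative values); EPA 'numas' entries override
# the plain ram/vcpus values, as described in the docstring above:
#   flavor_id = self.new_flavor({'name': 'small', 'ram': 2048, 'vcpus': 2, 'disk': 10,
#                                'extended': {'numas': [{'memory': 2, 'paired-threads': 1}]}})
# With this input ram becomes 2*1024 MB and vcpus becomes 1*2 paired hyperthreads.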
827 def delete_flavor(self, flavor_id):
828 """Deletes a tenant flavor from VIM identify by its id
829
830 Returns the used id or raise an exception
831 """
832 if flavor_id not in vimconnector.flavorlist:
833 raise vimconn.vimconnNotFoundException("Flavor not found.")
834
835 vimconnector.flavorlist.pop(flavor_id, None)
836 return flavor_id
837
838 def new_image(self, image_dict):
839 """
840 Adds a tenant image to VIM
841 Returns:
842 200, image-id if the image is created
843 <0, message if there is an error
844 """
845
846 return self.get_image_id_from_path(image_dict['location'])
847
848 def delete_image(self, image_id):
849 """
850 Deletes a tenant image from VIM
851 Args:
852 image_id is ID of Image to be deleted
853 Return:
854 returns the image identifier in UUID format or raises an exception on error
855 """
856 vca = self.connect_as_admin()
857 if not vca:
858 raise vimconn.vimconnConnectionException("self.connect() failed")
859 # Get Catalog details
860 url_list = [self.vca.host, '/api/catalog/', image_id]
861 catalog_herf = ''.join(url_list)
862 response = Http.get(url=catalog_herf,
863 headers=vca.vcloud_session.get_vcloud_headers(),
864 verify=vca.verify,
865 logger=vca.logger)
866
867 if response.status_code != requests.codes.ok:
868 self.logger.debug("delete_image():GET REST API call {} failed. "\
869 "Return status code {}".format(catalog_herf,
870 response.status_code))
871 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
872
873 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
874 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
875 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
876
877 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
878 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
879 for catalogItem in catalogItems:
880 catalogItem_href = catalogItem.attrib['href']
881
882 #GET details of catalogItem
883 response = Http.get(url=catalogItem_href,
884 headers=vca.vcloud_session.get_vcloud_headers(),
885 verify=vca.verify,
886 logger=vca.logger)
887
888 if response.status_code != requests.codes.ok:
889 self.logger.debug("delete_image():GET REST API call {} failed. "\
890 "Return status code {}".format(catalog_herf,
891 response.status_code))
892 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
893 catalogItem,
894 image_id))
895
896 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
897 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
898 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
899 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
900
901 #Remove catalogItem
902 response = Http.delete(url= catalogitem_remove_href,
903 headers=vca.vcloud_session.get_vcloud_headers(),
904 verify=vca.verify,
905 logger=vca.logger)
906 if response.status_code == requests.codes.no_content:
907 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
908 else:
909 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
910
911 #Remove catalog
912 url_list = [self.vca.host, '/api/admin/catalog/', image_id]
913 catalog_remove_herf = ''.join(url_list)
914 response = Http.delete(url= catalog_remove_herf,
915 headers=vca.vcloud_session.get_vcloud_headers(),
916 verify=vca.verify,
917 logger=vca.logger)
918
919 if response.status_code == requests.codes.no_content:
920 self.logger.debug("Deleted Catalog {}".format(image_id))
921 return image_id
922 else:
923 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
924
925
926 def catalog_exists(self, catalog_name, catalogs):
927 """
928
929 :param catalog_name: name of the catalog to look for
930 :param catalogs: list of catalog objects
931 :return: True if a catalog with that name exists, False otherwise
932 """
933 for catalog in catalogs:
934 if catalog.name == catalog_name:
935 return True
936 return False
937
938 def create_vimcatalog(self, vca=None, catalog_name=None):
939 """ Create new catalog entry in vCloud director.
940
941 Args
942 vca: vCloud director.
943 catalog_name: catalog that the client wishes to create. Note: no validation is done on the name;
944 the client must make sure to provide a valid string representation.
945
946 Return (bool) True if catalog created.
947
948 """
949 try:
950 task = vca.create_catalog(catalog_name, catalog_name)
951 result = vca.block_until_completed(task)
952 if not result:
953 return False
954 catalogs = vca.get_catalogs()
955 except:
956 return False
957 return self.catalog_exists(catalog_name, catalogs)
958
959 # noinspection PyIncorrectDocstring
960 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
961 description='', progress=False, chunk_bytes=128 * 1024):
962 """
963 Uploads an OVF file to a vCloud catalog
964
965 :param chunk_bytes:
966 :param progress:
967 :param description:
968 :param image_name:
969 :param vca:
970 :param catalog_name: (str): The name of the catalog to upload the media.
971 :param media_file_name: (str): The name of the local media file to upload.
972 :return: (bool) True if the media file was successfully uploaded, false otherwise.
973 """
974 os.path.isfile(media_file_name)
975 statinfo = os.stat(media_file_name)
976
977 # find a catalog entry where we upload OVF.
978 # create the vApp Template and check the status; if vCD is able to read the OVF it will respond with the
979 # appropriate status change.
980 # if vCD can parse the OVF we upload the VMDK file
981 try:
982 for catalog in vca.get_catalogs():
983 if catalog_name != catalog.name:
984 continue
985 link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
986 link.get_rel() == 'add', catalog.get_Link())
987 assert len(link) == 1
988 data = """
989 <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
990 """ % (escape(catalog_name), escape(description))
991 headers = vca.vcloud_session.get_vcloud_headers()
992 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
993 response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
994 if response.status_code == requests.codes.created:
995 catalogItem = XmlElementTree.fromstring(response.content)
996 entity = [child for child in catalogItem if
997 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
998 href = entity.get('href')
999 template = href
1000 response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
1001 verify=vca.verify, logger=self.logger)
1002
1003 if response.status_code == requests.codes.ok:
1004 media = mediaType.parseString(response.content, True)
1005 link = filter(lambda link: link.get_rel() == 'upload:default',
1006 media.get_Files().get_File()[0].get_Link())[0]
1007 headers = vca.vcloud_session.get_vcloud_headers()
1008 headers['Content-Type'] = 'Content-Type text/xml'
1009 response = Http.put(link.get_href(),
1010 data=open(media_file_name, 'rb'),
1011 headers=headers,
1012 verify=vca.verify, logger=self.logger)
1013 if response.status_code != requests.codes.ok:
1014 self.logger.debug(
1015 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1016 media_file_name))
1017 return False
1018
1019 # TODO fix this with an async block
1020 time.sleep(5)
1021
1022 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1023
1024 # uploading VMDK file
1025 # check status of OVF upload and upload remaining files.
1026 response = Http.get(template,
1027 headers=vca.vcloud_session.get_vcloud_headers(),
1028 verify=vca.verify,
1029 logger=self.logger)
1030
1031 if response.status_code == requests.codes.ok:
1032 media = mediaType.parseString(response.content, True)
1033 number_of_files = len(media.get_Files().get_File())
1034 for index in xrange(0, number_of_files):
1035 links_list = filter(lambda link: link.get_rel() == 'upload:default',
1036 media.get_Files().get_File()[index].get_Link())
1037 for link in links_list:
1038 # we skip the ovf since it is already uploaded.
1039 if 'ovf' in link.get_href():
1040 continue
1041 # The OVF file and VMDK must be in a same directory
1042 head, tail = os.path.split(media_file_name)
1043 file_vmdk = head + '/' + link.get_href().split("/")[-1]
1044 if not os.path.isfile(file_vmdk):
1045 return False
1046 statinfo = os.stat(file_vmdk)
1047 if statinfo.st_size == 0:
1048 return False
1049 hrefvmdk = link.get_href()
1050
1051 if progress:
1052 print("Uploading file: {}".format(file_vmdk))
1053 if progress:
1054 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1055 FileTransferSpeed()]
1056 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1057
1058 bytes_transferred = 0
1059 f = open(file_vmdk, 'rb')
1060 while bytes_transferred < statinfo.st_size:
1061 my_bytes = f.read(chunk_bytes)
1062 if len(my_bytes) <= chunk_bytes:
1063 headers = vca.vcloud_session.get_vcloud_headers()
1064 headers['Content-Range'] = 'bytes %s-%s/%s' % (
1065 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1066 headers['Content-Length'] = str(len(my_bytes))
1067 response = Http.put(hrefvmdk,
1068 headers=headers,
1069 data=my_bytes,
1070 verify=vca.verify,
1071 logger=None)
1072
1073 if response.status_code == requests.codes.ok:
1074 bytes_transferred += len(my_bytes)
1075 if progress:
1076 progress_bar.update(bytes_transferred)
1077 else:
1078 self.logger.debug(
1079 'file upload failed with error: [%s] %s' % (response.status_code,
1080 response.content))
1081
1082 f.close()
1083 return False
1084 f.close()
1085 if progress:
1086 progress_bar.finish()
1087 time.sleep(10)
1088 return True
1089 else:
1090 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1091 format(catalog_name, media_file_name))
1092 return False
1093 except Exception as exp:
1094 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1095 .format(catalog_name,media_file_name, exp))
1096 raise vimconn.vimconnException(
1097 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1098 .format(catalog_name,media_file_name, exp))
1099
1100 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1101 return False
1102
1103 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1104 """Upload media file"""
1105 # TODO add named parameters for readability
1106
1107 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1108 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1109
1110 def validate_uuid4(self, uuid_string=None):
1111 """ Method validate correct format of UUID.
1112
1113 Return: true if string represent valid uuid
1114 """
1115 try:
1116 val = uuid.UUID(uuid_string, version=4)
1117 except ValueError:
1118 return False
1119 return True
1120
1121 def get_catalogid(self, catalog_name=None, catalogs=None):
1122 """ Method check catalog and return catalog ID in UUID format.
1123
1124 Args
1125 catalog_name: catalog name as string
1126 catalogs: list of catalogs.
1127
1128 Return: catalog uuid, or None if not found
1129 """
1130
1131 for catalog in catalogs:
1132 if catalog.name == catalog_name:
1133 catalog_id = catalog.get_id().split(":")
1134 return catalog_id[3]
1135 return None
1136
1137 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1138 """ Method check catalog and return catalog name lookup done by catalog UUID.
1139
1140 Args
1141 catalog_name: catalog name as string
1142 catalogs: list of catalogs.
1143
1144 Return: catalogs name or None
1145 """
1146
1147 if not self.validate_uuid4(uuid_string=catalog_uuid):
1148 return None
1149
1150 for catalog in catalogs:
1151 catalog_id = catalog.get_id().split(":")[3]
1152 if catalog_id == catalog_uuid:
1153 return catalog.name
1154 return None
1155
1156 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1157 """ Method check catalog and return catalog name lookup done by catalog UUID.
1158
1159 Args
1160 catalog_name: catalog name as string
1161 catalogs: list of catalogs.
1162
1163 Return: catalogs name or None
1164 """
1165
1166 if not self.validate_uuid4(uuid_string=catalog_uuid):
1167 return None
1168
1169 for catalog in catalogs:
1170 catalog_id = catalog.get_id().split(":")[3]
1171 if catalog_id == catalog_uuid:
1172 return catalog
1173 return None
1174
1175 def get_image_id_from_path(self, path=None, progress=False):
1176 """ Method upload OVF image to vCloud director.
1177
1178 Each OVF image represented as single catalog entry in vcloud director.
1179 The method check for existing catalog entry. The check done by file name without file extension.
1180
1181 if given catalog name already present method will respond with existing catalog uuid otherwise
1182 it will create new catalog entry and upload OVF file to newly created catalog.
1183
1184 If method can't create catalog entry or upload a file it will throw exception.
1185
1186 Method accept boolean flag progress that will output progress bar. It useful method
1187 for standalone upload use case. In case to test large file upload.
1188
1189 Args
1190 path: - valid path to OVF file.
1191 progress - boolean progress bar show progress bar.
1192
1193 Return: if image uploaded correct method will provide image catalog UUID.
1194 """
1195
1196 if not path:
1197 raise vimconn.vimconnException("Image path can't be None.")
1198
1199 if not os.path.isfile(path):
1200 raise vimconn.vimconnException("Can't read file. File not found.")
1201
1202 if not os.access(path, os.R_OK):
1203 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1204
1205 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1206
1207 dirpath, filename = os.path.split(path)
1208 flname, file_extension = os.path.splitext(path)
1209 if file_extension != '.ovf':
1210 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1211 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1212
1213 catalog_name = os.path.splitext(filename)[0]
1214 catalog_md5_name = hashlib.md5(path).hexdigest()
1215 self.logger.debug("File name {} Catalog Name {} file path {} "
1216 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1217
1218 try:
1219 catalogs = self.vca.get_catalogs()
1220 except Exception as exp:
1221 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1222 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1223
1224 if len(catalogs) == 0:
1225 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1226 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1227 if not result:
1228 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1229 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1230 media_name=filename, medial_file_name=path, progress=progress)
1231 if not result:
1232 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1233 return self.get_catalogid(catalog_name, self.vca.get_catalogs())
1234 else:
1235 for catalog in catalogs:
1236 # search for existing catalog if we find same name we return ID
1237 # TODO optimize this
1238 if catalog.name == catalog_md5_name:
1239 self.logger.debug("Found existing catalog entry for {} "
1240 "catalog id {}".format(catalog_name,
1241 self.get_catalogid(catalog_md5_name, catalogs)))
1242 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1243
1244 # if we didn't find existing catalog we create a new one and upload image.
1245 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1246 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1247 if not result:
1248 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1249
1250 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1251 media_name=filename, medial_file_name=path, progress=progress)
1252 if not result:
1253 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1254
1255 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1256
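# Usage sketch (path is a placeholder): the catalog is named after the md5 of the OVF path, so a
# repeated upload of the same file returns the existing catalog UUID instead of re-uploading:
#   image_id = self.get_image_id_from_path('/opt/images/cirros.ovf', progress=False)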
1257 def get_image_list(self, filter_dict={}):
1258 '''Obtain tenant images from VIM
1259 Filter_dict can be:
1260 name: image name
1261 id: image uuid
1262 checksum: image checksum
1263 location: image path
1264 Returns the image list of dictionaries:
1265 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1266 List can be empty
1267 '''
1268
1269 try:
1270 image_list = []
1271 catalogs = self.vca.get_catalogs()
1272 if len(catalogs) == 0:
1273 return image_list
1274 else:
1275 for catalog in catalogs:
1276 catalog_uuid = catalog.get_id().split(":")[3]
1277 name = catalog.name
1278 filtered_dict = {}
1279 if filter_dict.get("name") and filter_dict["name"] != name:
1280 continue
1281 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1282 continue
1283 filtered_dict ["name"] = name
1284 filtered_dict ["id"] = catalog_uuid
1285 image_list.append(filtered_dict)
1286
1287 self.logger.debug("List of already created catalog items: {}".format(image_list))
1288 return image_list
1289 except Exception as exp:
1290 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1291
1292 def get_vappid(self, vdc=None, vapp_name=None):
1293 """ Method takes vdc object and vApp name and returns vapp uuid or None
1294
1295 Args:
1296 vdc: The VDC object.
1297 vapp_name: vApp name identifier
1298
1299 Returns:
1300 The vApp UUID, otherwise None
1301 """
1302 if vdc is None or vapp_name is None:
1303 return None
1304 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1305 try:
1306 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1307 vdc.ResourceEntities.ResourceEntity)
1308 if len(refs) == 1:
1309 return refs[0].href.split("vapp")[1][1:]
1310 except Exception as e:
1311 self.logger.exception(e)
1312 return False
1313 return None
1314
1315 def check_vapp(self, vdc=None, vapp_uuid=None):
1316 """ Method Method returns True or False if vapp deployed in vCloud director
1317
1318 Args:
1319 vca: Connector to VCA
1320 vdc: The VDC object.
1321 vappid: vappid is application identifier
1322
1323 Returns:
1324 Returns True if the vApp is deployed
1325 :param vdc:
1326 :param vapp_uuid:
1327 """
1328 try:
1329 refs = filter(lambda ref:
1330 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1331 vdc.ResourceEntities.ResourceEntity)
1332 for ref in refs:
1333 vappid = ref.href.split("vapp")[1][1:]
1334 # find vapp with respected vapp uuid
1335 if vappid == vapp_uuid:
1336 return True
1337 except Exception as e:
1338 self.logger.exception(e)
1339 return False
1340 return False
1341
1342 def get_namebyvappid(self, vdc=None, vapp_uuid=None):
1343 """Method returns vApp name from vCD and lookup done by vapp_id.
1344
1345 Args:
1346 vca: Connector to VCA
1347 vdc: The VDC object.
1348 vapp_uuid: vappid is application identifier
1349
1350 Returns:
1351 The vApp name, otherwise None
1352 """
1353
1354 try:
1355 refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1356 vdc.ResourceEntities.ResourceEntity)
1357 for ref in refs:
1358 # we care only about UUID the rest doesn't matter
1359 vappid = ref.href.split("vapp")[1][1:]
1360 if vappid == vapp_uuid:
1361 response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
1362 logger=self.logger)
1363
1364 #Retry login if session expired & retry sending request
1365 if response.status_code == 403:
1366 response = self.retry_rest('GET', ref.href)
1367
1368 tree = XmlElementTree.fromstring(response.content)
1369 return tree.attrib['name']
1370 except Exception as e:
1371 self.logger.exception(e)
1372 return None
1373 return None
1374
1375 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1376 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1377 """Adds a VM instance to VIM
1378 Params:
1379 'start': (boolean) indicates if VM must start or created in pause mode.
1380 'image_id','flavor_id': image and flavor VIM id to use for the VM
1381 'net_list': list of interfaces, each one is a dictionary with:
1382 'name': (optional) name for the interface.
1383 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1384 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1385 'model': (optional, only makes sense for type==virtual) interface model: virtio, e2000, ...
1386 'mac_address': (optional) mac address to assign to this interface
1387 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1388 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1389 'type': (mandatory) can be one of:
1390 'virtual', in this case always connected to a network of type 'net_type=bridge'
1391 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
1392 can be created unconnected
1393 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1394 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1395 are allocated on the same physical NIC
1396 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1397 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1398 or True, it must apply the default VIM behaviour
1399 After execution the method will add the key:
1400 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1401 interface. 'net_list' is modified
1402 'cloud_config': (optional) dictionary with:
1403 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1404 'users': (optional) list of users to be inserted, each item is a dict with:
1405 'name': (mandatory) user name,
1406 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1407 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1408 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1409 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1410 'dest': (mandatory) string with the destination absolute path
1411 'encoding': (optional, by default text). Can be one of:
1412 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1413 'content' (mandatory): string with the content of the file
1414 'permissions': (optional) string with file permissions, typically octal notation '0644'
1415 'owner': (optional) file owner, string with the format 'owner:group'
1416 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1417 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1418 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1419 'size': (mandatory) string with the size of the disk in GB
1420 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
1421 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1422 availability_zone_index is None
1423 Returns the instance identifier or raises an exception on error
1424 """
1425 self.logger.info("Creating new instance for entry {}".format(name))
1426 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
1427 description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
1428
1429 # new vm name = vmname + '-' + uuid
1430 new_vm_name = [name, '-', str(uuid.uuid4())]
1431 vmname_andid = ''.join(new_vm_name)
1432
1433 # if vm already deployed we return existing uuid
1434 # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
1435 # if vapp_uuid is not None:
1436 # return vapp_uuid
1437
1438 # we check for presence of VDC, Catalog entry and Flavor.
1439 vdc = self.get_vdc_details()
1440 if vdc is None:
1441 raise vimconn.vimconnNotFoundException(
1442 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1443 catalogs = self.vca.get_catalogs()
1444 if catalogs is None:
1445 #Retry once, if failed by refreshing token
1446 self.get_token()
1447 catalogs = self.vca.get_catalogs()
1448 if catalogs is None:
1449 raise vimconn.vimconnNotFoundException(
1450 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1451
1452 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1453 if catalog_hash_name:
1454 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1455 else:
1456 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1457 "(Failed retrieve catalog information {})".format(name, image_id))
1458
1459
1460 # Set vCPU and Memory based on flavor.
1461 vm_cpus = None
1462 vm_memory = None
1463 vm_disk = None
1464 numas = None
1465
1466 if flavor_id is not None:
1467 if flavor_id not in vimconnector.flavorlist:
1468 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1469 "Failed retrieve flavor information "
1470 "flavor id {}".format(name, flavor_id))
1471 else:
1472 try:
1473 flavor = vimconnector.flavorlist[flavor_id]
1474 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1475 vm_memory = flavor[FLAVOR_RAM_KEY]
1476 vm_disk = flavor[FLAVOR_DISK_KEY]
1477 extended = flavor.get("extended", None)
1478 if extended:
1479 numas=extended.get("numas", None)
1480
1481 except Exception as exp:
1482 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1483
1484 # the image upload creates the template name as the catalog name followed by ' Template'.
1485 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1486 power_on = 'false'
1487 if start:
1488 power_on = 'true'
1489
1490 # client must provide at least one entry in net_list if not we report error
1491 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1492 # If there is no mgmt net, then the first net in net_list is considered the primary net.
1493 primary_net = None
1494 primary_netname = None
1495 network_mode = 'bridged'
1496 if net_list is not None and len(net_list) > 0:
1497 for net in net_list:
1498 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1499 primary_net = net
1500 if primary_net is None:
1501 primary_net = net_list[0]
1502
1503 try:
1504 primary_net_id = primary_net['net_id']
1505 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1506 if 'name' in network_dict:
1507 primary_netname = network_dict['name']
1508
1509 except KeyError:
1510                 raise vimconn.vimconnException("new_vminstance(): Missing 'net_id' in primary net {}".format(primary_net))
1511 else:
1512             raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create vApp {}: network list is empty".format(name))
1513
1514 # use: 'data', 'bridge', 'mgmt'
1515 # create vApp. Set vcpu and ram based on flavor id.
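        # create_vapp is attempted up to twice: if the first attempt returns nothing
        # (for example because of an expired session token), the token is refreshed and
        # the call is retried once before giving up.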
1516 try:
1517 for retry in (1,2):
1518 vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1519 self.get_catalogbyid(image_id, catalogs),
1520 network_name=None, # None while creating vapp
1521 network_mode=network_mode,
1522 vm_name=vmname_andid,
1523 vm_cpus=vm_cpus, # can be None if flavor is None
1524 vm_memory=vm_memory) # can be None if flavor is None
1525
1526 if not vapptask and retry==1:
1527 self.get_token() # Retry getting token
1528 continue
1529 else:
1530 break
1531
1532 if vapptask is None or vapptask is False:
1533 raise vimconn.vimconnUnexpectedResponse(
1534 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1535 if type(vapptask) is VappTask:
1536 self.vca.block_until_completed(vapptask)
1537
1538 except Exception as exp:
1539 raise vimconn.vimconnUnexpectedResponse(
1540 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1541
1542         # we should now have the vApp in an undeployed state.
1543 try:
1544 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1545
1546 except Exception as exp:
1547 raise vimconn.vimconnUnexpectedResponse(
1548 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1549 .format(vmname_andid, exp))
1550
1551 if vapp_uuid is None:
1552 raise vimconn.vimconnUnexpectedResponse(
1553 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1554 vmname_andid))
1555
1556         # Add PCI passthrough/SR-IOV configurations
1557 vm_obj = None
1558 pci_devices_info = []
1559 sriov_net_info = []
1560 reserve_memory = False
1561
1562 for net in net_list:
1563 if net["type"]=="PF":
1564 pci_devices_info.append(net)
1565 elif (net["type"]=="VF" or net["type"]=="VFnotShared") and 'net_id'in net:
1566 sriov_net_info.append(net)
1567
1568 #Add PCI
1569 if len(pci_devices_info) > 0:
1570 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1571 vmname_andid ))
1572 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1573 pci_devices_info,
1574 vmname_andid)
1575 if PCI_devices_status:
1576                 self.logger.info("Added PCI devices {} to VM {}".format(
1577 pci_devices_info,
1578 vmname_andid)
1579 )
1580 reserve_memory = True
1581 else:
1582                 self.logger.info("Failed to add PCI devices {} to VM {}".format(
1583 pci_devices_info,
1584 vmname_andid)
1585 )
1586
1587 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1588 # Modify vm disk
1589 if vm_disk:
1590             # Assuming there is only one disk in the OVF and fast provisioning is disabled in the organization VDC
1591 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1592 if result :
1593 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1594
1595 #Add new or existing disks to vApp
1596 if disk_list:
1597 added_existing_disk = False
1598 for disk in disk_list:
1599 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1600 image_id = disk['image_id']
1601 # Adding CD-ROM to VM
1602 # will revisit code once specification ready to support this feature
1603 self.insert_media_to_vm(vapp, image_id)
1604 elif "image_id" in disk and disk["image_id"] is not None:
1605 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1606 disk["image_id"] , vapp_uuid))
1607 self.add_existing_disk(catalogs=catalogs,
1608 image_id=disk["image_id"],
1609 size = disk["size"],
1610 template_name=templateName,
1611 vapp_uuid=vapp_uuid
1612 )
1613 added_existing_disk = True
1614 else:
1615                     # Wait until a previously added existing disk is reflected in the vCD database/API
1616 if added_existing_disk:
1617 time.sleep(5)
1618 added_existing_disk = False
1619 self.add_new_disk(vapp_uuid, disk['size'])
1620
1621 if numas:
1622 # Assigning numa affinity setting
1623 for numa in numas:
1624 if 'paired-threads-id' in numa:
1625 paired_threads_id = numa['paired-threads-id']
1626 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1627
1628 # add NICs & connect to networks in netlist
1629 try:
1630 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1631 nicIndex = 0
1632 primary_nic_index = 0
1633 for net in net_list:
1634                 # openmano uses the network id in UUID format;
1635                 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
1636 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1637 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1638
1639 if 'net_id' not in net:
1640 continue
1641
1642                 # Use net_id as the vim_id (i.e. the VIM interface id), as we do not have a separate VIM interface id
1643                 # The same value will be returned in refresh_vms_status() as vim_interface_id
1644 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1645
1646 interface_net_id = net['net_id']
1647 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1648 interface_network_mode = net['use']
1649
1650 if interface_network_mode == 'mgmt':
1651 primary_nic_index = nicIndex
1652
1653 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1654 - DHCP (The IP address is obtained from a DHCP service.)
1655 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1656 - NONE (No IP addressing mode specified.)"""
1657
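                # the org VDC network is first attached to the vApp; a network adapter on that
                # network is then added to the VM below.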
1658 if primary_netname is not None:
1659 nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
1660 if len(nets) == 1:
1661 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
1662
1663 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1664 task = vapp.connect_to_network(nets[0].name, nets[0].href)
1665 if type(task) is GenericTask:
1666 self.vca.block_until_completed(task)
1667 # connect network to VM - with all DHCP by default
1668
1669 type_list = ['PF','VF','VFnotShared']
1670 if 'type' in net and net['type'] not in type_list:
1671 # fetching nic type from vnf
1672 if 'model' in net:
1673 nic_type = net['model']
1674 self.logger.info("new_vminstance(): adding network adapter "\
1675 "to a network {}".format(nets[0].name))
1676 self.add_network_adapter_to_vms(vapp, nets[0].name,
1677 primary_nic_index,
1678 nicIndex,
1679 net,
1680 nic_type=nic_type)
1681 else:
1682 self.logger.info("new_vminstance(): adding network adapter "\
1683 "to a network {}".format(nets[0].name))
1684 self.add_network_adapter_to_vms(vapp, nets[0].name,
1685 primary_nic_index,
1686 nicIndex,
1687 net)
1688 nicIndex += 1
1689
1690 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1691 # cloud-init for ssh-key injection
1692 if cloud_config:
1693 self.cloud_init(vapp,cloud_config)
1694
1695 # deploy and power on vm
1696 self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
1697 deploytask = vapp.deploy(powerOn=False)
1698 if type(deploytask) is GenericTask:
1699 self.vca.block_until_completed(deploytask)
1700
1701 # ############# Stub code for SRIOV #################
1702 #Add SRIOV
1703 # if len(sriov_net_info) > 0:
1704 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1705 # vmname_andid ))
1706 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1707 # sriov_net_info,
1708 # vmname_andid)
1709 # if sriov_status:
1710 # self.logger.info("Added SRIOV {} to VM {}".format(
1711 # sriov_net_info,
1712 # vmname_andid)
1713 # )
1714 # reserve_memory = True
1715 # else:
1716 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1717 # sriov_net_info,
1718 # vmname_andid)
1719 # )
1720
1721 # If VM has PCI devices or SRIOV reserve memory for VM
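            # (vSphere generally requires a full memory reservation for VMs that use PCI
            #  passthrough or SR-IOV, since guest memory must be pinned for device DMA)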
1722 if reserve_memory:
1723 memReserve = vm_obj.config.hardware.memoryMB
1724 spec = vim.vm.ConfigSpec()
1725 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1726 task = vm_obj.ReconfigVM_Task(spec=spec)
1727 if task:
1728 result = self.wait_for_vcenter_task(task, vcenter_conect)
1729                     self.logger.info("Reserved memory {} MB for "
1730                                      "VM; status: {}".format(str(memReserve), result))
1731 else:
1732                     self.logger.info("Failed to reserve memory {} MB for VM {}".format(
1733 str(memReserve), str(vm_obj)))
1734
1735 self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
1736
1737 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1738 poweron_task = vapp.poweron()
1739 if type(poweron_task) is GenericTask:
1740 self.vca.block_until_completed(poweron_task)
1741
1742 except Exception as exp :
1743             # this may happen if a mandatory entry in the dict is empty, or on some other pyvcloud exception
1744 self.logger.debug("new_vminstance(): Failed create new vm instance {} with exception {}"
1745 .format(name, exp))
1746 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
1747 .format(name, exp))
1748
1749         # check whether the vApp is deployed; if so return the vApp UUID, otherwise raise an exception
1750 wait_time = 0
1751 vapp_uuid = None
1752 while wait_time <= MAX_WAIT_TIME:
1753 try:
1754 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1755 except Exception as exp:
1756 raise vimconn.vimconnUnexpectedResponse(
1757 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1758 .format(vmname_andid, exp))
1759
1760 if vapp and vapp.me.deployed:
1761 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1762 break
1763 else:
1764 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1765 time.sleep(INTERVAL_TIME)
1766
1767 wait_time +=INTERVAL_TIME
1768
1769 if vapp_uuid is not None:
1770 return vapp_uuid
1771 else:
1772 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
1773
1774 ##
1775 ##
1776 ## based on current discussion
1777 ##
1778 ##
1779 ## server:
1780 # created: '2016-09-08T11:51:58'
1781 # description: simple-instance.linux1.1
1782 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1783 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1784 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1785 # status: ACTIVE
1786 # error_msg:
1787 # interfaces: …
1788 #
1789 def get_vminstance(self, vim_vm_uuid=None):
1790 """Returns the VM instance information from VIM"""
1791
1792 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1793
1794 vdc = self.get_vdc_details()
1795 if vdc is None:
1796 raise vimconn.vimconnConnectionException(
1797 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1798
1799 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1800 if not vm_info_dict:
1801 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1802 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1803
1804 status_key = vm_info_dict['status']
1805 error = ''
1806 try:
1807 vm_dict = {'created': vm_info_dict['created'],
1808 'description': vm_info_dict['name'],
1809 'status': vcdStatusCode2manoFormat[int(status_key)],
1810 'hostId': vm_info_dict['vmuuid'],
1811 'error_msg': error,
1812 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1813
1814 if 'interfaces' in vm_info_dict:
1815 vm_dict['interfaces'] = vm_info_dict['interfaces']
1816 else:
1817 vm_dict['interfaces'] = []
1818 except KeyError:
1819 vm_dict = {'created': '',
1820 'description': '',
1821 'status': vcdStatusCode2manoFormat[int(-1)],
1822 'hostId': vm_info_dict['vmuuid'],
1823 'error_msg': "Inconsistency state",
1824 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1825
1826 return vm_dict
1827
1828 def delete_vminstance(self, vm__vim_uuid):
1829 """Method poweroff and remove VM instance from vcloud director network.
1830
1831 Args:
1832 vm__vim_uuid: VM UUID
1833
1834 Returns:
1835 Returns the instance identifier
1836 """
1837
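        # Deletion sequence: power off the vApp (if deployed), undeploy it, then delete it.
        # Each step is polled for up to MAX_WAIT_TIME seconds in INTERVAL_TIME increments.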
1838 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1839
1840 vdc = self.get_vdc_details()
1841 if vdc is None:
1842 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1843 self.tenant_name))
1844 raise vimconn.vimconnException(
1845 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1846
1847 try:
1848 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1849 if vapp_name is None:
1850 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1851 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1852 else:
1853 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1854
1855 # Delete vApp and wait for status change if task executed and vApp is None.
1856 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1857
1858 if vapp:
1859 if vapp.me.deployed:
1860 self.logger.info("Powering off vApp {}".format(vapp_name))
1861 #Power off vApp
1862 powered_off = False
1863 wait_time = 0
1864 while wait_time <= MAX_WAIT_TIME:
1865 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1866 if not vapp:
1867 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1868 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1869
1870 power_off_task = vapp.poweroff()
1871 if type(power_off_task) is GenericTask:
1872 result = self.vca.block_until_completed(power_off_task)
1873 if result:
1874 powered_off = True
1875 break
1876 else:
1877 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1878 time.sleep(INTERVAL_TIME)
1879
1880 wait_time +=INTERVAL_TIME
1881 if not powered_off:
1882 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1883 else:
1884 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1885
1886 #Undeploy vApp
1887 self.logger.info("Undeploy vApp {}".format(vapp_name))
1888 wait_time = 0
1889 undeployed = False
1890 while wait_time <= MAX_WAIT_TIME:
1891 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1892 if not vapp:
1893 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1894 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1895 undeploy_task = vapp.undeploy(action='powerOff')
1896
1897 if type(undeploy_task) is GenericTask:
1898 result = self.vca.block_until_completed(undeploy_task)
1899 if result:
1900 undeployed = True
1901 break
1902 else:
1903 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1904 time.sleep(INTERVAL_TIME)
1905
1906 wait_time +=INTERVAL_TIME
1907
1908 if not undeployed:
1909 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1910
1911 # delete vapp
1912 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1913 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1914
1915 if vapp is not None:
1916 wait_time = 0
1917 result = False
1918
1919 while wait_time <= MAX_WAIT_TIME:
1920 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1921 if not vapp:
1922 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1923 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1924
1925 delete_task = vapp.delete()
1926
1927 if type(delete_task) is GenericTask:
1928                             result = self.vca.block_until_completed(delete_task)
1930 if result:
1931 break
1932 else:
1933 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1934 time.sleep(INTERVAL_TIME)
1935
1936 wait_time +=INTERVAL_TIME
1937
1938 if not result:
1939 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1940
1941 except:
1942 self.logger.debug(traceback.format_exc())
1943 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1944
1945 if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
1946             self.logger.info("Deleted vm instance {} successfully".format(vm__vim_uuid))
1947 return vm__vim_uuid
1948 else:
1949 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1950
1951 def refresh_vms_status(self, vm_list):
1952 """Get the status of the virtual machines and their interfaces/ports
1953 Params: the list of VM identifiers
1954 Returns a dictionary with:
1955 vm_id: #VIM id of this Virtual Machine
1956 status: #Mandatory. Text with one of:
1957 # DELETED (not found at vim)
1958 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1959 # OTHER (Vim reported other status not understood)
1960 # ERROR (VIM indicates an ERROR status)
1961 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
1962 # CREATING (on building process), ERROR
1963                         # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
1964 #
1965 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1966 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1967 interfaces:
1968 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1969 mac_address: #Text format XX:XX:XX:XX:XX:XX
1970 vim_net_id: #network id where this interface is connected
1971 vim_interface_id: #interface/port VIM id
1972 ip_address: #null, or text with IPv4, IPv6 address
1973 """
1974
1975 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
1976
1977 vdc = self.get_vdc_details()
1978 if vdc is None:
1979 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1980
1981 vms_dict = {}
1982 nsx_edge_list = []
1983 for vmuuid in vm_list:
1984 vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
1985 if vmname is not None:
1986
1987 try:
1988 vm_pci_details = self.get_vm_pci_details(vmuuid)
1989 the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
1990 vm_info = the_vapp.get_vms_details()
1991 vm_status = vm_info[0]['status']
1992 vm_info[0].update(vm_pci_details)
1993
1994 vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
1995 'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
1996 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
1997
1998 # get networks
1999 vm_app_networks = the_vapp.get_vms_network_info()
2000 for vapp_network in vm_app_networks:
2001 for vm_network in vapp_network:
2002 if vm_network['name'] == vmname:
2003 #Assign IP Address based on MAC Address in NSX DHCP lease info
2004 if vm_network['ip'] is None:
2005 if not nsx_edge_list:
2006 nsx_edge_list = self.get_edge_details()
2007 if nsx_edge_list is None:
2008 raise vimconn.vimconnException("refresh_vms_status:"\
2009 "Failed to get edge details from NSX Manager")
2010 if vm_network['mac'] is not None:
2011 vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
2012
2013 vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
2014 interface = {"mac_address": vm_network['mac'],
2015 "vim_net_id": vm_net_id,
2016 "vim_interface_id": vm_net_id,
2017 'ip_address': vm_network['ip']}
2018 # interface['vim_info'] = yaml.safe_dump(vm_network)
2019 vm_dict["interfaces"].append(interface)
2020 # add a vm to vm dict
2021 vms_dict.setdefault(vmuuid, vm_dict)
2022 except Exception as exp:
2023 self.logger.debug("Error in response {}".format(exp))
2024 self.logger.debug(traceback.format_exc())
2025
2026 return vms_dict
2027
2028
2029 def get_edge_details(self):
2030 """Get the NSX edge list from NSX Manager
2031 Returns list of NSX edges
2032 """
2033 edge_list = []
2034 rheaders = {'Content-Type': 'application/xml'}
2035 nsx_api_url = '/api/4.0/edges'
2036
2037 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2038
2039 try:
2040 resp = requests.get(self.nsx_manager + nsx_api_url,
2041 auth = (self.nsx_user, self.nsx_password),
2042 verify = False, headers = rheaders)
2043 if resp.status_code == requests.codes.ok:
2044 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2045 for edge_pages in paged_Edge_List:
2046 if edge_pages.tag == 'edgePage':
2047 for edge_summary in edge_pages:
2048 if edge_summary.tag == 'pagingInfo':
2049 for element in edge_summary:
2050 if element.tag == 'totalCount' and element.text == '0':
2051 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2052 .format(self.nsx_manager))
2053
2054 if edge_summary.tag == 'edgeSummary':
2055 for element in edge_summary:
2056 if element.tag == 'id':
2057 edge_list.append(element.text)
2058 else:
2059 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2060 .format(self.nsx_manager))
2061
2062 if not edge_list:
2063 raise vimconn.vimconnException("get_edge_details: "\
2064 "No NSX edge details found: {}"
2065 .format(self.nsx_manager))
2066 else:
2067 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2068 return edge_list
2069 else:
2070 self.logger.debug("get_edge_details: "
2071 "Failed to get NSX edge details from NSX Manager: {}"
2072 .format(resp.content))
2073 return None
2074
2075 except Exception as exp:
2076 self.logger.debug("get_edge_details: "\
2077 "Failed to get NSX edge details from NSX Manager: {}"
2078 .format(exp))
2079 raise vimconn.vimconnException("get_edge_details: "\
2080 "Failed to get NSX edge details from NSX Manager: {}"
2081 .format(exp))
2082
2083
2084 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2085 """Get IP address details from NSX edges, using the MAC address
2086 PARAMS: nsx_edges : List of NSX edges
2087 mac_address : Find IP address corresponding to this MAC address
2088         Returns: IP address corresponding to the provided MAC address
2089 """
2090
2091 ip_addr = None
2092 rheaders = {'Content-Type': 'application/xml'}
2093
2094 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2095
2096 try:
2097 for edge in nsx_edges:
2098 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2099
2100 resp = requests.get(self.nsx_manager + nsx_api_url,
2101 auth = (self.nsx_user, self.nsx_password),
2102 verify = False, headers = rheaders)
2103
2104 if resp.status_code == requests.codes.ok:
2105 dhcp_leases = XmlElementTree.fromstring(resp.text)
2106 for child in dhcp_leases:
2107 if child.tag == 'dhcpLeaseInfo':
2108 dhcpLeaseInfo = child
2109                         for leaseInfo in dhcpLeaseInfo:
                                 edge_mac_addr = None  # reset per lease so a previous lease's MAC is not matched against this lease's IP
2110                             for elem in leaseInfo:
2111 if (elem.tag)=='macAddress':
2112 edge_mac_addr = elem.text
2113 if (elem.tag)=='ipAddress':
2114 ip_addr = elem.text
2115 if edge_mac_addr is not None:
2116 if edge_mac_addr == mac_address:
2117 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2118 .format(ip_addr, mac_address,edge))
2119 return ip_addr
2120 else:
2121 self.logger.debug("get_ipaddr_from_NSXedge: "\
2122 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2123 .format(resp.content))
2124
2125 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2126 return None
2127
2128 except XmlElementTree.ParseError as Err:
2129 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2130
2131
2132 def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
2133 """Send and action over a VM instance from VIM
2134 Returns the vm_id if the action was successfully sent to the VIM"""
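        # Supported action_dict keys (see below): start, rebuild, pause, resume,
        # shutoff, shutdown, forceOff, reboot; e.g. action_dict={'start': None} (illustrative)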
2135
2136 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2137 if vm__vim_uuid is None or action_dict is None:
2138 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2139
2140 vdc = self.get_vdc_details()
2141 if vdc is None:
2142 return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)
2143
2144 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
2145 if vapp_name is None:
2146 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2147 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2148 else:
2149 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2150
2151 try:
2152 the_vapp = self.vca.get_vapp(vdc, vapp_name)
2153 # TODO fix all status
2154 if "start" in action_dict:
2155 vm_info = the_vapp.get_vms_details()
2156 vm_status = vm_info[0]['status']
2157 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2158 if vm_status == "Suspended" or vm_status == "Powered off":
2159 power_on_task = the_vapp.poweron()
2160 result = self.vca.block_until_completed(power_on_task)
2161 self.instance_actions_result("start", result, vapp_name)
2162 elif "rebuild" in action_dict:
2163 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2164 rebuild_task = the_vapp.deploy(powerOn=True)
2165 result = self.vca.block_until_completed(rebuild_task)
2166 self.instance_actions_result("rebuild", result, vapp_name)
2167 elif "pause" in action_dict:
2168 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2169 pause_task = the_vapp.undeploy(action='suspend')
2170 result = self.vca.block_until_completed(pause_task)
2171 self.instance_actions_result("pause", result, vapp_name)
2172 elif "resume" in action_dict:
2173 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2174 power_task = the_vapp.poweron()
2175 result = self.vca.block_until_completed(power_task)
2176 self.instance_actions_result("resume", result, vapp_name)
2177 elif "shutoff" in action_dict or "shutdown" in action_dict:
2178 action_name , value = action_dict.items()[0]
2179 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2180 power_off_task = the_vapp.undeploy(action='powerOff')
2181 result = self.vca.block_until_completed(power_off_task)
2182 if action_name == "shutdown":
2183 self.instance_actions_result("shutdown", result, vapp_name)
2184 else:
2185 self.instance_actions_result("shutoff", result, vapp_name)
2186 elif "forceOff" in action_dict:
2187 result = the_vapp.undeploy(action='force')
2188 self.instance_actions_result("forceOff", result, vapp_name)
2189 elif "reboot" in action_dict:
2190 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2191                 reboot_task = the_vapp.reboot()
                     if type(reboot_task) is GenericTask:
                         self.vca.block_until_completed(reboot_task)
2192 else:
2193 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2194 return vm__vim_uuid
2195 except Exception as exp :
2196 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2197 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2198
2199 def instance_actions_result(self, action, result, vapp_name):
2200 if result:
2201             self.logger.info("action_vminstance: Successfully {} the vApp: {}".format(action, vapp_name))
2202 else:
2203 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2204
2205 def get_vminstance_console(self, vm_id, console_type="vnc"):
2206 """
2207 Get a console for the virtual machine
2208 Params:
2209 vm_id: uuid of the VM
2210 console_type, can be:
2211 "novnc" (by default), "xvpvnc" for VNC types,
2212 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2213 Returns dict with the console parameters:
2214 protocol: ssh, ftp, http, https, ...
2215 server: usually ip address
2216 port: the http, ssh, ... port
2217 suffix: extra text, e.g. the http path and query string
2218 """
2219 raise vimconn.vimconnNotImplemented("Should have implemented this")
2220
2221 # NOT USED METHODS in current version
2222
2223 def host_vim2gui(self, host, server_dict):
2224 """Transform host dictionary from VIM format to GUI format,
2225 and append to the server_dict
2226 """
2227 raise vimconn.vimconnNotImplemented("Should have implemented this")
2228
2229 def get_hosts_info(self):
2230 """Get the information of deployed hosts
2231 Returns the hosts content"""
2232 raise vimconn.vimconnNotImplemented("Should have implemented this")
2233
2234 def get_hosts(self, vim_tenant):
2235 """Get the hosts and deployed instances
2236 Returns the hosts content"""
2237 raise vimconn.vimconnNotImplemented("Should have implemented this")
2238
2239 def get_processor_rankings(self):
2240 """Get the processor rankings in the VIM database"""
2241 raise vimconn.vimconnNotImplemented("Should have implemented this")
2242
2243 def new_host(self, host_data):
2244 """Adds a new host to VIM"""
2245 '''Returns status code of the VIM response'''
2246 raise vimconn.vimconnNotImplemented("Should have implemented this")
2247
2248 def new_external_port(self, port_data):
2249 """Adds a external port to VIM"""
2250 '''Returns the port identifier'''
2251 raise vimconn.vimconnNotImplemented("Should have implemented this")
2252
2253 def new_external_network(self, net_name, net_type):
2254 """Adds a external network to VIM (shared)"""
2255 '''Returns the network identifier'''
2256 raise vimconn.vimconnNotImplemented("Should have implemented this")
2257
2258 def connect_port_network(self, port_id, network_id, admin=False):
2259 """Connects a external port to a network"""
2260 '''Returns status code of the VIM response'''
2261 raise vimconn.vimconnNotImplemented("Should have implemented this")
2262
2263 def new_vminstancefromJSON(self, vm_data):
2264 """Adds a VM instance to VIM"""
2265 '''Returns the instance identifier'''
2266 raise vimconn.vimconnNotImplemented("Should have implemented this")
2267
2268 def get_network_name_by_id(self, network_uuid=None):
2269         """Method gets the vCloud Director network name based on the supplied uuid.
2270
2271 Args:
2272 network_uuid: network_id
2273
2274 Returns:
2275             The network name, or None if not found.
2276 """
2277
2278 if not network_uuid:
2279 return None
2280
2281 try:
2282 org_dict = self.get_org(self.org_uuid)
2283 if 'networks' in org_dict:
2284 org_network_dict = org_dict['networks']
2285 for net_uuid in org_network_dict:
2286 if net_uuid == network_uuid:
2287 return org_network_dict[net_uuid]
2288 except:
2289 self.logger.debug("Exception in get_network_name_by_id")
2290 self.logger.debug(traceback.format_exc())
2291
2292 return None
2293
2294 def get_network_id_by_name(self, network_name=None):
2295 """Method gets vcloud director network uuid based on supplied name.
2296
2297 Args:
2298 network_name: network_name
2299 Returns:
2300             The network uuid, or None if not found.
2302 """
2303
2304 if not network_name:
2305 self.logger.debug("get_network_id_by_name() : Network name is empty")
2306 return None
2307
2308 try:
2309 org_dict = self.get_org(self.org_uuid)
2310 if org_dict and 'networks' in org_dict:
2311 org_network_dict = org_dict['networks']
2312 for net_uuid,net_name in org_network_dict.iteritems():
2313 if net_name == network_name:
2314 return net_uuid
2315
2316 except KeyError as exp:
2317 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2318
2319 return None
2320
2321 def list_org_action(self):
2322 """
2323         Method leverages vCloud Director and queries the available organizations for a particular user
2324
2325 Args:
2326 vca - is active VCA connection.
2327 vdc_name - is a vdc name that will be used to query vms action
2328
2329 Returns:
2330             The XML response content
2331 """
2332
2333 url_list = [self.vca.host, '/api/org']
2334 vm_list_rest_call = ''.join(url_list)
2335
2336 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2337 response = Http.get(url=vm_list_rest_call,
2338 headers=self.vca.vcloud_session.get_vcloud_headers(),
2339 verify=self.vca.verify,
2340 logger=self.vca.logger)
2341
2342 if response.status_code == 403:
2343 response = self.retry_rest('GET', vm_list_rest_call)
2344
2345 if response.status_code == requests.codes.ok:
2346 return response.content
2347
2348 return None
2349
2350 def get_org_action(self, org_uuid=None):
2351 """
2352         Method leverages vCloud Director and retrieves the available objects for an organization.
2353
2354 Args:
2355             org_uuid - the organization uuid to query
2357
2358 Returns:
2359             The XML response content
2360 """
2361
2362 if org_uuid is None:
2363 return None
2364
2365 url_list = [self.vca.host, '/api/org/', org_uuid]
2366 vm_list_rest_call = ''.join(url_list)
2367
2368 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2369 response = Http.get(url=vm_list_rest_call,
2370 headers=self.vca.vcloud_session.get_vcloud_headers(),
2371 verify=self.vca.verify,
2372 logger=self.vca.logger)
2373
2374 #Retry login if session expired & retry sending request
2375 if response.status_code == 403:
2376 response = self.retry_rest('GET', vm_list_rest_call)
2377
2378 if response.status_code == requests.codes.ok:
2379 return response.content
2380
2381 return None
2382
2383 def get_org(self, org_uuid=None):
2384 """
2385         Method retrieves the details of an organization in vCloud Director
2386
2387 Args:
2388             org_uuid - is an organization uuid.
2389
2390 Returns:
2391             The return dictionary with the following keys:
2392                 "networks" - the network list under the org
2393                 "catalogs" - the catalog list under the org
2394                 "vdcs"     - the vdc list under the org
2395 """
2396
2397 org_dict = {}
2398
2399 if org_uuid is None:
2400 return org_dict
2401
2402 content = self.get_org_action(org_uuid=org_uuid)
2403 try:
2404 vdc_list = {}
2405 network_list = {}
2406 catalog_list = {}
2407 vm_list_xmlroot = XmlElementTree.fromstring(content)
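            # classify each child element by its vCloud MIME type:
            #   vdc+xml -> 'vdcs', orgNetwork+xml -> 'networks', catalog+xml -> 'catalogs'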
2408 for child in vm_list_xmlroot:
2409 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2410 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2411 org_dict['vdcs'] = vdc_list
2412 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2413 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2414 org_dict['networks'] = network_list
2415 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2416 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2417 org_dict['catalogs'] = catalog_list
2418 except:
2419 pass
2420
2421 return org_dict
2422
2423 def get_org_list(self):
2424 """
2425         Method retrieves the available organizations in vCloud Director
2426
2427 Args:
2428 vca - is active VCA connection.
2429
2430 Returns:
2431             A dictionary keyed by organization UUID, with the organization name as the value
2432 """
2433
2434 org_dict = {}
2435
2436 content = self.list_org_action()
2437 try:
2438 vm_list_xmlroot = XmlElementTree.fromstring(content)
2439 for vm_xml in vm_list_xmlroot:
2440 if vm_xml.tag.split("}")[1] == 'Org':
2441 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2442 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2443 except:
2444 pass
2445
2446 return org_dict
2447
2448 def vms_view_action(self, vdc_name=None):
2449 """ Method leverages vCloud director vms query call
2450
2451 Args:
2452 vca - is active VCA connection.
2453 vdc_name - is a vdc name that will be used to query vms action
2454
2455 Returns:
2456             The XML response content
2457 """
2458 vca = self.connect()
2459 if vdc_name is None:
2460 return None
2461
2462 url_list = [vca.host, '/api/vms/query']
2463 vm_list_rest_call = ''.join(url_list)
2464
2465 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2466 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2467 vca.vcloud_session.organization.Link)
2468 if len(refs) == 1:
2469 response = Http.get(url=vm_list_rest_call,
2470 headers=vca.vcloud_session.get_vcloud_headers(),
2471 verify=vca.verify,
2472 logger=vca.logger)
2473 if response.status_code == requests.codes.ok:
2474 return response.content
2475
2476 return None
2477
2478 def get_vapp_list(self, vdc_name=None):
2479 """
2480         Method retrieves the vApp list deployed in vCloud Director and returns a dictionary
2481         containing all vApps deployed for the queried VDC.
2482         The dictionary key is the vApp UUID
2483
2484
2485 Args:
2486 vca - is active VCA connection.
2487 vdc_name - is a vdc name that will be used to query vms action
2488
2489 Returns:
2490             A dictionary keyed by vApp UUID
2491 """
2492
2493 vapp_dict = {}
2494 if vdc_name is None:
2495 return vapp_dict
2496
2497 content = self.vms_view_action(vdc_name=vdc_name)
2498 try:
2499 vm_list_xmlroot = XmlElementTree.fromstring(content)
2500 for vm_xml in vm_list_xmlroot:
2501 if vm_xml.tag.split("}")[1] == 'VMRecord':
2502 if vm_xml.attrib['isVAppTemplate'] == 'true':
2503 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2504 if 'vappTemplate-' in rawuuid[0]:
2505                         # the container is in the format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5;
2506                         # we strip the 'vappTemplate-' prefix and use the raw UUID as the key
2507 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2508 except:
2509 pass
2510
2511 return vapp_dict
2512
2513 def get_vm_list(self, vdc_name=None):
2514 """
2515         Method retrieves the list of VMs deployed in vCloud Director. It returns a dictionary
2516         containing all VMs deployed for the queried VDC.
2517         The dictionary key is the VM UUID
2518
2519
2520 Args:
2521 vca - is active VCA connection.
2522 vdc_name - is a vdc name that will be used to query vms action
2523
2524 Returns:
2525             A dictionary keyed by VM UUID
2526 """
2527 vm_dict = {}
2528
2529 if vdc_name is None:
2530 return vm_dict
2531
2532 content = self.vms_view_action(vdc_name=vdc_name)
2533 try:
2534 vm_list_xmlroot = XmlElementTree.fromstring(content)
2535 for vm_xml in vm_list_xmlroot:
2536 if vm_xml.tag.split("}")[1] == 'VMRecord':
2537 if vm_xml.attrib['isVAppTemplate'] == 'false':
2538 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2539 if 'vm-' in rawuuid[0]:
2540                         # the href is in the format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5;
2541                         # we strip the 'vm-' prefix and use the raw UUID as the key
2542 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2543 except:
2544 pass
2545
2546 return vm_dict
2547
2548 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2549 """
2550         Method retrieves a VM deployed in vCloud Director. It returns the VM attributes as a dictionary
2551         for the vApp matched by name or UUID in the queried VDC.
2552         The dictionary key is the VM UUID
2553
2554
2555 Args:
2556 vca - is active VCA connection.
2557 vdc_name - is a vdc name that will be used to query vms action
2558
2559 Returns:
2560             A dictionary keyed by VM UUID
2561 """
2562 vm_dict = {}
2563 vca = self.connect()
2564 if not vca:
2565 raise vimconn.vimconnConnectionException("self.connect() is failed")
2566
2567 if vdc_name is None:
2568 return vm_dict
2569
2570 content = self.vms_view_action(vdc_name=vdc_name)
2571 try:
2572 vm_list_xmlroot = XmlElementTree.fromstring(content)
2573 for vm_xml in vm_list_xmlroot:
2574 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2575 # lookup done by UUID
2576 if isuuid:
2577 if vapp_name in vm_xml.attrib['container']:
2578 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2579 if 'vm-' in rawuuid[0]:
2580 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2581 break
2582 # lookup done by Name
2583 else:
2584 if vapp_name in vm_xml.attrib['name']:
2585 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2586 if 'vm-' in rawuuid[0]:
2587 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2588 break
2589 except:
2590 pass
2591
2592 return vm_dict
2593
2594 def get_network_action(self, network_uuid=None):
2595 """
2596         Method leverages vCloud Director and queries a network based on the network uuid
2597
2598 Args:
2599 vca - is active VCA connection.
2600 network_uuid - is a network uuid
2601
2602 Returns:
2603             The XML response content
2604 """
2605
2606 if network_uuid is None:
2607 return None
2608
2609 url_list = [self.vca.host, '/api/network/', network_uuid]
2610 vm_list_rest_call = ''.join(url_list)
2611
2612 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2613 response = Http.get(url=vm_list_rest_call,
2614 headers=self.vca.vcloud_session.get_vcloud_headers(),
2615 verify=self.vca.verify,
2616 logger=self.vca.logger)
2617
2618 #Retry login if session expired & retry sending request
2619 if response.status_code == 403:
2620 response = self.retry_rest('GET', vm_list_rest_call)
2621
2622 if response.status_code == requests.codes.ok:
2623 return response.content
2624
2625 return None
2626
2627 def get_vcd_network(self, network_uuid=None):
2628 """
2629 Method retrieves available network from vCloud Director
2630
2631 Args:
2632 network_uuid - is VCD network UUID
2633
2634 Each element serialized as key : value pair
2635
2636         The following keys are available for access, e.g. network_configuration['Gateway']
2637 <Configuration>
2638 <IpScopes>
2639 <IpScope>
2640 <IsInherited>true</IsInherited>
2641 <Gateway>172.16.252.100</Gateway>
2642 <Netmask>255.255.255.0</Netmask>
2643 <Dns1>172.16.254.201</Dns1>
2644 <Dns2>172.16.254.202</Dns2>
2645 <DnsSuffix>vmwarelab.edu</DnsSuffix>
2646 <IsEnabled>true</IsEnabled>
2647 <IpRanges>
2648 <IpRange>
2649 <StartAddress>172.16.252.1</StartAddress>
2650 <EndAddress>172.16.252.99</EndAddress>
2651 </IpRange>
2652 </IpRanges>
2653 </IpScope>
2654 </IpScopes>
2655 <FenceMode>bridged</FenceMode>
2656
2657 Returns:
2658             A dictionary with the network configuration
2659 """
2660
2661 network_configuration = {}
2662 if network_uuid is None:
2663 return network_uuid
2664
2665 try:
2666 content = self.get_network_action(network_uuid=network_uuid)
2667 vm_list_xmlroot = XmlElementTree.fromstring(content)
2668
2669 network_configuration['status'] = vm_list_xmlroot.get("status")
2670 network_configuration['name'] = vm_list_xmlroot.get("name")
2671 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
2672
2673 for child in vm_list_xmlroot:
2674 if child.tag.split("}")[1] == 'IsShared':
2675 network_configuration['isShared'] = child.text.strip()
2676 if child.tag.split("}")[1] == 'Configuration':
2677 for configuration in child.iter():
2678 tagKey = configuration.tag.split("}")[1].strip()
2679 if tagKey != "":
2680 network_configuration[tagKey] = configuration.text.strip()
2681 return network_configuration
2682 except Exception as exp :
2683 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
2684 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2685
2686 return network_configuration
2687
2688 def delete_network_action(self, network_uuid=None):
2689 """
2690         Method deletes the given network from vCloud Director
2691
2692 Args:
2693 network_uuid - is a network uuid that client wish to delete
2694
2695 Returns:
2696             True if the delete request was accepted (HTTP 202), otherwise False
2697 """
2698
2699 vca = self.connect_as_admin()
2700 if not vca:
2701 raise vimconn.vimconnConnectionException("self.connect() is failed")
2702 if network_uuid is None:
2703 return False
2704
2705 url_list = [vca.host, '/api/admin/network/', network_uuid]
2706 vm_list_rest_call = ''.join(url_list)
2707
2708 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2709 response = Http.delete(url=vm_list_rest_call,
2710 headers=vca.vcloud_session.get_vcloud_headers(),
2711 verify=vca.verify,
2712 logger=vca.logger)
2713
2714 if response.status_code == 202:
2715 return True
2716
2717 return False
2718
2719 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2720 ip_profile=None, isshared='true'):
2721 """
2722         Method creates a network in vCloud Director
2723
2724 Args:
2725 network_name - is network name to be created.
2726 net_type - can be 'bridge','data','ptp','mgmt'.
2727 ip_profile is a dict containing the IP parameters of the network
2728 isshared - is a boolean
2729 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2730             It is an optional attribute; by default, if no parent network is indicated, the first available one will be used.
2731
2732 Returns:
2733             The network uuid, or None
2734 """
2735
2736 new_network_name = [network_name, '-', str(uuid.uuid4())]
2737 content = self.create_network_rest(network_name=''.join(new_network_name),
2738 ip_profile=ip_profile,
2739 net_type=net_type,
2740 parent_network_uuid=parent_network_uuid,
2741 isshared=isshared)
2742 if content is None:
2743 self.logger.debug("Failed create network {}.".format(network_name))
2744 return None
2745
2746 try:
2747 vm_list_xmlroot = XmlElementTree.fromstring(content)
2748 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2749 if len(vcd_uuid) == 4:
2750 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2751 return vcd_uuid[3]
2752 except:
2753 self.logger.debug("Failed create network {}".format(network_name))
2754 return None
2755
2756 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2757 ip_profile=None, isshared='true'):
2758 """
2759         Method creates a network in vCloud Director
2760
2761 Args:
2762 network_name - is network name to be created.
2763 net_type - can be 'bridge','data','ptp','mgmt'.
2764 ip_profile is a dict containing the IP parameters of the network
2765 isshared - is a boolean
2766 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2767             It is an optional attribute; by default, if no parent network is indicated, the first available one will be used.
2768
2769 Returns:
2770             The XML response content of the created network, or None
2771 """
2772
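        # Illustrative ip_profile (only the keys consumed below; any missing key is defaulted):
        #   {'subnet_address': '192.168.10.0/24', 'gateway_address': '192.168.10.1',
        #    'dhcp_enabled': True, 'dhcp_count': 50, 'dhcp_start_address': '192.168.10.3',
        #    'ip_version': 'IPv4', 'dns_address': '192.168.10.2'}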
2773 vca = self.connect_as_admin()
2774 if not vca:
2775 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2776 if network_name is None:
2777 return None
2778
2779 url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
2780 vm_list_rest_call = ''.join(url_list)
2781 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2782 response = Http.get(url=vm_list_rest_call,
2783 headers=vca.vcloud_session.get_vcloud_headers(),
2784 verify=vca.verify,
2785 logger=vca.logger)
2786
2787 provider_network = None
2788 available_networks = None
2789 add_vdc_rest_url = None
2790
2791 if response.status_code != requests.codes.ok:
2792 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2793 response.status_code))
2794 return None
2795 else:
2796 try:
2797 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2798 for child in vm_list_xmlroot:
2799 if child.tag.split("}")[1] == 'ProviderVdcReference':
2800 provider_network = child.attrib.get('href')
2801 # application/vnd.vmware.admin.providervdc+xml
2802 if child.tag.split("}")[1] == 'Link':
2803 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
2804 and child.attrib.get('rel') == 'add':
2805 add_vdc_rest_url = child.attrib.get('href')
2806 except:
2807 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2808 self.logger.debug("Respond body {}".format(response.content))
2809 return None
2810
2811 # find pvdc provided available network
2812 response = Http.get(url=provider_network,
2813 headers=vca.vcloud_session.get_vcloud_headers(),
2814 verify=vca.verify,
2815 logger=vca.logger)
2816 if response.status_code != requests.codes.ok:
2817 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2818 response.status_code))
2819 return None
2820
2821 # available_networks.split("/")[-1]
2822
2823 if parent_network_uuid is None:
2824 try:
2825 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2826 for child in vm_list_xmlroot.iter():
2827 if child.tag.split("}")[1] == 'AvailableNetworks':
2828 for networks in child.iter():
2829 # application/vnd.vmware.admin.network+xml
2830 if networks.attrib.get('href') is not None:
2831 available_networks = networks.attrib.get('href')
2832 break
2833 except:
2834 return None
2835
2836 try:
2837 #Configure IP profile of the network
2838 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
2839
2840 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
2841 subnet_rand = random.randint(0, 255)
2842 ip_base = "192.168.{}.".format(subnet_rand)
2843 ip_profile['subnet_address'] = ip_base + "0/24"
2844 else:
2845 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
2846
2847 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
2848 ip_profile['gateway_address']=ip_base + "1"
2849 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
2850 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
2851 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
2852 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
2853 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
2854 ip_profile['dhcp_start_address']=ip_base + "3"
2855 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
2856 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
2857 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
2858 ip_profile['dns_address']=ip_base + "2"
2859
2860 gateway_address=ip_profile['gateway_address']
2861 dhcp_count=int(ip_profile['dhcp_count'])
2862 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
2863
2864 if ip_profile['dhcp_enabled']==True:
2865 dhcp_enabled='true'
2866 else:
2867 dhcp_enabled='false'
2868 dhcp_start_address=ip_profile['dhcp_start_address']
2869
2870 #derive dhcp_end_address from dhcp_start_address & dhcp_count
2871 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
2872 end_ip_int += dhcp_count - 1
2873 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
2874
2875 ip_version=ip_profile['ip_version']
2876 dns_address=ip_profile['dns_address']
2877 except KeyError as exp:
2878 self.logger.debug("Create Network REST: Key error {}".format(exp))
2879 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
2880
2881 # either use client provided UUID or search for a first available
2882 # if both are not defined we return none
2883 if parent_network_uuid is not None:
2884 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
2885 add_vdc_rest_url = ''.join(url_list)
2886
2887 #Creating all networks as Direct Org VDC type networks.
2888 #Unused in case of Underlay (data/ptp) network interface.
2889 fence_mode="bridged"
2890 is_inherited='false'
2891 dns_list = dns_address.split(";")
2892 dns1 = dns_list[0]
2893 dns2_text = ""
2894 if len(dns_list) >= 2:
2895 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
2896 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2897 <Description>Openmano created</Description>
2898 <Configuration>
2899 <IpScopes>
2900 <IpScope>
2901 <IsInherited>{1:s}</IsInherited>
2902 <Gateway>{2:s}</Gateway>
2903 <Netmask>{3:s}</Netmask>
2904 <Dns1>{4:s}</Dns1>{5:s}
2905 <IsEnabled>{6:s}</IsEnabled>
2906 <IpRanges>
2907 <IpRange>
2908 <StartAddress>{7:s}</StartAddress>
2909 <EndAddress>{8:s}</EndAddress>
2910 </IpRange>
2911 </IpRanges>
2912 </IpScope>
2913 </IpScopes>
2914 <ParentNetwork href="{9:s}"/>
2915 <FenceMode>{10:s}</FenceMode>
2916 </Configuration>
2917 <IsShared>{11:s}</IsShared>
2918 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
2919 subnet_address, dns1, dns2_text, dhcp_enabled,
2920 dhcp_start_address, dhcp_end_address, available_networks,
2921 fence_mode, isshared)
2922
2923 headers = vca.vcloud_session.get_vcloud_headers()
2924 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
2925 try:
2926 response = Http.post(url=add_vdc_rest_url,
2927 headers=headers,
2928 data=data,
2929 verify=vca.verify,
2930 logger=vca.logger)
2931
2932 if response.status_code != 201:
2933 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
2934 .format(response.status_code,response.content))
2935 else:
2936 network = networkType.parseString(response.content, True)
2937 create_nw_task = network.get_Tasks().get_Task()[0]
2938
2939                 # if all is OK we return the response content once network creation completes;
2940                 # otherwise we return None by default
2941 if create_nw_task is not None:
2942 self.logger.debug("Create Network REST : Waiting for Network creation complete")
2943 status = vca.block_until_completed(create_nw_task)
2944 if status:
2945 return response.content
2946 else:
2947 self.logger.debug("create_network_rest task failed. Network Create response : {}"
2948 .format(response.content))
2949 except Exception as exp:
2950 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
2951
2952 return None
2953
2954 def convert_cidr_to_netmask(self, cidr_ip=None):
2955 """
2956         Method converts a CIDR prefix (e.g. /24) into a dotted-decimal netmask
2957 Args:
2958 cidr_ip : CIDR IP address
2959 Returns:
2960 netmask : Converted netmask
2961 """
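        # Example (illustrative): convert_cidr_to_netmask('192.168.10.0/24') -> '255.255.255.0'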
2962 if cidr_ip is not None:
2963 if '/' in cidr_ip:
2964 network, net_bits = cidr_ip.split('/')
2965 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2966 else:
2967 netmask = cidr_ip
2968 return netmask
2969 return None
2970
2971 def get_provider_rest(self, vca=None):
2972 """
2973 Method gets provider vdc view from vcloud director
2974
2975 Args:
2976             vca - an active VCA connection
2979
2980 Returns:
2981             The XML content of the response, or None
2982 """
2983
2984 url_list = [vca.host, '/api/admin']
2985 response = Http.get(url=''.join(url_list),
2986 headers=vca.vcloud_session.get_vcloud_headers(),
2987 verify=vca.verify,
2988 logger=vca.logger)
2989
2990 if response.status_code == requests.codes.ok:
2991 return response.content
2992 return None
2993
2994 def create_vdc(self, vdc_name=None):
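        """
        Method creates a new VDC from the pre-defined 'openmano' VDC template
        (via create_vdc_from_tmpl_rest) and returns a dictionary {vdc_uuid: vdc_href},
        or None on failure.
        """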
2995
2996 vdc_dict = {}
2997
2998 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
2999 if xml_content is not None:
3000 try:
3001 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
3002 for child in task_resp_xmlroot:
3003 if child.tag.split("}")[1] == 'Owner':
3004 vdc_id = child.attrib.get('href').split("/")[-1]
3005 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
3006 return vdc_dict
3007 except:
3008 self.logger.debug("Respond body {}".format(xml_content))
3009
3010 return None
3011
3012 def create_vdc_from_tmpl_rest(self, vdc_name=None):
3013 """
3014         Method creates a vdc in vCloud Director based on a VDC template.
3015         It uses a pre-defined template that must be named 'openmano'
3016
3017 Args:
3018 vdc_name - name of a new vdc.
3019
3020 Returns:
3021             The XML content of the response, or None
3022 """
3023
3024 self.logger.info("Creating new vdc {}".format(vdc_name))
3025 vca = self.connect()
3026 if not vca:
3027 raise vimconn.vimconnConnectionException("self.connect() is failed")
3028 if vdc_name is None:
3029 return None
3030
3031 url_list = [vca.host, '/api/vdcTemplates']
3032 vm_list_rest_call = ''.join(url_list)
3033 response = Http.get(url=vm_list_rest_call,
3034 headers=vca.vcloud_session.get_vcloud_headers(),
3035 verify=vca.verify,
3036 logger=vca.logger)
3037
3038 # container url to a template
3039 vdc_template_ref = None
3040 try:
3041 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3042 for child in vm_list_xmlroot:
3043 # application/vnd.vmware.admin.providervdc+xml
3044                 # we need to find the template from which we instantiate the VDC
3045 if child.tag.split("}")[1] == 'VdcTemplate':
3046 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
3047 vdc_template_ref = child.attrib.get('href')
3048 except:
3049 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3050 self.logger.debug("Respond body {}".format(response.content))
3051 return None
3052
3053         # if we didn't find the required pre-defined template we return None
3054 if vdc_template_ref is None:
3055 return None
3056
3057 try:
3058 # instantiate vdc
3059 url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
3060 vm_list_rest_call = ''.join(url_list)
3061 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3062 <Source href="{1:s}"></Source>
3063                          <Description>openmano</Description>
3064 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
3065 headers = vca.vcloud_session.get_vcloud_headers()
3066 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
3067 response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
3068 logger=vca.logger)
3069
3070 vdc_task = taskType.parseString(response.content, True)
3071 if type(vdc_task) is GenericTask:
3072 self.vca.block_until_completed(vdc_task)
3073
3074             # if all went well return the content, otherwise None
3075 if response.status_code >= 200 and response.status_code < 300:
3076 return response.content
3077 return None
3078 except:
3079 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3080 self.logger.debug("Respond body {}".format(response.content))
3081
3082 return None
3083
3084 def create_vdc_rest(self, vdc_name=None):
3085 """
3086         Method creates a VDC in vCloud Director via the admin REST API
3087
3088         Args:
3089             vdc_name - name of the VDC to be created. The VDC is created against a
3090                        provider VDC reference obtained from the admin API, with fixed
3091                        CPU, memory and storage allocations (see the CreateVdcParams payload below).
3092
3093         Returns:
3094             The XML content of the response (HTTP 201 Created), or None on failure
3095 """
3096
3097 self.logger.info("Creating new vdc {}".format(vdc_name))
3098
3099 vca = self.connect_as_admin()
3100 if not vca:
3101             raise vimconn.vimconnConnectionException("self.connect() failed")
3102 if vdc_name is None:
3103 return None
3104
3105 url_list = [vca.host, '/api/admin/org/', self.org_uuid]
3106 vm_list_rest_call = ''.join(url_list)
3107         if vca.vcloud_session and vca.vcloud_session.organization:
3108 response = Http.get(url=vm_list_rest_call,
3109 headers=vca.vcloud_session.get_vcloud_headers(),
3110 verify=vca.verify,
3111 logger=vca.logger)
3112
3113 provider_vdc_ref = None
3114 add_vdc_rest_url = None
3115 available_networks = None
3116
3117 if response.status_code != requests.codes.ok:
3118 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3119 response.status_code))
3120 return None
3121 else:
3122 try:
3123 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3124 for child in vm_list_xmlroot:
3125 # application/vnd.vmware.admin.providervdc+xml
3126 if child.tag.split("}")[1] == 'Link':
3127 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
3128 and child.attrib.get('rel') == 'add':
3129 add_vdc_rest_url = child.attrib.get('href')
3130 except:
3131                 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3132                 self.logger.debug("Response body {}".format(response.content))
3133 return None
3134
3135 response = self.get_provider_rest(vca=vca)
3136 try:
3137 vm_list_xmlroot = XmlElementTree.fromstring(response)
3138 for child in vm_list_xmlroot:
3139 if child.tag.split("}")[1] == 'ProviderVdcReferences':
3140 for sub_child in child:
3141 provider_vdc_ref = sub_child.attrib.get('href')
3142 except:
3143             self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3144             self.logger.debug("Response body {}".format(response))
3145 return None
3146
3147 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
3148 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
3149 <AllocationModel>ReservationPool</AllocationModel>
3150 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
3151 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
3152 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
3153 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
3154 <ProviderVdcReference
3155 name="Main Provider"
3156 href="{2:s}" />
3157 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
3158 escape(vdc_name),
3159 provider_vdc_ref)
3160
3161 headers = vca.vcloud_session.get_vcloud_headers()
3162 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
3163 response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
3164 logger=vca.logger)
3165
3166             # if all went well return the content, otherwise None
3167 if response.status_code == 201:
3168 return response.content
3169 return None
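    # Note: the CreateVdcParams payload above uses hard-coded allocations (ReservationPool,
    # 2048 MHz CPU, 2048 MB memory, 20480 MB on the default storage profile) and attaches
    # the VDC to the last ProviderVdcReference found via get_provider_rest(); none of these
    # values are derived from the tenant request.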
3170
3171 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
3172 """
3173         Method retrieves vApp details from vCloud Director
3174
3175         Args:
3176             vapp_uuid - vApp identifier.
3177             need_admin_access - if True, the query is performed with an admin session.
3178         Returns:
3179             A dict with the parsed vApp details (empty dict on failure)
3180 """
3181
3182 parsed_respond = {}
3183 vca = None
3184
3185 if need_admin_access:
3186 vca = self.connect_as_admin()
3187 else:
3188 vca = self.vca
3189
3190 if not vca:
3191             raise vimconn.vimconnConnectionException("self.connect() failed")
3192 if vapp_uuid is None:
3193 return None
3194
3195 url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
3196 get_vapp_restcall = ''.join(url_list)
3197
3198 if vca.vcloud_session and vca.vcloud_session.organization:
3199 response = Http.get(url=get_vapp_restcall,
3200 headers=vca.vcloud_session.get_vcloud_headers(),
3201 verify=vca.verify,
3202 logger=vca.logger)
3203
3204 if response.status_code == 403:
3205                 if not need_admin_access:
3206 response = self.retry_rest('GET', get_vapp_restcall)
3207
3208 if response.status_code != requests.codes.ok:
3209 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
3210 response.status_code))
3211 return parsed_respond
3212
3213 try:
3214 xmlroot_respond = XmlElementTree.fromstring(response.content)
3215 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
3216
3217 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
3218 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
3219 'vmw': 'http://www.vmware.com/schema/ovf',
3220 'vm': 'http://www.vmware.com/vcloud/v1.5',
3221 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
3222 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
3223 "xmlns":"http://www.vmware.com/vcloud/v1.5"
3224 }
3225
3226 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
3227 if created_section is not None:
3228 parsed_respond['created'] = created_section.text
3229
3230 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
3231 if network_section is not None and 'networkName' in network_section.attrib:
3232 parsed_respond['networkname'] = network_section.attrib['networkName']
3233
3234 ipscopes_section = \
3235 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
3236 namespaces)
3237 if ipscopes_section is not None:
3238 for ipscope in ipscopes_section:
3239 for scope in ipscope:
3240 tag_key = scope.tag.split("}")[1]
3241 if tag_key == 'IpRanges':
3242 ip_ranges = scope.getchildren()
3243 for ipblock in ip_ranges:
3244 for block in ipblock:
3245 parsed_respond[block.tag.split("}")[1]] = block.text
3246 else:
3247 parsed_respond[tag_key] = scope.text
3248
3249 # parse children section for other attrib
3250 children_section = xmlroot_respond.find('vm:Children/', namespaces)
3251 if children_section is not None:
3252 parsed_respond['name'] = children_section.attrib['name']
3253 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
3254 if "nestedHypervisorEnabled" in children_section.attrib else None
3255 parsed_respond['deployed'] = children_section.attrib['deployed']
3256 parsed_respond['status'] = children_section.attrib['status']
3257 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
3258 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
3259 nic_list = []
3260 for adapters in network_adapter:
3261 adapter_key = adapters.tag.split("}")[1]
3262 if adapter_key == 'PrimaryNetworkConnectionIndex':
3263 parsed_respond['primarynetwork'] = adapters.text
3264 if adapter_key == 'NetworkConnection':
3265 vnic = {}
3266 if 'network' in adapters.attrib:
3267 vnic['network'] = adapters.attrib['network']
3268 for adapter in adapters:
3269 setting_key = adapter.tag.split("}")[1]
3270 vnic[setting_key] = adapter.text
3271 nic_list.append(vnic)
3272
3273 for link in children_section:
3274 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3275 if link.attrib['rel'] == 'screen:acquireTicket':
3276 parsed_respond['acquireTicket'] = link.attrib
3277 if link.attrib['rel'] == 'screen:acquireMksTicket':
3278 parsed_respond['acquireMksTicket'] = link.attrib
3279
3280 parsed_respond['interfaces'] = nic_list
3281 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
3282 if vCloud_extension_section is not None:
3283 vm_vcenter_info = {}
3284 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
3285 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
3286 if vmext is not None:
3287 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
3288 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
3289
3290 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
3291 vm_virtual_hardware_info = {}
3292 if virtual_hardware_section is not None:
3293 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
3294 if item.find("rasd:Description",namespaces).text == "Hard disk":
3295 disk_size = item.find("rasd:HostResource" ,namespaces
3296 ).attrib["{"+namespaces['vm']+"}capacity"]
3297
3298 vm_virtual_hardware_info["disk_size"]= disk_size
3299 break
3300
3301 for link in virtual_hardware_section:
3302 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3303 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
3304 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
3305 break
3306
3307 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
3308 except Exception as exp :
3309 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
3310 return parsed_respond
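    # For reference, depending on the vApp state the parsed_respond dict built above may
    # contain: 'ovfDescriptorUploaded', 'created', 'networkname', IP-scope fields such as
    # 'Gateway' and 'Netmask', 'name', 'status', 'deployed', 'vmuuid', 'primarynetwork',
    # 'interfaces', 'acquireTicket'/'acquireMksTicket', 'vm_vcenter_info' and
    # 'vm_virtual_hardware'.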
3311
3312 def acuire_console(self, vm_uuid=None):
3313
3314 if vm_uuid is None:
3315 return None
3316
3317         if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3318             vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
3319 console_dict = vm_dict['acquireTicket']
3320 console_rest_call = console_dict['href']
3321
3322 response = Http.post(url=console_rest_call,
3323 headers=self.vca.vcloud_session.get_vcloud_headers(),
3324 verify=self.vca.verify,
3325 logger=self.vca.logger)
3326 if response.status_code == 403:
3327 response = self.retry_rest('POST', console_rest_call)
3328
3329 if response.status_code == requests.codes.ok:
3330 return response.content
3331
3332 return None
3333
3334 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3335 """
3336         Method modifies the VM disk size so it matches the flavor
3337
3338         Args:
3339             vapp_uuid - vApp identifier.
3340             flavor_disk - disk size in GB as specified in the VNFD (flavor)
3341
3342         Returns:
3343             True on success (or when no change is needed), None on failure
3344 """
3345 status = None
3346 try:
3347 #Flavor disk is in GB convert it into MB
3348 flavor_disk = int(flavor_disk) * 1024
3349 vm_details = self.get_vapp_details_rest(vapp_uuid)
3350 if vm_details:
3351 vm_name = vm_details["name"]
3352 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3353
3354 if vm_details and "vm_virtual_hardware" in vm_details:
3355 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3356 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3357
3358 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3359
3360 if flavor_disk > vm_disk:
3361 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3362 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3363 vm_disk, flavor_disk ))
3364 else:
3365 status = True
3366 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3367
3368 return status
3369 except Exception as exp:
3370             self.logger.info("Error occurred while modifying disk size {}".format(exp))
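    # Usage sketch (values illustrative): modify_vm_disk(vapp_uuid, flavor_disk=40)
    # converts 40 GB to 40960 MB and grows the vApp's hard disk only if the current
    # size is smaller; shrinking an existing disk is never attempted.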
3371
3372
3373 def modify_vm_disk_rest(self, disk_href , disk_size):
3374 """
3375         Method modifies the VM disk size via the vCloud Director REST API
3376
3377         Args:
3378             disk_href - vCD API URL to GET and PUT disk data
3379             disk_size - disk size in MB (already converted from the VNFD flavor value)
3380
3381         Returns:
3382             The status of the modify-disk task, or None on failure
3383 """
3384 if disk_href is None or disk_size is None:
3385 return None
3386
3387 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3388 response = Http.get(url=disk_href,
3389 headers=self.vca.vcloud_session.get_vcloud_headers(),
3390 verify=self.vca.verify,
3391 logger=self.vca.logger)
3392
3393 if response.status_code == 403:
3394 response = self.retry_rest('GET', disk_href)
3395
3396 if response.status_code != requests.codes.ok:
3397 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
3398 response.status_code))
3399 return None
3400 try:
3401 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
3402 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
3403 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
3404
3405 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
3406 if item.find("rasd:Description",namespaces).text == "Hard disk":
3407 disk_item = item.find("rasd:HostResource" ,namespaces )
3408 if disk_item is not None:
3409 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
3410 break
3411
3412 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
3413 xml_declaration=True)
3414
3415 #Send PUT request to modify disk size
3416 headers = self.vca.vcloud_session.get_vcloud_headers()
3417 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
3418
3419 response = Http.put(url=disk_href,
3420 data=data,
3421 headers=headers,
3422 verify=self.vca.verify, logger=self.logger)
3423
3424 if response.status_code == 403:
3425 add_headers = {'Content-Type': headers['Content-Type']}
3426 response = self.retry_rest('PUT', disk_href, add_headers, data)
3427
3428 if response.status_code != 202:
3429 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
3430 response.status_code))
3431 else:
3432 modify_disk_task = taskType.parseString(response.content, True)
3433 if type(modify_disk_task) is GenericTask:
3434 status = self.vca.block_until_completed(modify_disk_task)
3435 return status
3436
3437 return None
3438
3439 except Exception as exp :
3440             self.logger.info("Error occurred calling rest api for modifying disk size {}".format(exp))
3441 return None
3442
3443 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3444 """
3445 Method to attach pci devices to VM
3446
3447 Args:
3448 vapp_uuid - uuid of vApp/VM
3449             pci_devices - pci devices information as specified in VNFD (flavor)
3450
3451 Returns:
3452 The status of add pci device task , vm object and
3453 vcenter_conect object
3454 """
3455 vm_obj = None
3456 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3457 vcenter_conect, content = self.get_vcenter_content()
3458 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3459
3460 if vm_moref_id:
3461 try:
3462 no_of_pci_devices = len(pci_devices)
3463 if no_of_pci_devices > 0:
3464 #Get VM and its host
3465 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3466 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3467 if host_obj and vm_obj:
3468                     #get PCI devices from host on which vapp is currently installed
3469 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3470
3471 if avilable_pci_devices is None:
3472 #find other hosts with active pci devices
3473 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3474 content,
3475 no_of_pci_devices
3476 )
3477
3478 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3479                             #Migrate vm to the host where PCI devices are available
3480 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3481 task = self.relocate_vm(new_host_obj, vm_obj)
3482 if task is not None:
3483 result = self.wait_for_vcenter_task(task, vcenter_conect)
3484 self.logger.info("Migrate VM status: {}".format(result))
3485 host_obj = new_host_obj
3486 else:
3487                             self.logger.info("Fail to migrate VM : {}".format(vmname_andid))
3488 raise vimconn.vimconnNotFoundException(
3489 "Fail to migrate VM : {} to host {}".format(
3490 vmname_andid,
3491 new_host_obj)
3492 )
3493
3494 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3495 #Add PCI devices one by one
3496 for pci_device in avilable_pci_devices:
3497 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3498 if task:
3499 status= self.wait_for_vcenter_task(task, vcenter_conect)
3500 if status:
3501 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3502 else:
3503 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3504 return True, vm_obj, vcenter_conect
3505 else:
3506 self.logger.error("Currently there is no host with"\
3507                                           " {} available PCI devices required for VM {}".format(
3508 no_of_pci_devices,
3509 vmname_andid)
3510 )
3511 raise vimconn.vimconnNotFoundException(
3512 "Currently there is no host with {} "\
3513                             "available PCI devices required for VM {}".format(
3514 no_of_pci_devices,
3515 vmname_andid))
3516 else:
3517                 self.logger.debug("No information about PCI devices {} ".format(pci_devices))
3518
3519 except vmodl.MethodFault as error:
3520             self.logger.error("Error occurred while adding PCI devices {} ".format(error))
3521 return None, vm_obj, vcenter_conect
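    # Flow summary for add_pci_devices(): resolve the VM through its vCenter moref id,
    # look for enough passthrough-enabled PCI devices on its current host, migrate the
    # VM to another host if required, then attach the devices one by one, waiting for
    # each ReconfigVM_Task to complete.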
3522
3523 def get_vm_obj(self, content, mob_id):
3524 """
3525         Method to get the vSphere VM object associated with a given moref ID
3526         Args:
3527             content - vCenter content object
3528             mob_id - moref ID of the VM
3530
3531 Returns:
3532             host object and VM object (host_obj, vm_obj)
3533 """
3534 vm_obj = None
3535 host_obj = None
3536 try :
3537 container = content.viewManager.CreateContainerView(content.rootFolder,
3538 [vim.VirtualMachine], True
3539 )
3540 for vm in container.view:
3541 mobID = vm._GetMoId()
3542 if mobID == mob_id:
3543 vm_obj = vm
3544 host_obj = vm_obj.runtime.host
3545 break
3546 except Exception as exp:
3547 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3548 return host_obj, vm_obj
3549
3550 def get_pci_devices(self, host, need_devices):
3551 """
3552 Method to get the details of pci devices on given host
3553 Args:
3554 host - vSphere host object
3555 need_devices - number of pci devices needed on host
3556
3557 Returns:
3558 array of pci devices
3559 """
3560 all_devices = []
3561 all_device_ids = []
3562 used_devices_ids = []
3563
3564 try:
3565 if host:
3566 pciPassthruInfo = host.config.pciPassthruInfo
3567 pciDevies = host.hardware.pciDevice
3568
3569 for pci_status in pciPassthruInfo:
3570 if pci_status.passthruActive:
3571 for device in pciDevies:
3572 if device.id == pci_status.id:
3573 all_device_ids.append(device.id)
3574 all_devices.append(device)
3575
3576 #check if devices are in use
3577 avalible_devices = all_devices
3578 for vm in host.vm:
3579 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3580 vm_devices = vm.config.hardware.device
3581 for device in vm_devices:
3582 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3583 if device.backing.id in all_device_ids:
3584 for use_device in avalible_devices:
3585 if use_device.id == device.backing.id:
3586 avalible_devices.remove(use_device)
3587 used_devices_ids.append(device.backing.id)
3588 self.logger.debug("Device {} from devices {}"\
3589                                                       " is in use".format(device.backing.id,
3590 device)
3591 )
3592 if len(avalible_devices) < need_devices:
3593                 self.logger.debug("Host {} does not have {} active devices".format(host,
3594                                                                                             need_devices))
3595                 self.logger.debug("found only {} devices {}".format(len(avalible_devices),
3596 avalible_devices))
3597 return None
3598 else:
3599 required_devices = avalible_devices[:need_devices]
3600                 self.logger.info("Found {} PCI devices on host {} but required only {}".format(
3601 len(avalible_devices),
3602 host,
3603 need_devices))
3604                 self.logger.info("Returning {} devices as {}".format(need_devices,
3605 required_devices ))
3606 return required_devices
3607
3608 except Exception as exp:
3609 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3610
3611 return None
3612
3613 def get_host_and_PCIdevices(self, content, need_devices):
3614 """
3615         Method to get the details of PCI devices on all hosts
3616
3617         Args:
3618             content - vCenter content object
3619 need_devices - number of pci devices needed on host
3620
3621 Returns:
3622 array of pci devices and host object
3623 """
3624 host_obj = None
3625 pci_device_objs = None
3626 try:
3627 if content:
3628 container = content.viewManager.CreateContainerView(content.rootFolder,
3629 [vim.HostSystem], True)
3630 for host in container.view:
3631 devices = self.get_pci_devices(host, need_devices)
3632 if devices:
3633 host_obj = host
3634 pci_device_objs = devices
3635 break
3636 except Exception as exp:
3637 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3638
3639 return host_obj,pci_device_objs
3640
3641 def relocate_vm(self, dest_host, vm) :
3642 """
3643         Method to relocate a VM to a new host
3644
3645 Args:
3646 dest_host - vSphere host object
3647 vm - vSphere VM object
3648
3649 Returns:
3650 task object
3651 """
3652 task = None
3653 try:
3654 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3655 task = vm.Relocate(relocate_spec)
3656 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3657 except Exception as exp:
3658             self.logger.error("Error occurred while relocating VM {} to new host {}: {}".format(
3659                                                                                       vm, dest_host, exp))
3660 return task
3661
3662 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3663 """
3664 Waits and provides updates on a vSphere task
3665 """
3666 while task.info.state == vim.TaskInfo.State.running:
3667 time.sleep(2)
3668
3669 if task.info.state == vim.TaskInfo.State.success:
3670 if task.info.result is not None and not hideResult:
3671 self.logger.info('{} completed successfully, result: {}'.format(
3672 actionName,
3673 task.info.result))
3674 else:
3675 self.logger.info('Task {} completed successfully.'.format(actionName))
3676 else:
3677 self.logger.error('{} did not complete successfully: {} '.format(
3678 actionName,
3679 task.info.error)
3680 )
3681
3682 return task.info.result
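    # Note: the loop above polls task.info.state every 2 seconds until the vCenter task
    # leaves the 'running' state; there is no explicit timeout, so a task that never
    # finishes would block the caller indefinitely.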
3683
3684 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3685 """
3686 Method to add pci device in given VM
3687
3688 Args:
3689 host_object - vSphere host object
3690 vm_object - vSphere VM object
3691 host_pci_dev - host_pci_dev must be one of the devices from the
3692 host_object.hardware.pciDevice list
3693 which is configured as a PCI passthrough device
3694
3695 Returns:
3696 task object
3697 """
3698 task = None
3699 if vm_object and host_object and host_pci_dev:
3700 try :
3701 #Add PCI device to VM
3702 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3703 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3704
3705 if host_pci_dev.id not in systemid_by_pciid:
3706 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3707 return None
3708
3709 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3710 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3711 id=host_pci_dev.id,
3712 systemId=systemid_by_pciid[host_pci_dev.id],
3713 vendorId=host_pci_dev.vendorId,
3714 deviceName=host_pci_dev.deviceName)
3715
3716 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3717
3718 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3719 new_device_config.operation = "add"
3720 vmConfigSpec = vim.vm.ConfigSpec()
3721 vmConfigSpec.deviceChange = [new_device_config]
3722
3723 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3724 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3725 host_pci_dev, vm_object, host_object)
3726 )
3727 except Exception as exp:
3728                 self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
3729 host_pci_dev,
3730 vm_object,
3731 exp))
3732 return task
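    # Typical pairing, as done in add_pci_devices() above (sketch only):
    #     task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
    #     if task:
    #         status = self.wait_for_vcenter_task(task, vcenter_conect)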
3733
3734 def get_vm_vcenter_info(self):
3735 """
3736         Method to get vCenter connection details from the VIM configuration
3737
3738         Args:
3739             None
3740
3741         Returns:
3742             dict with the vCenter IP, port, user and password
3743 """
3744 vm_vcenter_info = {}
3745
3746 if self.vcenter_ip is not None:
3747 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3748 else:
3749 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3750 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3751 if self.vcenter_port is not None:
3752 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3753 else:
3754 raise vimconn.vimconnException(message="vCenter port is not provided."\
3755 " Please provide vCenter port while attaching datacenter to tenant in --config")
3756 if self.vcenter_user is not None:
3757 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3758 else:
3759 raise vimconn.vimconnException(message="vCenter user is not provided."\
3760 " Please provide vCenter user while attaching datacenter to tenant in --config")
3761
3762 if self.vcenter_password is not None:
3763 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3764 else:
3765 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3766 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3767
3768 return vm_vcenter_info
3769
3770
3771 def get_vm_pci_details(self, vmuuid):
3772 """
3773 Method to get VM PCI device details from vCenter
3774
3775 Args:
3776             vmuuid - UUID of the VM
3777
3778         Returns:
3779             dict of PCI devices attached to the VM
3780
3781 """
3782 vm_pci_devices_info = {}
3783 try:
3784 vcenter_conect, content = self.get_vcenter_content()
3785 vm_moref_id = self.get_vm_moref_id(vmuuid)
3786 if vm_moref_id:
3787 #Get VM and its host
3788 if content:
3789 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3790 if host_obj and vm_obj:
3791 vm_pci_devices_info["host_name"]= host_obj.name
3792 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3793 for device in vm_obj.config.hardware.device:
3794 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3795 device_details={'devide_id':device.backing.id,
3796 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3797 }
3798 vm_pci_devices_info[device.deviceInfo.label] = device_details
3799 else:
3800 self.logger.error("Can not connect to vCenter while getting "\
3801                                   "PCI devices information")
3802 return vm_pci_devices_info
3803 except Exception as exp:
3804             self.logger.error("Error occurred while getting VM information"\
3805 " for VM : {}".format(exp))
3806 raise vimconn.vimconnException(message=exp)
3807
3808 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
3809 """
3810 Method to add network adapter type to vm
3811 Args :
3812             network_name - name of the network
3813             primary_nic_index - int value for the primary NIC index
3814             nicIndex - int value for the NIC index
3815             nic_type - NIC adapter model to configure on the VM
3816 Returns:
3817 None
3818 """
3819
3820 try:
3821 ip_address = None
3822 floating_ip = False
3823 if 'floating_ip' in net: floating_ip = net['floating_ip']
3824
3825 # Stub for ip_address feature
3826 if 'ip_address' in net: ip_address = net['ip_address']
3827
3828 if floating_ip:
3829 allocation_mode = "POOL"
3830 elif ip_address:
3831 allocation_mode = "MANUAL"
3832 else:
3833 allocation_mode = "DHCP"
3834
3835 if not nic_type:
3836 for vms in vapp._get_vms():
3837 vm_id = (vms.id).split(':')[-1]
3838
3839 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3840
3841 response = Http.get(url=url_rest_call,
3842 headers=self.vca.vcloud_session.get_vcloud_headers(),
3843 verify=self.vca.verify,
3844 logger=self.vca.logger)
3845
3846 if response.status_code == 403:
3847 response = self.retry_rest('GET', url_rest_call)
3848
3849 if response.status_code != 200:
3850 self.logger.error("REST call {} failed reason : {}"\
3851                                             " status code : {}".format(url_rest_call,
3852 response.content,
3853 response.status_code))
3854 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3855 "network connection section")
3856
3857 data = response.content
3858 if '<PrimaryNetworkConnectionIndex>' not in data:
3859 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3860 <NetworkConnection network="{}">
3861 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3862 <IsConnected>true</IsConnected>
3863 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3864 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3865 allocation_mode)
3866 # Stub for ip_address feature
3867 if ip_address:
3868 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3869 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3870
3871 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3872 else:
3873 new_item = """<NetworkConnection network="{}">
3874 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3875 <IsConnected>true</IsConnected>
3876 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3877 </NetworkConnection>""".format(network_name, nicIndex,
3878 allocation_mode)
3879 # Stub for ip_address feature
3880 if ip_address:
3881 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3882 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3883
3884 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3885
3886 headers = self.vca.vcloud_session.get_vcloud_headers()
3887 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3888 response = Http.put(url=url_rest_call, headers=headers, data=data,
3889 verify=self.vca.verify,
3890 logger=self.vca.logger)
3891
3892 if response.status_code == 403:
3893 add_headers = {'Content-Type': headers['Content-Type']}
3894 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3895
3896 if response.status_code != 202:
3897 self.logger.error("REST call {} failed reason : {}"\
3898                                             " status code : {} ".format(url_rest_call,
3899 response.content,
3900 response.status_code))
3901 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3902 "network connection section")
3903 else:
3904 nic_task = taskType.parseString(response.content, True)
3905 if isinstance(nic_task, GenericTask):
3906 self.vca.block_until_completed(nic_task)
3907                             self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
3908 "default NIC type".format(vm_id))
3909 else:
3910 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
3911 "connect NIC type".format(vm_id))
3912 else:
3913 for vms in vapp._get_vms():
3914 vm_id = (vms.id).split(':')[-1]
3915
3916 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3917
3918 response = Http.get(url=url_rest_call,
3919 headers=self.vca.vcloud_session.get_vcloud_headers(),
3920 verify=self.vca.verify,
3921 logger=self.vca.logger)
3922
3923 if response.status_code == 403:
3924 response = self.retry_rest('GET', url_rest_call)
3925
3926 if response.status_code != 200:
3927 self.logger.error("REST call {} failed reason : {}"\
3928                                             " status code : {}".format(url_rest_call,
3929 response.content,
3930 response.status_code))
3931 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3932 "network connection section")
3933 data = response.content
3934 if '<PrimaryNetworkConnectionIndex>' not in data:
3935 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3936 <NetworkConnection network="{}">
3937 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3938 <IsConnected>true</IsConnected>
3939 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3940 <NetworkAdapterType>{}</NetworkAdapterType>
3941 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3942 allocation_mode, nic_type)
3943 # Stub for ip_address feature
3944 if ip_address:
3945 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3946 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3947
3948 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3949 else:
3950 new_item = """<NetworkConnection network="{}">
3951 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3952 <IsConnected>true</IsConnected>
3953 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3954 <NetworkAdapterType>{}</NetworkAdapterType>
3955 </NetworkConnection>""".format(network_name, nicIndex,
3956 allocation_mode, nic_type)
3957 # Stub for ip_address feature
3958 if ip_address:
3959 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3960 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3961
3962 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3963
3964 headers = self.vca.vcloud_session.get_vcloud_headers()
3965 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3966 response = Http.put(url=url_rest_call, headers=headers, data=data,
3967 verify=self.vca.verify,
3968 logger=self.vca.logger)
3969
3970 if response.status_code == 403:
3971 add_headers = {'Content-Type': headers['Content-Type']}
3972 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3973
3974 if response.status_code != 202:
3975 self.logger.error("REST call {} failed reason : {}"\
3976                                             " status code : {}".format(url_rest_call,
3977 response.content,
3978 response.status_code))
3979 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3980 "network connection section")
3981 else:
3982 nic_task = taskType.parseString(response.content, True)
3983 if isinstance(nic_task, GenericTask):
3984 self.vca.block_until_completed(nic_task)
3985 self.logger.info("add_network_adapter_to_vms(): VM {} "\
3986                                              "connected to NIC type {}".format(vm_id, nic_type))
3987 else:
3988 self.logger.error("add_network_adapter_to_vms(): VM {} "\
3989 "failed to connect NIC type {}".format(vm_id, nic_type))
3990 except Exception as exp:
3991 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
3992 "while adding Network adapter")
3993 raise vimconn.vimconnException(message=exp)
3994
3995
3996 def set_numa_affinity(self, vmuuid, paired_threads_id):
3997 """
3998         Method to assign NUMA affinity in VM configuration parameters
3999 Args :
4000 vmuuid - vm uuid
4001 paired_threads_id - one or more virtual processor
4002 numbers
4003 Returns:
4004             None; failures are logged or re-raised as vimconnException
4005 """
4006 try:
4007 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
4008 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
4009 context = None
4010 if hasattr(ssl, '_create_unverified_context'):
4011 context = ssl._create_unverified_context()
4012 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
4013 pwd=self.passwd, port=int(vm_vcenter_port),
4014 sslContext=context)
4015 atexit.register(Disconnect, vcenter_conect)
4016 content = vcenter_conect.RetrieveContent()
4017
4018 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
4019 if vm_obj:
4020 config_spec = vim.vm.ConfigSpec()
4021 config_spec.extraConfig = []
4022 opt = vim.option.OptionValue()
4023 opt.key = 'numa.nodeAffinity'
4024 opt.value = str(paired_threads_id)
4025 config_spec.extraConfig.append(opt)
4026 task = vm_obj.ReconfigVM_Task(config_spec)
4027 if task:
4028 result = self.wait_for_vcenter_task(task, vcenter_conect)
4029 extra_config = vm_obj.config.extraConfig
4030 flag = False
4031 for opts in extra_config:
4032 if 'numa.nodeAffinity' in opts.key:
4033 flag = True
4034                             self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
4035 "value {} for vm {}".format(opt.value, vm_obj))
4036 if flag:
4037 return
4038 else:
4039 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
4040 except Exception as exp:
4041 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
4042 "for VM {} : {}".format(vm_obj, vm_moref_id))
4043 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
4044 "affinity".format(exp))
4045
4046
4047 def cloud_init(self, vapp, cloud_config):
4048 """
4049 Method to inject ssh-key
4050 vapp - vapp object
4051 cloud_config a dictionary with:
4052 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
4053 'users': (optional) list of users to be inserted, each item is a dict with:
4054 'name': (mandatory) user name,
4055 'key-pairs': (optional) list of strings with the public key to be inserted to the user
4056 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
4057 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
4058 'config-files': (optional). List of files to be transferred. Each item is a dict with:
4059 'dest': (mandatory) string with the destination absolute path
4060 'encoding': (optional, by default text). Can be one of:
4061 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
4062 'content' (mandatory): string with the content of the file
4063 'permissions': (optional) string with file permissions, typically octal notation '0644'
4064 'owner': (optional) file owner, string with the format 'owner:group'
4065             'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
4066 """
4067 try:
4068 if not isinstance(cloud_config, dict):
4069 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
4070 else:
4071 key_pairs = []
4072 userdata = []
4073 if "key-pairs" in cloud_config:
4074 key_pairs = cloud_config["key-pairs"]
4075
4076 if "users" in cloud_config:
4077 userdata = cloud_config["users"]
4078
4079 self.logger.debug("cloud_init : Guest os customization started..")
4080 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
4081 self.guest_customization(vapp, customize_script)
4082
4083 except Exception as exp:
4084 self.logger.error("cloud_init : exception occurred while injecting "\
4085 "ssh-key")
4086 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
4087 "ssh-key".format(exp))
4088
4089 def format_script(self, key_pairs=[], users_list=[]):
4090 bash_script = """
4091 #!/bin/bash
4092 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4093 if [ "$1" = "precustomization" ];then
4094 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4095 """
4096
4097 keys = "\n".join(key_pairs)
4098 if keys:
4099 keys_data = """
4100 if [ ! -d /root/.ssh ];then
4101 mkdir /root/.ssh
4102 chown root:root /root/.ssh
4103 chmod 700 /root/.ssh
4104 touch /root/.ssh/authorized_keys
4105 chown root:root /root/.ssh/authorized_keys
4106 chmod 600 /root/.ssh/authorized_keys
4107 # make centos with selinux happy
4108 which restorecon && restorecon -Rv /root/.ssh
4109 else
4110 touch /root/.ssh/authorized_keys
4111 chown root:root /root/.ssh/authorized_keys
4112 chmod 600 /root/.ssh/authorized_keys
4113 fi
4114 echo '{key}' >> /root/.ssh/authorized_keys
4115 """.format(key=keys)
4116
4117 bash_script+= keys_data
4118
4119 for user in users_list:
4120 if 'name' in user: user_name = user['name']
4121 if 'key-pairs' in user:
4122 user_keys = "\n".join(user['key-pairs'])
4123 else:
4124 user_keys = None
4125
4126 add_user_name = """
4127 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
4128 """.format(user_name=user_name)
4129
4130 bash_script+= add_user_name
4131
4132 if user_keys:
4133 user_keys_data = """
4134 mkdir /home/{user_name}/.ssh
4135 chown {user_name}:{user_name} /home/{user_name}/.ssh
4136 chmod 700 /home/{user_name}/.ssh
4137 touch /home/{user_name}/.ssh/authorized_keys
4138 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
4139 chmod 600 /home/{user_name}/.ssh/authorized_keys
4140 # make centos with selinux happy
4141 which restorecon && restorecon -Rv /home/{user_name}/.ssh
4142 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
4143 """.format(user_name=user_name,user_key=user_keys)
4144
4145 bash_script+= user_keys_data
4146
4147 return bash_script+"\n\tfi"
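    # For reference, the generated customization script has roughly this shape (sketch only):
    #     #!/bin/bash
    #     echo performing customization tasks ... >> /root/customization.log
    #     if [ "$1" = "precustomization" ];then
    #         <append default-user keys to /root/.ssh/authorized_keys>
    #         <useradd each requested user and append its keys>
    #     fi
    # vCloud Director runs it during guest customization at first boot (see guest_customization below).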
4148
4149 def guest_customization(self, vapp, customize_script):
4150 """
4151 Method to customize guest os
4152 vapp - Vapp object
4153 customize_script - Customize script to be run at first boot of VM.
4154 """
4155 for vm in vapp._get_vms():
4156 vm_name = vm.name
4157 task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
4158 if isinstance(task, GenericTask):
4159 self.vca.block_until_completed(task)
4160 self.logger.info("guest_customization : customized guest os task "\
4161 "completed for VM {}".format(vm_name))
4162 else:
4163                 self.logger.error("guest_customization : task for customized guest os"\
4164                                   " failed for VM {}".format(vm_name))
4165                 raise vimconn.vimconnException("guest_customization : failed to perform"\
4166                                   " guest os customization on VM {}".format(vm_name))
4167
4168 def add_new_disk(self, vapp_uuid, disk_size):
4169 """
4170 Method to create an empty vm disk
4171
4172 Args:
4173 vapp_uuid - is vapp identifier.
4174 disk_size - size of disk to be created in GB
4175
4176 Returns:
4177 None
4178 """
4179 status = False
4180 vm_details = None
4181 try:
4182 #Disk size in GB, convert it into MB
4183 if disk_size is not None:
4184 disk_size_mb = int(disk_size) * 1024
4185 vm_details = self.get_vapp_details_rest(vapp_uuid)
4186
4187 if vm_details and "vm_virtual_hardware" in vm_details:
4188 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4189 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4190 status = self.add_new_disk_rest(disk_href, disk_size_mb)
4191
4192 except Exception as exp:
4193 msg = "Error occurred while creating new disk {}.".format(exp)
4194 self.rollback_newvm(vapp_uuid, msg)
4195
4196 if status:
4197 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4198 else:
4199 #If failed to add disk, delete VM
4200 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4201 self.rollback_newvm(vapp_uuid, msg)
4202
4203
4204 def add_new_disk_rest(self, disk_href, disk_size_mb):
4205 """
4206         Retrieves the vApp disks section & adds a new empty disk
4207
4208         Args:
4209             disk_href: Disk section href to add the disk to
4210 disk_size_mb: Disk size in MB
4211
4212 Returns: Status of add new disk task
4213 """
4214 status = False
4215 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
4216 response = Http.get(url=disk_href,
4217 headers=self.vca.vcloud_session.get_vcloud_headers(),
4218 verify=self.vca.verify,
4219 logger=self.vca.logger)
4220
4221 if response.status_code == 403:
4222 response = self.retry_rest('GET', disk_href)
4223
4224 if response.status_code != requests.codes.ok:
4225 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
4226 .format(disk_href, response.status_code))
4227 return status
4228 try:
4229                 #Find bus type & max of instance IDs assigned to disks
4230 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4231 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4232 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4233 instance_id = 0
4234 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4235 if item.find("rasd:Description",namespaces).text == "Hard disk":
4236 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
4237 if inst_id > instance_id:
4238 instance_id = inst_id
4239 disk_item = item.find("rasd:HostResource" ,namespaces)
4240 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
4241 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
4242
4243 instance_id = instance_id + 1
4244 new_item = """<Item>
4245 <rasd:Description>Hard disk</rasd:Description>
4246 <rasd:ElementName>New disk</rasd:ElementName>
4247 <rasd:HostResource
4248 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
4249 vcloud:capacity="{}"
4250 vcloud:busSubType="{}"
4251 vcloud:busType="{}"></rasd:HostResource>
4252 <rasd:InstanceID>{}</rasd:InstanceID>
4253 <rasd:ResourceType>17</rasd:ResourceType>
4254 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
4255
4256 new_data = response.content
4257 #Add new item at the bottom
4258 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
4259
4260 # Send PUT request to modify virtual hardware section with new disk
4261 headers = self.vca.vcloud_session.get_vcloud_headers()
4262 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4263
4264 response = Http.put(url=disk_href,
4265 data=new_data,
4266 headers=headers,
4267 verify=self.vca.verify, logger=self.logger)
4268
4269 if response.status_code == 403:
4270 add_headers = {'Content-Type': headers['Content-Type']}
4271 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
4272
4273 if response.status_code != 202:
4274 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
4275 .format(disk_href, response.status_code, response.content))
4276 else:
4277 add_disk_task = taskType.parseString(response.content, True)
4278 if type(add_disk_task) is GenericTask:
4279 status = self.vca.block_until_completed(add_disk_task)
4280 if not status:
4281 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
4282
4283 except Exception as exp:
4284 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
4285
4286 return status
4287
4288
4289 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
4290 """
4291 Method to add existing disk to vm
4292 Args :
4293 catalogs - List of VDC catalogs
4294 image_id - Catalog ID
4295 template_name - Name of template in catalog
4296 vapp_uuid - UUID of vApp
4297 Returns:
4298 None
4299 """
4300 disk_info = None
4301 vcenter_conect, content = self.get_vcenter_content()
4302 #find moref-id of vm in image
4303 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
4304 image_id=image_id,
4305 )
4306
4307 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
4308 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
4309 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
4310 if catalog_vm_moref_id:
4311 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
4312 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
4313 if catalog_vm_obj:
4314 #find existing disk
4315 disk_info = self.find_disk(catalog_vm_obj)
4316 else:
4317 exp_msg = "No VM with image id {} found".format(image_id)
4318 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4319 else:
4320 exp_msg = "No Image found with image ID {} ".format(image_id)
4321 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4322
4323 if disk_info:
4324 self.logger.info("Existing disk_info : {}".format(disk_info))
4325 #get VM
4326 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4327 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
4328 if vm_obj:
4329 status = self.add_disk(vcenter_conect=vcenter_conect,
4330 vm=vm_obj,
4331 disk_info=disk_info,
4332 size=size,
4333 vapp_uuid=vapp_uuid
4334 )
4335 if status:
4336 self.logger.info("Disk from image id {} added to {}".format(image_id,
4337 vm_obj.config.name)
4338 )
4339 else:
4340 msg = "No disk found with image id {} to add in VM {}".format(
4341 image_id,
4342 vm_obj.config.name)
4343 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4344
4345
4346 def find_disk(self, vm_obj):
4347 """
4348 Method to find details of existing disk in VM
4349 Args :
4350 vm_obj - vCenter object of VM
4351 image_id - Catalog ID
4352 Returns:
4353 disk_info : dict of disk details
4354 """
4355 disk_info = {}
4356 if vm_obj:
4357 try:
4358 devices = vm_obj.config.hardware.device
4359 for device in devices:
4360 if type(device) is vim.vm.device.VirtualDisk:
4361 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4362 disk_info["full_path"] = device.backing.fileName
4363 disk_info["datastore"] = device.backing.datastore
4364 disk_info["capacityKB"] = device.capacityInKB
4365 break
4366 except Exception as exp:
4367 self.logger.error("find_disk() : exception occurred while "\
4368 "getting existing disk details :{}".format(exp))
4369 return disk_info
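    # The disk_info dict returned above carries 'full_path' (the backing VMDK fileName),
    # 'datastore' (the backing datastore object) and 'capacityKB'; add_disk() below
    # expects exactly these keys.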
4370
4371
4372 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4373 """
4374 Method to add existing disk in VM
4375 Args :
4376 vcenter_conect - vCenter content object
4377 vm - vCenter vm object
4378 disk_info : dict of disk details
4379 Returns:
4380 status : status of add disk task
4381 """
4382 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4383 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4384 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4385 if size is not None:
4386 #Convert size from GB to KB
4387 sizeKB = int(size) * 1024 * 1024
4388             #compare size of existing disk and user given size. Assign whichever is greater
4389 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4390 sizeKB, capacityKB))
4391 if sizeKB > capacityKB:
4392 capacityKB = sizeKB
4393
4394 if datastore and fullpath and capacityKB:
4395 try:
4396 spec = vim.vm.ConfigSpec()
4397 # get all disks on a VM, set unit_number to the next available
4398 unit_number = 0
4399 for dev in vm.config.hardware.device:
4400 if hasattr(dev.backing, 'fileName'):
4401 unit_number = int(dev.unitNumber) + 1
4402 # unit_number 7 reserved for scsi controller
4403 if unit_number == 7:
4404 unit_number += 1
4405 if isinstance(dev, vim.vm.device.VirtualDisk):
4406 #vim.vm.device.VirtualSCSIController
4407 controller_key = dev.controllerKey
4408
4409 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4410 unit_number, controller_key))
4411 # add disk here
4412 dev_changes = []
4413 disk_spec = vim.vm.device.VirtualDeviceSpec()
4414 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4415 disk_spec.device = vim.vm.device.VirtualDisk()
4416 disk_spec.device.backing = \
4417 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4418 disk_spec.device.backing.thinProvisioned = True
4419 disk_spec.device.backing.diskMode = 'persistent'
4420 disk_spec.device.backing.datastore = datastore
4421 disk_spec.device.backing.fileName = fullpath
4422
4423 disk_spec.device.unitNumber = unit_number
4424 disk_spec.device.capacityInKB = capacityKB
4425 disk_spec.device.controllerKey = controller_key
4426 dev_changes.append(disk_spec)
4427 spec.deviceChange = dev_changes
4428 task = vm.ReconfigVM_Task(spec=spec)
4429 status = self.wait_for_vcenter_task(task, vcenter_conect)
4430 return status
4431 except Exception as exp:
4432 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4433 "{} to vm {}".format(exp,
4434 fullpath,
4435 vm.config.name)
4436 self.rollback_newvm(vapp_uuid, exp_msg)
4437 else:
4438 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4439 self.rollback_newvm(vapp_uuid, msg)
4440
4441
4442 def get_vcenter_content(self):
4443 """
4444 Get the vsphere content object
4445 """
4446 try:
4447 vm_vcenter_info = self.get_vm_vcenter_info()
4448 except Exception as exp:
4449             self.logger.error("Error occurred while getting vCenter information"\
4450 " for VM : {}".format(exp))
4451 raise vimconn.vimconnException(message=exp)
4452
4453 context = None
4454 if hasattr(ssl, '_create_unverified_context'):
4455 context = ssl._create_unverified_context()
4456
4457 vcenter_conect = SmartConnect(
4458 host=vm_vcenter_info["vm_vcenter_ip"],
4459 user=vm_vcenter_info["vm_vcenter_user"],
4460 pwd=vm_vcenter_info["vm_vcenter_password"],
4461 port=int(vm_vcenter_info["vm_vcenter_port"]),
4462 sslContext=context
4463 )
4464 atexit.register(Disconnect, vcenter_conect)
4465 content = vcenter_conect.RetrieveContent()
4466 return vcenter_conect, content
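    # Note: get_vcenter_content() opens a pyVmomi SmartConnect session with certificate
    # verification disabled (an unverified SSL context when the Python build supports it)
    # and registers Disconnect via atexit, so the session is only closed at process exit.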
4467
4468
4469 def get_vm_moref_id(self, vapp_uuid):
4470 """
4471 Get the moref_id of given VM
4472 """
4473 try:
4474 if vapp_uuid:
4475 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4476 if vm_details and "vm_vcenter_info" in vm_details:
4477 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4478
4479 return vm_moref_id
4480
4481 except Exception as exp:
4482 self.logger.error("Error occurred while getting VM moref ID "\
4483 " for VM : {}".format(exp))
4484 return None
4485
4486
4487 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
4488 """
4489 Method to get vApp template details
4490 Args :
4491 catalogs - list of VDC catalogs
4492 image_id - Catalog ID to find
4493 template_name : template name in catalog
4494 Returns:
4495             parsed_response : dict of vApp template details
4496 """
4497 parsed_response = {}
4498
4499 vca = self.connect_as_admin()
4500 if not vca:
4501             raise vimconn.vimconnConnectionException("self.connect() failed")
4502
4503 try:
4504 catalog = self.get_catalog_obj(image_id, catalogs)
4505 if catalog:
4506 template_name = self.get_catalogbyid(image_id, catalogs)
4507 catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
4508 if len(catalog_items) == 1:
4509 response = Http.get(catalog_items[0].get_href(),
4510 headers=vca.vcloud_session.get_vcloud_headers(),
4511 verify=vca.verify,
4512 logger=vca.logger)
4513 catalogItem = XmlElementTree.fromstring(response.content)
4514 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
4515 vapp_tempalte_href = entity.get("href")
4516 #get vapp details and parse moref id
4517
4518 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4519 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4520 'vmw': 'http://www.vmware.com/schema/ovf',
4521 'vm': 'http://www.vmware.com/vcloud/v1.5',
4522 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4523 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
4524 'xmlns':"http://www.vmware.com/vcloud/v1.5"
4525 }
4526
4527 if vca.vcloud_session and vca.vcloud_session.organization:
4528 response = Http.get(url=vapp_tempalte_href,
4529 headers=vca.vcloud_session.get_vcloud_headers(),
4530 verify=vca.verify,
4531 logger=vca.logger
4532 )
4533
4534 if response.status_code != requests.codes.ok:
4535 self.logger.debug("REST API call {} failed. Return status code {}".format(
4536 vapp_tempalte_href, response.status_code))
4537
4538 else:
4539 xmlroot_respond = XmlElementTree.fromstring(response.content)
4540 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4541 if children_section is not None:
4542 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4543 if vCloud_extension_section is not None:
4544 vm_vcenter_info = {}
4545 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4546 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4547 if vmext is not None:
4548 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4549 parsed_response["vm_vcenter_info"]= vm_vcenter_info
4550
4551 except Exception as exp :
4552 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4553
4554 return parsed_response
4555
4556
4557 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
4558 """
4559 Method to delete vApp
4560 Args :
4561 vapp_uuid - vApp UUID
4562 msg - Error message to be logged
4563 exp_type : Exception type
4564 Returns:
4565 None
4566 """
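        # Illustrative usage (hypothetical call site): after a failed deployment step one could call
        #   self.rollback_newvm(vapp_uuid, "failed to add SRIOV adapter", exp_type="NotFound")
        # which deletes the vApp and re-raises the message as the requested exception type.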
4567 if vapp_uuid:
4568 status = self.delete_vminstance(vapp_uuid)
4569 else:
4570 msg = "No vApp ID"
4571 self.logger.error(msg)
4572 if exp_type == "Genric":
4573 raise vimconn.vimconnException(msg)
4574 elif exp_type == "NotFound":
4575 raise vimconn.vimconnNotFoundException(message=msg)
4576
4577 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4578 """
4579 Method to attach SRIOV adapters to VM
4580
4581 Args:
4582 vapp_uuid - uuid of vApp/VM
4583             sriov_nets - SRIOV devices information as specified in VNFD (flavor)
4584 vmname_andid - vmname
4585
4586 Returns:
4587             The status of the add-SRIOV-adapter task, the vm object and the
4588                        vcenter_conect object
4589 """
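        # Flow implemented below: find a host with enough free SRIOV virtual functions,
        # migrate the VM to such a host if the current one has none, create a distributed
        # virtual portgroup per requested network (tagging a vlan ID for "VF" type nets),
        # and finally reconfigure the VM with a VirtualSriovEthernetCard.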
4590 vm_obj = None
4591 vcenter_conect, content = self.get_vcenter_content()
4592 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4593
4594 if vm_moref_id:
4595 try:
4596 no_of_sriov_devices = len(sriov_nets)
4597 if no_of_sriov_devices > 0:
4598 #Get VM and its host
4599 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4600 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4601 if host_obj and vm_obj:
4602                     # get SRIOV devices from the host on which the vApp is currently installed
4603 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4604 no_of_sriov_devices,
4605 )
4606
4607 if len(avilable_sriov_devices) == 0:
4608 #find other hosts with active pci devices
4609 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4610 content,
4611 no_of_sriov_devices,
4612 )
4613
4614 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4615 #Migrate vm to the host where SRIOV devices are available
4616 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4617 new_host_obj))
4618 task = self.relocate_vm(new_host_obj, vm_obj)
4619 if task is not None:
4620 result = self.wait_for_vcenter_task(task, vcenter_conect)
4621 self.logger.info("Migrate VM status: {}".format(result))
4622 host_obj = new_host_obj
4623 else:
4624                                 self.logger.error("Fail to migrate VM : {}".format(vmname_andid))
4625 raise vimconn.vimconnNotFoundException(
4626 "Fail to migrate VM : {} to host {}".format(
4627 vmname_andid,
4628 new_host_obj)
4629 )
4630
4631 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4632 #Add SRIOV devices one by one
4633 for sriov_net in sriov_nets:
4634 network_name = sriov_net.get('net_id')
4635 dvs_portgr_name = self.create_dvPort_group(network_name)
4636 if sriov_net.get('type') == "VF":
4637 #add vlan ID ,Modify portgroup for vlan ID
4638 self.configure_vlanID(content, vcenter_conect, network_name)
4639
4640 task = self.add_sriov_to_vm(content,
4641 vm_obj,
4642 host_obj,
4643 network_name,
4644 avilable_sriov_devices[0]
4645 )
4646 if task:
4647 status= self.wait_for_vcenter_task(task, vcenter_conect)
4648 if status:
4649 self.logger.info("Added SRIOV {} to VM {}".format(
4650 no_of_sriov_devices,
4651 str(vm_obj)))
4652 else:
4653                                     self.logger.error("Failed to add SRIOV {} to VM {}".format(
4654 no_of_sriov_devices,
4655 str(vm_obj)))
4656 raise vimconn.vimconnUnexpectedResponse(
4657                                         "Failed to add SRIOV adapter to VM {}".format(str(vm_obj))
4658 )
4659 return True, vm_obj, vcenter_conect
4660 else:
4661                             self.logger.error("Currently there is no host with"\
4662                                               " the {} available SRIOV "\
4663                                               "VFs required for VM {}".format(
4664                                                 no_of_sriov_devices,
4665                                                 vmname_andid)
4666                                               )
4667                             raise vimconn.vimconnNotFoundException(
4668                                 "Currently there is no host with the {} "\
4669                                 "available SRIOV devices required for VM {}".format(
4670                                 no_of_sriov_devices,
4671                                 vmname_andid))
4672 else:
4673                     self.logger.debug("No information about SRIOV devices {}".format(sriov_nets))
4674
4675 except vmodl.MethodFault as error:
4676                 self.logger.error("Error occurred while adding SRIOV adapter: {}".format(error))
4677 return None, vm_obj, vcenter_conect
4678
4679
4680 def get_sriov_devices(self,host, no_of_vfs):
4681 """
4682 Method to get the details of SRIOV devices on given host
4683 Args:
4684 host - vSphere host object
4685 no_of_vfs - number of VFs needed on host
4686
4687 Returns:
4688 array of SRIOV devices
4689 """
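        # Only PCI passthrough entries reported as vim.host.SriovInfo with SRIOV active and at
        # least no_of_vfs virtual functions are considered; the first matching device is returned.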
4690 sriovInfo=[]
4691 if host:
4692 for device in host.config.pciPassthruInfo:
4693 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4694 if device.numVirtualFunction >= no_of_vfs:
4695 sriovInfo.append(device)
4696 break
4697 return sriovInfo
4698
4699
4700 def get_host_and_sriov_devices(self, content, no_of_vfs):
4701 """
4702         Method to get the details of SRIOV devices on all hosts
4703
4704 Args:
4705             content - vCenter content object
4706 no_of_vfs - number of pci VFs needed on host
4707
4708 Returns:
4709 array of SRIOV devices and host object
4710 """
4711 host_obj = None
4712 sriov_device_objs = None
4713 try:
4714 if content:
4715 container = content.viewManager.CreateContainerView(content.rootFolder,
4716 [vim.HostSystem], True)
4717 for host in container.view:
4718 devices = self.get_sriov_devices(host, no_of_vfs)
4719 if devices:
4720 host_obj = host
4721 sriov_device_objs = devices
4722 break
4723 except Exception as exp:
4724 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4725
4726 return host_obj,sriov_device_objs
4727
4728
4729 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
4730 """
4731 Method to add SRIOV adapter to vm
4732
4733 Args:
4734 host_obj - vSphere host object
4735 vm_obj - vSphere vm object
4736 content - vCenter content object
4737             network_name - name of distributed virtual portgroup
4738 sriov_device - SRIOV device info
4739
4740 Returns:
4741 task object
4742 """
4743 devices = []
4744 vnic_label = "sriov nic"
4745 try:
4746 dvs_portgr = self.get_dvport_group(network_name)
4747 network_name = dvs_portgr.name
4748 nic = vim.vm.device.VirtualDeviceSpec()
4749 # VM device
4750 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4751 nic.device = vim.vm.device.VirtualSriovEthernetCard()
4752 nic.device.addressType = 'assigned'
4753 #nic.device.key = 13016
4754 nic.device.deviceInfo = vim.Description()
4755 nic.device.deviceInfo.label = vnic_label
4756 nic.device.deviceInfo.summary = network_name
4757 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
4758
4759 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
4760 nic.device.backing.deviceName = network_name
4761 nic.device.backing.useAutoDetect = False
4762 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
4763 nic.device.connectable.startConnected = True
4764 nic.device.connectable.allowGuestControl = True
4765
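            # The SRIOV backing ties this virtual NIC to the physical function of the
            # SRIOV-capable device; sriov_device.id is the PCI id reported by the host.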
4766 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
4767 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
4768 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
4769
4770 devices.append(nic)
4771 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
4772 task = vm_obj.ReconfigVM_Task(vmconf)
4773 return task
4774 except Exception as exp:
4775 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
4776 return None
4777
4778
4779 def create_dvPort_group(self, network_name):
4780 """
4781         Method to create distributed virtual portgroup
4782
4783 Args:
4784 network_name - name of network/portgroup
4785
4786 Returns:
4787 portgroup key
4788 """
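        # A UUID suffix is appended to the requested name so repeated requests create distinct
        # portgroups; the default security policy below disables promiscuous mode, forged
        # transmits and MAC address changes.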
4789 try:
4790 new_network_name = [network_name, '-', str(uuid.uuid4())]
4791 network_name=''.join(new_network_name)
4792 vcenter_conect, content = self.get_vcenter_content()
4793
4794 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4795 if dv_switch:
4796 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4797 dv_pg_spec.name = network_name
4798
4799 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4800 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4801 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4802 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4803 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4804 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4805
4806 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4807 self.wait_for_vcenter_task(task, vcenter_conect)
4808
4809 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4810 if dvPort_group:
4811                     self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
4812 return dvPort_group.key
4813 else:
4814                 self.logger.debug("No distributed virtual switch found with name {}".format(network_name))
4815
4816 except Exception as exp:
4817             self.logger.error("Error occurred while creating distributed virtual port group {}"\
4818 " : {}".format(network_name, exp))
4819 return None
4820
4821 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4822 """
4823         Method to reconfigure distributed virtual portgroup
4824 
4825         Args:
4826             dvPort_group_name - name of distributed virtual portgroup
4827             content - vCenter content object
4828             config_info - distributed virtual portgroup configuration
4829
4830 Returns:
4831 task object
4832 """
4833 try:
4834 dvPort_group = self.get_dvport_group(dvPort_group_name)
4835 if dvPort_group:
4836 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4837 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4838 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4839 if "vlanID" in config_info:
4840 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4841 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4842
4843 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4844 return task
4845 else:
4846 return None
4847 except Exception as exp:
4848             self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
4849 " : {}".format(dvPort_group_name, exp))
4850 return None
4851
4852
4853 def destroy_dvport_group(self , dvPort_group_name):
4854 """
4855         Method to destroy distributed virtual portgroup
4856 
4857         Args:
4858             dvPort_group_name - name of the distributed virtual portgroup
4859 
4860         Returns:
4861             Task status if the portgroup was successfully deleted, else None
4862 """
4863 vcenter_conect, content = self.get_vcenter_content()
4864 try:
4865 status = None
4866 dvPort_group = self.get_dvport_group(dvPort_group_name)
4867 if dvPort_group:
4868 task = dvPort_group.Destroy_Task()
4869 status = self.wait_for_vcenter_task(task, vcenter_conect)
4870 return status
4871 except vmodl.MethodFault as exp:
4872             self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
4873 exp, dvPort_group_name))
4874 return None
4875
4876
4877 def get_dvport_group(self, dvPort_group_name):
4878 """
4879         Method to get distributed virtual portgroup
4880 
4881         Args:
4882             dvPort_group_name - key of the distributed virtual portgroup
4883
4884 Returns:
4885 portgroup object
4886 """
4887 vcenter_conect, content = self.get_vcenter_content()
4888 dvPort_group = None
4889 try:
4890 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4891 for item in container.view:
4892 if item.key == dvPort_group_name:
4893 dvPort_group = item
4894 break
4895 return dvPort_group
4896 except vmodl.MethodFault as exp:
4897             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
4898 exp, dvPort_group_name))
4899 return None
4900
4901 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4902 """
4903         Method to get distributed virtual portgroup vlan ID
4904 
4905         Args:
4906             dvPort_group_name - key of the distributed virtual portgroup
4907
4908 Returns:
4909 vlan ID
4910 """
4911 vlanId = None
4912 try:
4913 dvPort_group = self.get_dvport_group(dvPort_group_name)
4914 if dvPort_group:
4915 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4916 except vmodl.MethodFault as exp:
4917             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
4918 exp, dvPort_group_name))
4919 return vlanId
4920
4921
4922 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4923 """
4924         Method to configure the vlan ID of a distributed virtual portgroup
4925 
4926         Args:
4927             dvPort_group_name - key of the distributed virtual portgroup
4928
4929 Returns:
4930 None
4931 """
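        # The portgroup is only reconfigured when its current vlan ID is 0 (untagged); a free
        # ID is then taken from the configured 'vlanID_range' via genrate_vlanID().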
4932 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4933 if vlanID == 0:
4934 #configure vlanID
4935 vlanID = self.genrate_vlanID(dvPort_group_name)
4936 config = {"vlanID":vlanID}
4937 task = self.reconfig_portgroup(content, dvPort_group_name,
4938 config_info=config)
4939 if task:
4940 status= self.wait_for_vcenter_task(task, vcenter_conect)
4941 if status:
4942 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4943 dvPort_group_name,vlanID))
4944 else:
4945                 self.logger.error("Failed to reconfigure portgroup {} for vlan ID {}".format(
4946 dvPort_group_name, vlanID))
4947
4948
4949 def genrate_vlanID(self, network_name):
4950 """
4951 Method to get unused vlanID
4952 Args:
4953 network_name - name of network/portgroup
4954 Returns:
4955 vlanID
4956 """
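        # Illustrative config (hypothetical values): config['vlanID_range'] = ["3000-3100", "3200-3300"];
        # IDs already recorded in self.persistent_info["used_vlanIDs"] are skipped.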
4957 vlan_id = None
4958 used_ids = []
4959         if self.config.get('vlanID_range') is None:
4960             raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4961                 "in the config before creating an SRIOV network with a vlan tag")
4962 if "used_vlanIDs" not in self.persistent_info:
4963 self.persistent_info["used_vlanIDs"] = {}
4964 else:
4965 used_ids = self.persistent_info["used_vlanIDs"].values()
4966
4967 for vlanID_range in self.config.get('vlanID_range'):
4968             start_vlanid, end_vlanid = vlanID_range.split("-")
4969             if int(start_vlanid) > int(end_vlanid):
4970 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4971 vlanID_range))
4972
4973 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4974 if id not in used_ids:
4975 vlan_id = id
4976 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4977 return vlan_id
4978 if vlan_id is None:
4979 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4980
4981
4982 def get_obj(self, content, vimtype, name):
4983 """
4984 Get the vsphere object associated with a given text name
4985 """
4986 obj = None
4987 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4988 for item in container.view:
4989 if item.name == name:
4990 obj = item
4991 break
4992 return obj
4993
4994
4995 def insert_media_to_vm(self, vapp, image_id):
4996 """
4997 Method to insert media CD-ROM (ISO image) from catalog to vm.
4998 vapp - vapp object to get vm id
4999             image_id - image id of the CD-ROM (ISO) to be inserted into the vm
5000 """
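        # Flow: look up the catalog item for image_id, resolve the ISO media name/id, then POST a
        # vCloud 1.5 MediaInsertOrEjectParams payload to the VM's media/action/insertMedia endpoint.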
5001 # create connection object
5002 vca = self.connect()
5003 try:
5004 # fetching catalog details
5005 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
5006 response = Http.get(url=rest_url,
5007 headers=vca.vcloud_session.get_vcloud_headers(),
5008 verify=vca.verify,
5009 logger=vca.logger)
5010
5011 if response.status_code != 200:
5012                 self.logger.error("REST call {} failed reason : {} "\
5013                                    "status code : {}".format(rest_url,
5014 response.content,
5015 response.status_code))
5016 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
5017 "catalog details")
5018 # searching iso name and id
5019 iso_name,media_id = self.get_media_details(vca, response.content)
5020
5021 if iso_name and media_id:
5022 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5023 <ns6:MediaInsertOrEjectParams
5024 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
5025 <ns6:Media
5026 type="application/vnd.vmware.vcloud.media+xml"
5027 name="{}.iso"
5028 id="urn:vcloud:media:{}"
5029 href="https://{}/api/media/{}"/>
5030 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
5031 vca.host,media_id)
5032
5033 for vms in vapp._get_vms():
5034 vm_id = (vms.id).split(':')[-1]
5035
5036 headers = vca.vcloud_session.get_vcloud_headers()
5037 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
5038 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
5039
5040 response = Http.post(url=rest_url,
5041 headers=headers,
5042 data=data,
5043 verify=vca.verify,
5044 logger=vca.logger)
5045
5046 if response.status_code != 202:
5047 self.logger.error("Failed to insert CD-ROM to vm")
5048 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
5049 "ISO image to vm")
5050 else:
5051 task = taskType.parseString(response.content, True)
5052 if isinstance(task, GenericTask):
5053 vca.block_until_completed(task)
5054                         self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
5055 " image to vm {}".format(vm_id))
5056 except Exception as exp:
5057 self.logger.error("insert_media_to_vm() : exception occurred "\
5058 "while inserting media CD-ROM")
5059 raise vimconn.vimconnException(message=exp)
5060
5061
5062 def get_media_details(self, vca, content):
5063 """
5064 Method to get catalog item details
5065 vca - connection object
5066 content - Catalog details
5067 Return - Media name, media id
5068 """
5069 cataloghref_list = []
5070 try:
5071 if content:
5072 vm_list_xmlroot = XmlElementTree.fromstring(content)
5073 for child in vm_list_xmlroot.iter():
5074 if 'CatalogItem' in child.tag:
5075 cataloghref_list.append(child.attrib.get('href'))
5076 if cataloghref_list is not None:
5077 for href in cataloghref_list:
5078 if href:
5079 response = Http.get(url=href,
5080 headers=vca.vcloud_session.get_vcloud_headers(),
5081 verify=vca.verify,
5082 logger=vca.logger)
5083 if response.status_code != 200:
5084                             self.logger.error("REST call {} failed reason : {} "\
5085 "status code : {}".format(href,
5086 response.content,
5087 response.status_code))
5088 raise vimconn.vimconnException("get_media_details : Failed to get "\
5089 "catalogitem details")
5090 list_xmlroot = XmlElementTree.fromstring(response.content)
5091 for child in list_xmlroot.iter():
5092 if 'Entity' in child.tag:
5093 if 'media' in child.attrib.get('href'):
5094 name = child.attrib.get('name')
5095 media_id = child.attrib.get('href').split('/').pop()
5096 return name,media_id
5097 else:
5098 self.logger.debug("Media name and id not found")
5099 return False,False
5100 except Exception as exp:
5101 self.logger.error("get_media_details : exception occurred "\
5102 "getting media details")
5103 raise vimconn.vimconnException(message=exp)
5104
5105
5106 def retry_rest(self, method, url, add_headers=None, data=None):
5107 """ Method to get Token & retry respective REST request
5108 Args:
5109             method - HTTP method; one of 'GET', 'PUT', 'POST' or 'DELETE'
5110 url - request url to be used
5111 add_headers - Additional headers (optional)
5112 data - Request payload data to be passed in request
5113 Returns:
5114 response - Response of request
5115 """
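        # Illustrative usage (URL is hypothetical):
        #   response = self.retry_rest('GET', '{}/api/org'.format(self.url))
        # A fresh token is obtained first, so this helper is typically used to repeat a request
        # after the previous vCloud session token has expired.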
5116 response = None
5117
5118 #Get token
5119 self.get_token()
5120
5121 headers=self.vca.vcloud_session.get_vcloud_headers()
5122
5123 if add_headers:
5124 headers.update(add_headers)
5125
5126 if method == 'GET':
5127 response = Http.get(url=url,
5128 headers=headers,
5129 verify=self.vca.verify,
5130 logger=self.vca.logger)
5131 elif method == 'PUT':
5132 response = Http.put(url=url,
5133 data=data,
5134 headers=headers,
5135 verify=self.vca.verify,
5136 logger=self.logger)
5137 elif method == 'POST':
5138 response = Http.post(url=url,
5139 headers=headers,
5140 data=data,
5141 verify=self.vca.verify,
5142 logger=self.vca.logger)
5143 elif method == 'DELETE':
5144 response = Http.delete(url=url,
5145 headers=headers,
5146 verify=self.vca.verify,
5147 logger=self.vca.logger)
5148 return response
5149
5150
5151 def get_token(self):
5152 """ Generate a new token if expired
5153
5154 Returns:
5155             None. On success the new VCA object, which can later be used to connect to vCloud Director, is stored in self.vca
5156 """
5157 vca = None
5158
5159 try:
5160             self.logger.debug("Generating token for vCloud org {} as user {} (datacenter {}).".format(self.org_name,
5161                                                                                                        self.user,
5162                                                                                                        self.org_name))
5163 vca = VCA(host=self.url,
5164 username=self.user,
5165 service_type=STANDALONE,
5166 version=VCAVERSION,
5167 verify=False,
5168 log=False)
5169
5170 result = vca.login(password=self.passwd, org=self.org_name)
5171 if result is True:
5172 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
5173 if result is True:
5174 self.logger.info(
5175                         "Successfully generated token for vCloud Director org: {} as user: {}".format(self.org_name, self.user))
5176 #Update vca
5177 self.vca = vca
5178 return
5179
5180 except:
5181 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
5182 "{} as user: {}".format(self.org_name, self.user))
5183
5184 if not vca or not result:
5185 raise vimconn.vimconnConnectionException("self.connect() is failed while reconnecting")
5186
5187
5188 def get_vdc_details(self):
5189 """ Get VDC details using pyVcloud Lib
5190
5191 Returns vdc object
5192 """
5193 vdc = self.vca.get_vdc(self.tenant_name)
5194
5195 #Retry once, if failed by refreshing token
5196 if vdc is None:
5197 self.get_token()
5198 vdc = self.vca.get_vdc(self.tenant_name)
5199
5200 return vdc
5201
5202