1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
vimconn_vmware implements an abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
68 # global variable for vcd connector type
69 STANDALONE = 'standalone'
70
71 # key for flavor dicts
72 FLAVOR_RAM_KEY = 'ram'
73 FLAVOR_VCPUS_KEY = 'vcpus'
74 FLAVOR_DISK_KEY = 'disk'
75 DEFAULT_IP_PROFILE = {'dhcp_count':50,
76 'dhcp_enabled':True,
77 'ip_version':"IPv4"
78 }
79 # global variable for wait time
80 INTERVAL_TIME = 5
81 MAX_WAIT_TIME = 1800
82
83 VCAVERSION = '5.9'
84
85 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
86 __date__ = "$12-Jan-2017 11:09:29$"
87 __version__ = '0.1'
88
89 # -1: "Could not be created",
90 # 0: "Unresolved",
91 # 1: "Resolved",
92 # 2: "Deployed",
93 # 3: "Suspended",
94 # 4: "Powered on",
95 # 5: "Waiting for user input",
96 # 6: "Unknown state",
97 # 7: "Unrecognized state",
98 # 8: "Powered off",
99 # 9: "Inconsistent state",
100 # 10: "Children do not all have the same status",
101 # 11: "Upload initiated, OVF descriptor pending",
102 # 12: "Upload initiated, copying contents",
103 # 13: "Upload initiated , disk contents pending",
104 # 14: "Upload has been quarantined",
105 # 15: "Upload quarantine period has expired"
106
107 # mapping vCD status to MANO
108 vcdStatusCode2manoFormat = {4: 'ACTIVE',
109 7: 'PAUSED',
110 3: 'SUSPENDED',
111 8: 'INACTIVE',
112 12: 'BUILD',
113 -1: 'ERROR',
114 14: 'DELETED'}
115
116 #
117 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
118 'ERROR': 'ERROR', 'DELETED': 'DELETED'
119 }
120
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
        Constructor creates a VMware connector to vCloud director.

        By default the constructor doesn't validate the connection state, so a client can create the
        object with None arguments. If the client specifies username, password, host and VDC name,
        the connector initializes the other missing attributes.

        a) It initializes the organization UUID
        b) It initializes tenant_id / VDC ID (this information is derived from the tenant name)

        Args:
            uuid - organization uuid.
            name - organization name; it must be present in vCloud director.
            tenant_id - VDC uuid; it must be present in vCloud director.
            tenant_name - VDC name.
            url - hostname or IP address of vCloud director.
            url_admin - same as above.
            user - user that administers the organization. The caller must make sure that
                   the username has the right privileges.

            password - password for the user.

            The VMware connector also requires PVDC administrative privileges and a separate account.
            These variables must be passed via the config argument, a dict containing the keys

                dict['admin_username']
                dict['admin_password']
            config - provides NSX and vCenter information.

        Returns:
            Nothing.
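
        Example (illustrative sketch; the host name, org/VDC names and credentials below are
        placeholders, not values taken from this module):

            vim = vimconnector(uuid=None, name='vmware-site', tenant_id=None,
                               tenant_name='MyOrg:MyVDC',
                               url='https://vcd.example.com', user='orguser', passwd='orgpass',
                               config={'admin_username': 'sysadmin', 'admin_password': 'syspass',
                                       'nsx_manager': 'https://nsx.example.com',
                                       'nsx_user': 'admin', 'nsx_password': 'nsxpass'})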
157 """
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
            raise vimconn.vimconnException(message="Error: admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
        #     raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
        logging.debug("vcd admin username {} vcd admin password {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 if index == 'tenant_id':
243 return self.tenant_id
244 if index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 if index == 'tenant_id':
269 self.tenant_id = value
270 if index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
289 def connect_as_admin(self):
        """ Method to connect as the PVDC admin user to vCloud director.
            There are certain actions that can be done only by the provider VDC admin user,
            e.g. organization creation, provider network creation.

            Returns:
                The returned vca object can later be used to connect to vCloud director as admin for the provider VDC
        """
297
298 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
299
300 vca_admin = VCA(host=self.url,
301 username=self.admin_user,
302 service_type=STANDALONE,
303 version=VCAVERSION,
304 verify=False,
305 log=False)
306 result = vca_admin.login(password=self.admin_password, org='System')
307 if not result:
308 raise vimconn.vimconnConnectionException(
309 "Can't connect to a vCloud director as: {}".format(self.admin_user))
310 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
311 if result is True:
312 self.logger.info(
                    "Successfully logged in to vCloud director org: {} as user: {}".format('System', self.admin_user))
314
315 return vca_admin
316
317 def connect(self):
        """ Method to connect as a normal user to vCloud director.

            Returns:
                The returned vca object can later be used to connect to vCloud director as admin for the VDC
        """
323
324 try:
325 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
326 self.user,
327 self.org_name))
328 vca = VCA(host=self.url,
329 username=self.user,
330 service_type=STANDALONE,
331 version=VCAVERSION,
332 verify=False,
333 log=False)
334
335 result = vca.login(password=self.passwd, org=self.org_name)
336 if not result:
337 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
338 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
339 if result is True:
340 self.logger.info(
                    "Successfully logged in to vCloud director org: {} as user: {}".format(self.org_name, self.user))
342
343 except:
344 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
345 "{} as user: {}".format(self.org_name, self.user))
346
347 return vca
348
349 def init_organization(self):
        """ Method initializes organization UUID and VDC parameters.

            At a bare minimum the client must provide the organization name present in vCloud director and the VDC.

            The VDC UUID (tenant_id) will be initialized at run time if the client didn't pass it to the constructor.
            The Org UUID will be initialized at run time if the data center is present in vCloud director.

            Returns:
                The returned vca object can later be used to connect to vCloud director as admin
        """
360 vca = self.connect()
361 if not vca:
362 raise vimconn.vimconnConnectionException("self.connect() is failed.")
363
364 self.vca = vca
365 try:
366 if self.org_uuid is None:
367 org_dict = self.get_org_list()
368 for org in org_dict:
369 # we set org UUID at the init phase but we can do it only when we have valid credential.
370 if org_dict[org] == self.org_name:
371 self.org_uuid = org
372 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
373 break
374 else:
375 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
376
            # if all is well, we request the org details
378 org_details_dict = self.get_org(org_uuid=self.org_uuid)
379
380 # we have two case if we want to initialize VDC ID or VDC name at run time
381 # tenant_name provided but no tenant id
382 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
383 vdcs_dict = org_details_dict['vdcs']
384 for vdc in vdcs_dict:
385 if vdcs_dict[vdc] == self.tenant_name:
386 self.tenant_id = vdc
387 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
388 self.org_name))
389 break
390 else:
391 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
392 # case two we have tenant_id but we don't have tenant name so we find and set it.
393 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
394 vdcs_dict = org_details_dict['vdcs']
395 for vdc in vdcs_dict:
396 if vdc == self.tenant_id:
397 self.tenant_name = vdcs_dict[vdc]
398 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
399 self.org_name))
400 break
401 else:
402 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
403 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
404 except:
            self.logger.debug("Failed to initialize organization UUID for org {}".format(self.org_name))
406 self.logger.debug(traceback.format_exc())
407 self.org_uuid = None
408
409 def new_tenant(self, tenant_name=None, tenant_description=None):
410 """ Method adds a new tenant to VIM with this name.
411 This action requires access to create VDC action in vCloud director.
412
        Args:
            tenant_name is the name of the tenant to be created.
            tenant_description is not used for this call

        Return:
            returns the tenant identifier in UUID format.
            If the action fails the method will throw a vimconn.vimconnException
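
        Example (illustrative; 'new-vdc' is a placeholder name):

            vdc_uuid = vim.new_tenant(tenant_name='new-vdc')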
420 """
421 vdc_task = self.create_vdc(vdc_name=tenant_name)
422 if vdc_task is not None:
423 vdc_uuid, value = vdc_task.popitem()
            self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
425 return vdc_uuid
426 else:
            raise vimconn.vimconnException("Failed to create tenant {}".format(tenant_name))
428
429 def delete_tenant(self, tenant_id=None):
430 """ Delete a tenant from VIM
431 Args:
432 tenant_id is tenant_id to be deleted.
433
434 Return:
435 returns the tenant identifier in UUID format.
                If the action fails the method will throw an exception
437 """
438 vca = self.connect_as_admin()
439 if not vca:
440 raise vimconn.vimconnConnectionException("self.connect() is failed")
441
442 if tenant_id is not None:
443 if vca.vcloud_session and vca.vcloud_session.organization:
444 #Get OrgVDC
445 url_list = [self.vca.host, '/api/vdc/', tenant_id]
446 orgvdc_herf = ''.join(url_list)
447 response = Http.get(url=orgvdc_herf,
448 headers=vca.vcloud_session.get_vcloud_headers(),
449 verify=vca.verify,
450 logger=vca.logger)
451
452 if response.status_code != requests.codes.ok:
453 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
454 "Return status code {}".format(orgvdc_herf,
455 response.status_code))
456 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
457
458 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
459 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
460 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
461 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
462 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
463
464 #Delete OrgVDC
465 response = Http.delete(url=vdc_remove_href,
466 headers=vca.vcloud_session.get_vcloud_headers(),
467 verify=vca.verify,
468 logger=vca.logger)
469
470 if response.status_code == 202:
471 delete_vdc_task = taskType.parseString(response.content, True)
472 if type(delete_vdc_task) is GenericTask:
473 self.vca.block_until_completed(delete_vdc_task)
474 self.logger.info("Deleted tenant with ID {}".format(tenant_id))
475 return tenant_id
476 else:
477 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
478 "Return status code {}".format(vdc_remove_href,
479 response.status_code))
480 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
481 else:
482 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
483 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
484
485
486 def get_tenant_list(self, filter_dict={}):
487 """Obtain tenants of VIM
488 filter_dict can contain the following keys:
489 name: filter by tenant name
490 id: filter by tenant uuid/id
491 <other VIM specific>
492 Returns the tenant list of dictionaries:
            [{'name':'<name>', 'id':'<id>', ...}, ...]
494
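        Example (illustrative filter usage):

            all_vdcs = vim.get_tenant_list()
            one_vdc = vim.get_tenant_list({'name': 'my-vdc'})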
495 """
496 org_dict = self.get_org(self.org_uuid)
497 vdcs_dict = org_dict['vdcs']
498
499 vdclist = []
500 try:
501 for k in vdcs_dict:
502 entry = {'name': vdcs_dict[k], 'id': k}
503 # if caller didn't specify dictionary we return all tenants.
504 if filter_dict is not None and filter_dict:
505 filtered_entry = entry.copy()
506 filtered_dict = set(entry.keys()) - set(filter_dict)
507 for unwanted_key in filtered_dict: del entry[unwanted_key]
508 if filter_dict == entry:
509 vdclist.append(filtered_entry)
510 else:
511 vdclist.append(entry)
512 except:
513 self.logger.debug("Error in get_tenant_list()")
514 self.logger.debug(traceback.format_exc())
            raise vimconn.vimconnException("Incorrect state.")
516
517 return vdclist
518
519 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
520 """Adds a tenant network to VIM
521 net_name is the name
            net_type can be 'bridge', 'data' or 'ptp'.
523 ip_profile is a dict containing the IP parameters of the network
524 shared is a boolean
525 Returns the network identifier"""
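
        # Illustrative ip_profile sketch (keys assumed from the generic OSM RO ip_profile
        # format; not every key is necessarily honoured by this connector):
        #
        #     ip_profile = {'ip_version': 'IPv4',
        #                   'subnet_address': '192.168.10.0/24',
        #                   'gateway_address': '192.168.10.1',
        #                   'dns_address': '8.8.8.8',
        #                   'dhcp_enabled': True,
        #                   'dhcp_start_address': '192.168.10.20',
        #                   'dhcp_count': 50}
        #     net_id = vim.new_network('mgmt-net', 'bridge', ip_profile=ip_profile, shared=False)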
526
527 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
528 .format(net_name, net_type, ip_profile, shared))
529
530 isshared = 'false'
531 if shared:
532 isshared = 'true'
533
534 # ############# Stub code for SRIOV #################
535 # if net_type == "data" or net_type == "ptp":
536 # if self.config.get('dv_switch_name') == None:
537 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
538 # network_uuid = self.create_dvPort_group(net_name)
539
540 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
541 ip_profile=ip_profile, isshared=isshared)
542 if network_uuid is not None:
543 return network_uuid
544 else:
            raise vimconn.vimconnUnexpectedResponse("Failed to create a new network {}".format(net_name))
546
547 def get_vcd_network_list(self):
        """ Method lists the available networks of the VDC for the logged-in tenant

            Returns:
                A list of dictionaries, one per network of the organization VDC
        """
553
554 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
555
556 if not self.tenant_name:
557 raise vimconn.vimconnConnectionException("Tenant name is empty.")
558
559 vdc = self.get_vdc_details()
560 if vdc is None:
561 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
562
563 vdc_uuid = vdc.get_id().split(":")[3]
564 networks = self.vca.get_networks(vdc.get_name())
565 network_list = []
566 try:
567 for network in networks:
568 filter_dict = {}
569 netid = network.get_id().split(":")
570 if len(netid) != 4:
571 continue
572
573 filter_dict["name"] = network.get_name()
574 filter_dict["id"] = netid[3]
575 filter_dict["shared"] = network.get_IsShared()
576 filter_dict["tenant_id"] = vdc_uuid
577 if network.get_status() == 1:
578 filter_dict["admin_state_up"] = True
579 else:
580 filter_dict["admin_state_up"] = False
581 filter_dict["status"] = "ACTIVE"
582 filter_dict["type"] = "bridge"
583 network_list.append(filter_dict)
584 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
585 except:
586 self.logger.debug("Error in get_vcd_network_list")
587 self.logger.debug(traceback.format_exc())
588 pass
589
590 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
591 return network_list
592
593 def get_network_list(self, filter_dict={}):
594 """Obtain tenant networks of VIM
595 Filter_dict can be:
596 name: network name OR/AND
597 id: network uuid OR/AND
598 shared: boolean OR/AND
599 tenant_id: tenant OR/AND
600 admin_state_up: boolean
601 status: 'ACTIVE'
602
603 [{key : value , key : value}]
604
605 Returns the network list of dictionaries:
606 [{<the fields at Filter_dict plus some VIM specific>}, ...]
607 List can be empty
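
        Example (illustrative):

            nets = vim.get_network_list({'name': 'mgmt-net'})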
608 """
609
610 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
611
612 if not self.tenant_name:
613 raise vimconn.vimconnConnectionException("Tenant name is empty.")
614
615 vdc = self.get_vdc_details()
616 if vdc is None:
617 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
618
619 try:
620 vdcid = vdc.get_id().split(":")[3]
621 networks = self.vca.get_networks(vdc.get_name())
622 network_list = []
623
624 for network in networks:
625 filter_entry = {}
626 net_uuid = network.get_id().split(":")
627 if len(net_uuid) != 4:
628 continue
629 else:
630 net_uuid = net_uuid[3]
631 # create dict entry
632 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
633 vdcid,
634 network.get_name()))
635 filter_entry["name"] = network.get_name()
636 filter_entry["id"] = net_uuid
637 filter_entry["shared"] = network.get_IsShared()
638 filter_entry["tenant_id"] = vdcid
639 if network.get_status() == 1:
640 filter_entry["admin_state_up"] = True
641 else:
642 filter_entry["admin_state_up"] = False
643 filter_entry["status"] = "ACTIVE"
644 filter_entry["type"] = "bridge"
645 filtered_entry = filter_entry.copy()
646
647 if filter_dict is not None and filter_dict:
                    # we remove all the key : value pairs we don't care about and match only
                    # the requested fields
650 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
651 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
652 if filter_dict == filter_entry:
653 network_list.append(filtered_entry)
654 else:
655 network_list.append(filtered_entry)
656 except:
657 self.logger.debug("Error in get_vcd_network_list")
658 self.logger.debug(traceback.format_exc())
659
660 self.logger.debug("Returning {}".format(network_list))
661 return network_list
662
663 def get_network(self, net_id):
664 """Method obtains network details of net_id VIM network
           Returns a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields"""
666
667 try:
668 vdc = self.get_vdc_details()
669 vdc_id = vdc.get_id().split(":")[3]
670
671 networks = self.vca.get_networks(vdc.get_name())
672 filter_dict = {}
673
674 if not networks:
                raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
676
677 for network in networks:
678 vdc_network_id = network.get_id().split(":")
679 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
680 filter_dict["name"] = network.get_name()
681 filter_dict["id"] = vdc_network_id[3]
682 filter_dict["shared"] = network.get_IsShared()
683 filter_dict["tenant_id"] = vdc_id
684 if network.get_status() == 1:
685 filter_dict["admin_state_up"] = True
686 else:
687 filter_dict["admin_state_up"] = False
688 filter_dict["status"] = "ACTIVE"
689 filter_dict["type"] = "bridge"
690 self.logger.debug("Returning {}".format(filter_dict))
691 return filter_dict
692 else:
693 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
694
695 except Exception as e:
696 self.logger.debug("Error in get_network")
697 self.logger.debug(traceback.format_exc())
698 if isinstance(e, vimconn.vimconnException):
699 raise
700 else:
701 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
702
703 return filter_dict
704
705 def delete_network(self, net_id):
706 """
        Method deletes a tenant network from VIM, given the network id.
708
709 Returns the network identifier or raise an exception
710 """
711
712 # ############# Stub code for SRIOV #################
713 # dvport_group = self.get_dvport_group(net_id)
714 # if dvport_group:
715 # #delete portgroup
716 # status = self.destroy_dvport_group(net_id)
717 # if status:
718 # # Remove vlanID from persistent info
719 # if net_id in self.persistent_info["used_vlanIDs"]:
720 # del self.persistent_info["used_vlanIDs"][net_id]
721 #
722 # return net_id
723
724 vcd_network = self.get_vcd_network(network_uuid=net_id)
725 if vcd_network is not None and vcd_network:
726 if self.delete_network_action(network_uuid=net_id):
727 return net_id
728 else:
729 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
730
731 def refresh_nets_status(self, net_list):
732 """Get the status of the networks
733 Params: the list of network identifiers
734 Returns a dictionary with:
735 net_id: #VIM id of this network
736 status: #Mandatory. Text with one of:
737 # DELETED (not found at vim)
738 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
739 # OTHER (Vim reported other status not understood)
740 # ERROR (VIM indicates an ERROR status)
741 # ACTIVE, INACTIVE, DOWN (admin down),
742 # BUILD (on building process)
743 #
744 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
745 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
746
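            Example of a returned entry (illustrative; the uuid is a placeholder):

                {'<net-uuid>': {'status': 'ACTIVE', 'error_msg': '', 'vim_info': '...'}}
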
747 """
748
749 dict_entry = {}
750 try:
751 for net in net_list:
752 errormsg = ''
753 vcd_network = self.get_vcd_network(network_uuid=net)
754 if vcd_network is not None and vcd_network:
755 if vcd_network['status'] == '1':
756 status = 'ACTIVE'
757 else:
758 status = 'DOWN'
759 else:
760 status = 'DELETED'
761 errormsg = 'Network not found.'
762
763 dict_entry[net] = {'status': status, 'error_msg': errormsg,
764 'vim_info': yaml.safe_dump(vcd_network)}
765 except:
766 self.logger.debug("Error in refresh_nets_status")
767 self.logger.debug(traceback.format_exc())
768
769 return dict_entry
770
771 def get_flavor(self, flavor_id):
772 """Obtain flavor details from the VIM
773 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
774 """
775 if flavor_id not in vimconnector.flavorlist:
776 raise vimconn.vimconnNotFoundException("Flavor not found.")
777 return vimconnector.flavorlist[flavor_id]
778
779 def new_flavor(self, flavor_data):
780 """Adds a tenant flavor to VIM
781 flavor_data contains a dictionary with information, keys:
782 name: flavor name
783 ram: memory (cloud type) in MBytes
                vcpus: cpus (cloud type)
785 extended: EPA parameters
786 - numas: #items requested in same NUMA
787 memory: number of 1G huge pages memory
788 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
789 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
790 - name: interface name
791 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
792 bandwidth: X Gbps; requested guarantee bandwidth
793 vpci: requested virtual PCI address
794 disk: disk size
795 is_public:
796 #TODO to concrete
797 Returns the flavor identifier"""
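
        # Illustrative flavor_data sketch (values are placeholders; the 'extended'/'numas'
        # structure follows the docstring above):
        #
        #     flavor_data = {'name': 'small', 'ram': 2048, 'vcpus': 2, 'disk': 10,
        #                    'extended': {'numas': [{'memory': 2, 'paired-threads': 1}]}}
        #     flavor_id = vim.new_flavor(flavor_data)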
798
799 # generate a new uuid put to internal dict and return it.
800 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
801 new_flavor=flavor_data
802 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
803 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
804 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
805
806 if not isinstance(ram, int):
807 raise vimconn.vimconnException("Non-integer value for ram")
808 elif not isinstance(cpu, int):
809 raise vimconn.vimconnException("Non-integer value for cpu")
810 elif not isinstance(disk, int):
811 raise vimconn.vimconnException("Non-integer value for disk")
812
813 extended_flv = flavor_data.get("extended")
814 if extended_flv:
815 numas=extended_flv.get("numas")
816 if numas:
817 for numa in numas:
818 #overwrite ram and vcpus
819 ram = numa['memory']*1024
820 if 'paired-threads' in numa:
821 cpu = numa['paired-threads']*2
822 elif 'cores' in numa:
823 cpu = numa['cores']
824 elif 'threads' in numa:
825 cpu = numa['threads']
826
827 new_flavor[FLAVOR_RAM_KEY] = ram
828 new_flavor[FLAVOR_VCPUS_KEY] = cpu
829 new_flavor[FLAVOR_DISK_KEY] = disk
830 # generate a new uuid put to internal dict and return it.
831 flavor_id = uuid.uuid4()
832 vimconnector.flavorlist[str(flavor_id)] = new_flavor
833 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
834
835 return str(flavor_id)
836
837 def delete_flavor(self, flavor_id):
838 """Deletes a tenant flavor from VIM identify by its id
839
840 Returns the used id or raise an exception
841 """
842 if flavor_id not in vimconnector.flavorlist:
843 raise vimconn.vimconnNotFoundException("Flavor not found.")
844
845 vimconnector.flavorlist.pop(flavor_id, None)
846 return flavor_id
847
848 def new_image(self, image_dict):
849 """
850 Adds a tenant image to VIM
        Returns:
            the image identifier (catalog UUID) if the image is created
            an exception is raised on error
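
        Example (illustrative; the path is a placeholder):

            image_id = vim.new_image({'location': '/opt/images/cirros.ovf'})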
854 """
855
856 return self.get_image_id_from_path(image_dict['location'])
857
858 def delete_image(self, image_id):
859 """
860 Deletes a tenant image from VIM
861 Args:
862 image_id is ID of Image to be deleted
863 Return:
864 returns the image identifier in UUID format or raises an exception on error
865 """
866 vca = self.connect_as_admin()
867 if not vca:
868 raise vimconn.vimconnConnectionException("self.connect() is failed")
869 # Get Catalog details
870 url_list = [self.vca.host, '/api/catalog/', image_id]
871 catalog_herf = ''.join(url_list)
872 response = Http.get(url=catalog_herf,
873 headers=vca.vcloud_session.get_vcloud_headers(),
874 verify=vca.verify,
875 logger=vca.logger)
876
877 if response.status_code != requests.codes.ok:
878 self.logger.debug("delete_image():GET REST API call {} failed. "\
879 "Return status code {}".format(catalog_herf,
880 response.status_code))
881 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
882
883 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
884 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
885 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
886
887 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
888 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
889 for catalogItem in catalogItems:
890 catalogItem_href = catalogItem.attrib['href']
891
892 #GET details of catalogItem
893 response = Http.get(url=catalogItem_href,
894 headers=vca.vcloud_session.get_vcloud_headers(),
895 verify=vca.verify,
896 logger=vca.logger)
897
898 if response.status_code != requests.codes.ok:
899 self.logger.debug("delete_image():GET REST API call {} failed. "\
900 "Return status code {}".format(catalog_herf,
901 response.status_code))
902 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
903 catalogItem,
904 image_id))
905
906 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
907 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
908 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
909 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
910
911 #Remove catalogItem
912 response = Http.delete(url= catalogitem_remove_href,
913 headers=vca.vcloud_session.get_vcloud_headers(),
914 verify=vca.verify,
915 logger=vca.logger)
916 if response.status_code == requests.codes.no_content:
917 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
918 else:
919 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
920
921 #Remove catalog
922 url_list = [self.vca.host, '/api/admin/catalog/', image_id]
923 catalog_remove_herf = ''.join(url_list)
924 response = Http.delete(url= catalog_remove_herf,
925 headers=vca.vcloud_session.get_vcloud_headers(),
926 verify=vca.verify,
927 logger=vca.logger)
928
929 if response.status_code == requests.codes.no_content:
930 self.logger.debug("Deleted Catalog {}".format(image_id))
931 return image_id
932 else:
933 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
934
935
936 def catalog_exists(self, catalog_name, catalogs):
937 """
938
939 :param catalog_name:
940 :param catalogs:
941 :return:
942 """
943 for catalog in catalogs:
944 if catalog.name == catalog_name:
945 return True
946 return False
947
948 def create_vimcatalog(self, vca=None, catalog_name=None):
949 """ Create new catalog entry in vCloud director.
950
951 Args
952 vca: vCloud director.
                catalog_name: catalog that the client wishes to create. Note: no validation is done on the name.
                              The client must make sure to provide a valid string representation.
955
956 Return (bool) True if catalog created.
957
958 """
959 try:
960 task = vca.create_catalog(catalog_name, catalog_name)
961 result = vca.block_until_completed(task)
962 if not result:
963 return False
964 catalogs = vca.get_catalogs()
965 except:
966 return False
967 return self.catalog_exists(catalog_name, catalogs)
968
969 # noinspection PyIncorrectDocstring
970 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
971 description='', progress=False, chunk_bytes=128 * 1024):
972 """
        Uploads an OVF file to a vCloud catalog
974
975 :param chunk_bytes:
976 :param progress:
977 :param description:
978 :param image_name:
979 :param vca:
980 :param catalog_name: (str): The name of the catalog to upload the media.
981 :param media_file_name: (str): The name of the local media file to upload.
982 :return: (bool) True if the media file was successfully uploaded, false otherwise.
983 """
984 os.path.isfile(media_file_name)
985 statinfo = os.stat(media_file_name)
986
987 # find a catalog entry where we upload OVF.
        #  create the vApp Template and check the status; if vCD is able to read the OVF it will respond with the
        #  appropriate status change.
        #  if vCD can parse the OVF we upload the VMDK file
991 try:
992 for catalog in vca.get_catalogs():
993 if catalog_name != catalog.name:
994 continue
995 link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
996 link.get_rel() == 'add', catalog.get_Link())
997 assert len(link) == 1
998 data = """
999 <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
1000 """ % (escape(catalog_name), escape(description))
1001 headers = vca.vcloud_session.get_vcloud_headers()
1002 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1003 response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
1004 if response.status_code == requests.codes.created:
1005 catalogItem = XmlElementTree.fromstring(response.content)
1006 entity = [child for child in catalogItem if
1007 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1008 href = entity.get('href')
1009 template = href
1010 response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
1011 verify=vca.verify, logger=self.logger)
1012
1013 if response.status_code == requests.codes.ok:
1014 media = mediaType.parseString(response.content, True)
1015 link = filter(lambda link: link.get_rel() == 'upload:default',
1016 media.get_Files().get_File()[0].get_Link())[0]
1017 headers = vca.vcloud_session.get_vcloud_headers()
1018 headers['Content-Type'] = 'Content-Type text/xml'
1019 response = Http.put(link.get_href(),
1020 data=open(media_file_name, 'rb'),
1021 headers=headers,
1022 verify=vca.verify, logger=self.logger)
1023 if response.status_code != requests.codes.ok:
1024 self.logger.debug(
1025 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1026 media_file_name))
1027 return False
1028
                    # TODO fix this with an async block
1030 time.sleep(5)
1031
1032 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1033
1034 # uploading VMDK file
1035 # check status of OVF upload and upload remaining files.
1036 response = Http.get(template,
1037 headers=vca.vcloud_session.get_vcloud_headers(),
1038 verify=vca.verify,
1039 logger=self.logger)
1040
1041 if response.status_code == requests.codes.ok:
1042 media = mediaType.parseString(response.content, True)
1043 number_of_files = len(media.get_Files().get_File())
1044 for index in xrange(0, number_of_files):
1045 links_list = filter(lambda link: link.get_rel() == 'upload:default',
1046 media.get_Files().get_File()[index].get_Link())
1047 for link in links_list:
                            # we skip the ovf since it is already uploaded.
1049 if 'ovf' in link.get_href():
1050 continue
                            # The OVF file and VMDK must be in the same directory
1052 head, tail = os.path.split(media_file_name)
1053 file_vmdk = head + '/' + link.get_href().split("/")[-1]
1054 if not os.path.isfile(file_vmdk):
1055 return False
1056 statinfo = os.stat(file_vmdk)
1057 if statinfo.st_size == 0:
1058 return False
1059 hrefvmdk = link.get_href()
1060
1061 if progress:
1062 print("Uploading file: {}".format(file_vmdk))
1063 if progress:
1064 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1065 FileTransferSpeed()]
1066 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1067
1068 bytes_transferred = 0
1069 f = open(file_vmdk, 'rb')
1070 while bytes_transferred < statinfo.st_size:
1071 my_bytes = f.read(chunk_bytes)
1072 if len(my_bytes) <= chunk_bytes:
1073 headers = vca.vcloud_session.get_vcloud_headers()
1074 headers['Content-Range'] = 'bytes %s-%s/%s' % (
1075 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1076 headers['Content-Length'] = str(len(my_bytes))
1077 response = Http.put(hrefvmdk,
1078 headers=headers,
1079 data=my_bytes,
1080 verify=vca.verify,
1081 logger=None)
1082
1083 if response.status_code == requests.codes.ok:
1084 bytes_transferred += len(my_bytes)
1085 if progress:
1086 progress_bar.update(bytes_transferred)
1087 else:
1088 self.logger.debug(
1089 'file upload failed with error: [%s] %s' % (response.status_code,
1090 response.content))
1091
1092 f.close()
1093 return False
1094 f.close()
1095 if progress:
1096 progress_bar.finish()
1097 time.sleep(10)
1098 return True
1099 else:
1100 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1101 format(catalog_name, media_file_name))
1102 return False
1103 except Exception as exp:
1104 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1105 .format(catalog_name,media_file_name, exp))
1106 raise vimconn.vimconnException(
1107 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1108 .format(catalog_name,media_file_name, exp))
1109
1110 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1111 return False
1112
1113 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1114 """Upload media file"""
1115 # TODO add named parameters for readability
1116
1117 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1118 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1119
1120 def validate_uuid4(self, uuid_string=None):
1121 """ Method validate correct format of UUID.
1122
            Return: true if the string represents a valid uuid
1124 """
1125 try:
1126 val = uuid.UUID(uuid_string, version=4)
1127 except ValueError:
1128 return False
1129 return True
1130
1131 def get_catalogid(self, catalog_name=None, catalogs=None):
        """  Method checks the catalogs and returns the catalog ID in UUID format.

            Args
                catalog_name: catalog name as string
                catalogs:  list of catalogs.

            Return: catalog uuid
1139 """
1140
1141 for catalog in catalogs:
1142 if catalog.name == catalog_name:
1143 catalog_id = catalog.get_id().split(":")
1144 return catalog_id[3]
1145 return None
1146
1147 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
        """  Method checks the catalogs and returns the catalog name; lookup is done by catalog UUID.

            Args
                catalog_uuid: catalog UUID as string
                catalogs:  list of catalogs.

            Return: catalog name or None
1155 """
1156
1157 if not self.validate_uuid4(uuid_string=catalog_uuid):
1158 return None
1159
1160 for catalog in catalogs:
1161 catalog_id = catalog.get_id().split(":")[3]
1162 if catalog_id == catalog_uuid:
1163 return catalog.name
1164 return None
1165
1166 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
        """  Method checks the catalogs and returns the catalog object; lookup is done by catalog UUID.

            Args
                catalog_uuid: catalog UUID as string
                catalogs:  list of catalogs.

            Return: catalog object or None
1174 """
1175
1176 if not self.validate_uuid4(uuid_string=catalog_uuid):
1177 return None
1178
1179 for catalog in catalogs:
1180 catalog_id = catalog.get_id().split(":")[3]
1181 if catalog_id == catalog_uuid:
1182 return catalog
1183 return None
1184
1185 def get_image_id_from_path(self, path=None, progress=False):
        """ Method uploads an OVF image to vCloud director.

            Each OVF image is represented as a single catalog entry in vCloud director.
            The method checks for an existing catalog entry. The check is done by file name without file extension.

            If the given catalog name is already present the method will respond with the existing catalog uuid,
            otherwise it will create a new catalog entry and upload the OVF file to the newly created catalog.

            If the method can't create a catalog entry or upload a file it will throw an exception.

            The method accepts a boolean flag progress that will output a progress bar. It is a useful option
            for the standalone upload use case, e.g. to test a large file upload.

            Args
                path: - valid path to the OVF file.
                progress - boolean flag to show a progress bar.

            Return: if the image is uploaded correctly the method will provide the image catalog UUID.
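
            Example (illustrative; the path is a placeholder):

                image_id = vim.get_image_id_from_path('/opt/images/cirros.ovf', progress=False)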
1204 """
1205
1206 if not path:
1207 raise vimconn.vimconnException("Image path can't be None.")
1208
1209 if not os.path.isfile(path):
1210 raise vimconn.vimconnException("Can't read file. File not found.")
1211
1212 if not os.access(path, os.R_OK):
1213 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1214
1215 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1216
1217 dirpath, filename = os.path.split(path)
1218 flname, file_extension = os.path.splitext(path)
1219 if file_extension != '.ovf':
            self.logger.debug("Wrong file extension {}; the connector supports only the OVF container.".format(file_extension))
1221 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1222
1223 catalog_name = os.path.splitext(filename)[0]
1224 catalog_md5_name = hashlib.md5(path).hexdigest()
1225 self.logger.debug("File name {} Catalog Name {} file path {} "
1226 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1227
1228 try:
1229 catalogs = self.vca.get_catalogs()
1230 except Exception as exp:
1231 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1232 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1233
1234 if len(catalogs) == 0:
1235 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1236 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1237 if not result:
1238 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1239 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1240 media_name=filename, medial_file_name=path, progress=progress)
1241 if not result:
1242 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1243 return self.get_catalogid(catalog_name, self.vca.get_catalogs())
1244 else:
1245 for catalog in catalogs:
1246 # search for existing catalog if we find same name we return ID
1247 # TODO optimize this
1248 if catalog.name == catalog_md5_name:
1249 self.logger.debug("Found existing catalog entry for {} "
1250 "catalog id {}".format(catalog_name,
1251 self.get_catalogid(catalog_md5_name, catalogs)))
1252 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1253
1254 # if we didn't find existing catalog we create a new one and upload image.
1255 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1256 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1257 if not result:
1258 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1259
1260 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1261 media_name=filename, medial_file_name=path, progress=progress)
1262 if not result:
1263 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1264
1265 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1266
1267 def get_image_list(self, filter_dict={}):
1268 '''Obtain tenant images from VIM
1269 Filter_dict can be:
1270 name: image name
1271 id: image uuid
1272 checksum: image checksum
1273 location: image path
1274 Returns the image list of dictionaries:
1275 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1276 List can be empty
1277 '''
1278
1279 try:
1280 image_list = []
1281 catalogs = self.vca.get_catalogs()
1282 if len(catalogs) == 0:
1283 return image_list
1284 else:
1285 for catalog in catalogs:
1286 catalog_uuid = catalog.get_id().split(":")[3]
1287 name = catalog.name
1288 filtered_dict = {}
1289 if filter_dict.get("name") and filter_dict["name"] != name:
1290 continue
1291 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1292 continue
1293 filtered_dict ["name"] = name
1294 filtered_dict ["id"] = catalog_uuid
1295 image_list.append(filtered_dict)
1296
1297 self.logger.debug("List of already created catalog items: {}".format(image_list))
1298 return image_list
1299 except Exception as exp:
            raise vimconn.vimconnException("Exception occurred while retrieving catalog items {}".format(exp))
1301
1302 def get_vappid(self, vdc=None, vapp_name=None):
1303 """ Method takes vdc object and vApp name and returns vapp uuid or None
1304
1305 Args:
1306 vdc: The VDC object.
                vapp_name: the vApp name identifier

            Returns:
                The vApp UUID, otherwise None
1311 """
1312 if vdc is None or vapp_name is None:
1313 return None
1314 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1315 try:
1316 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1317 vdc.ResourceEntities.ResourceEntity)
1318 if len(refs) == 1:
1319 return refs[0].href.split("vapp")[1][1:]
1320 except Exception as e:
1321 self.logger.exception(e)
1322 return False
1323 return None
1324
1325 def check_vapp(self, vdc=None, vapp_uuid=None):
        """ Method returns True or False if the vApp is deployed in vCloud director
1327
1328 Args:
1329 vca: Connector to VCA
1330 vdc: The VDC object.
1331 vappid: vappid is application identifier
1332
1333 Returns:
1334 The return True if vApp deployed
1335 :param vdc:
1336 :param vapp_uuid:
1337 """
1338 try:
1339 refs = filter(lambda ref:
1340 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1341 vdc.ResourceEntities.ResourceEntity)
1342 for ref in refs:
1343 vappid = ref.href.split("vapp")[1][1:]
1344 # find vapp with respected vapp uuid
1345 if vappid == vapp_uuid:
1346 return True
1347 except Exception as e:
1348 self.logger.exception(e)
1349 return False
1350 return False
1351
1352 def get_namebyvappid(self, vdc=None, vapp_uuid=None):
1353 """Method returns vApp name from vCD and lookup done by vapp_id.
1354
1355 Args:
1356 vca: Connector to VCA
1357 vdc: The VDC object.
1358 vapp_uuid: vappid is application identifier
1359
1360 Returns:
                The vApp name, otherwise None
1362 """
1363
1364 try:
1365 refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1366 vdc.ResourceEntities.ResourceEntity)
1367 for ref in refs:
1368 # we care only about UUID the rest doesn't matter
1369 vappid = ref.href.split("vapp")[1][1:]
1370 if vappid == vapp_uuid:
1371 response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
1372 logger=self.logger)
1373
1374 #Retry login if session expired & retry sending request
1375 if response.status_code == 403:
1376 response = self.retry_rest('GET', ref.href)
1377
1378 tree = XmlElementTree.fromstring(response.content)
1379 return tree.attrib['name']
1380 except Exception as e:
1381 self.logger.exception(e)
1382 return None
1383 return None
1384
1385 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1386 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1387 """Adds a VM instance to VIM
1388 Params:
1389 'start': (boolean) indicates if VM must start or created in pause mode.
1390 'image_id','flavor_id': image and flavor VIM id to use for the VM
1391 'net_list': list of interfaces, each one is a dictionary with:
1392 'name': (optional) name for the interface.
1393 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1394 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
                'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1396 'mac_address': (optional) mac address to assign to this interface
1397 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1398 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1399 'type': (mandatory) can be one of:
1400 'virtual', in this case always connected to a network of type 'net_type=bridge'
                    'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
                        can be created unconnected
1403 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1404 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1405 are allocated on the same physical NIC
1406 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1407 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1408 or True, it must apply the default VIM behaviour
1409 After execution the method will add the key:
1410 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1411 interface. 'net_list' is modified
1412 'cloud_config': (optional) dictionary with:
1413 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1414 'users': (optional) list of users to be inserted, each item is a dict with:
1415 'name': (mandatory) user name,
1416 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1417 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1418 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1419 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1420 'dest': (mandatory) string with the destination absolute path
1421 'encoding': (optional, by default text). Can be one of:
1422 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1423 'content' (mandatory): string with the content of the file
1424 'permissions': (optional) string with file permissions, typically octal notation '0644'
1425 'owner': (optional) file owner, string with the format 'owner:group'
1426 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1427 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1428 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1429 'size': (mandatory) string with the size of the disk in GB
            availability_zone_index: Index of availability_zone_list to use for this VM. None if not AV required
1431 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1432 availability_zone_index is None
1433 Returns a tuple with the instance identifier and created_items or raises an exception on error
1434 created_items can be None or a dictionary where this method can include key-values that will be passed to
1435 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1436 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1437 as not present.
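
        Example (illustrative sketch of the main arguments; the ids are placeholders):

            net_list = [{'name': 'eth0', 'net_id': '<vim-network-uuid>', 'type': 'virtual', 'use': 'mgmt'}]
            instance_id, created_items = vim.new_vminstance(name='vnf-vm', start=True,
                                                            image_id='<catalog-uuid>', flavor_id='<flavor-uuid>',
                                                            net_list=net_list)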
1438 """
1439 self.logger.info("Creating new instance for entry {}".format(name))
1440 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
1441 description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
1442
        # new vm name = vmname + '-' + uuid
1444 new_vm_name = [name, '-', str(uuid.uuid4())]
1445 vmname_andid = ''.join(new_vm_name)
1446
1447 # if vm already deployed we return existing uuid
1448 # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
1449 # if vapp_uuid is not None:
1450 # return vapp_uuid
1451
1452 # we check for presence of VDC, Catalog entry and Flavor.
1453 vdc = self.get_vdc_details()
1454 if vdc is None:
1455 raise vimconn.vimconnNotFoundException(
1456 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1457 catalogs = self.vca.get_catalogs()
1458 if catalogs is None:
1459 #Retry once, if failed by refreshing token
1460 self.get_token()
1461 catalogs = self.vca.get_catalogs()
1462 if catalogs is None:
1463 raise vimconn.vimconnNotFoundException(
1464 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1465
1466 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1467 if catalog_hash_name:
1468 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1469 else:
1470 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1471 "(Failed retrieve catalog information {})".format(name, image_id))
1472
1473
1474 # Set vCPU and Memory based on flavor.
1475 vm_cpus = None
1476 vm_memory = None
1477 vm_disk = None
1478 numas = None
1479
1480 if flavor_id is not None:
1481 if flavor_id not in vimconnector.flavorlist:
1482 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1483 "Failed retrieve flavor information "
1484 "flavor id {}".format(name, flavor_id))
1485 else:
1486 try:
1487 flavor = vimconnector.flavorlist[flavor_id]
1488 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1489 vm_memory = flavor[FLAVOR_RAM_KEY]
1490 vm_disk = flavor[FLAVOR_DISK_KEY]
1491 extended = flavor.get("extended", None)
1492 if extended:
1493 numas=extended.get("numas", None)
1494
1495 except Exception as exp:
1496 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1497
        # the image upload creates the template name as the catalog name followed by ' Template'
1499 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1500 power_on = 'false'
1501 if start:
1502 power_on = 'true'
1503
1504 # client must provide at least one entry in net_list; if not, we report an error
1505 # If a net has use 'mgmt', configure it as the primary net and use its NIC index as the primary NIC
1506 # If there is no mgmt net, the first entry in net_list is taken as the primary net (see the commented example below).
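# Illustrative example (hypothetical values): with the net_list below, the 'mgmt' entry becomes
# primary_net even though it is not the first element; without any 'mgmt' entry, net_list[0] is used.
# net_list = [{'use': 'data', 'net_id': '<data-net-uuid>', 'type': 'virtual', 'name': 'eth1'},
#             {'use': 'mgmt', 'net_id': '<mgmt-net-uuid>', 'type': 'virtual', 'name': 'eth0'}]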
1507 primary_net = None
1508 primary_netname = None
1509 network_mode = 'bridged'
1510 if net_list is not None and len(net_list) > 0:
1511 for net in net_list:
1512 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1513 primary_net = net
1514 if primary_net is None:
1515 primary_net = net_list[0]
1516
1517 try:
1518 primary_net_id = primary_net['net_id']
1519 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1520 if 'name' in network_dict:
1521 primary_netname = network_dict['name']
1522
1523 except KeyError:
1524 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1525 else:
1526 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
1527
1528 # use: 'data', 'bridge', 'mgmt'
1529 # create vApp. Set vcpu and ram based on flavor id.
1530 try:
1531 for retry in (1,2):
1532 vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1533 self.get_catalogbyid(image_id, catalogs),
1534 network_name=None, # None while creating vapp
1535 network_mode=network_mode,
1536 vm_name=vmname_andid,
1537 vm_cpus=vm_cpus, # can be None if flavor is None
1538 vm_memory=vm_memory) # can be None if flavor is None
1539
1540 if not vapptask and retry==1:
1541 self.get_token() # Retry getting token
1542 continue
1543 else:
1544 break
1545
1546 if vapptask is None or vapptask is False:
1547 raise vimconn.vimconnUnexpectedResponse(
1548 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1549 if type(vapptask) is VappTask:
1550 self.vca.block_until_completed(vapptask)
1551
1552 except Exception as exp:
1553 raise vimconn.vimconnUnexpectedResponse(
1554 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1555
1556 # we should have now vapp in undeployed state.
1557 try:
1558 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1559
1560 except Exception as exp:
1561 raise vimconn.vimconnUnexpectedResponse(
1562 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1563 .format(vmname_andid, exp))
1564
1565 if vapp_uuid is None:
1566 raise vimconn.vimconnUnexpectedResponse(
1567 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1568 vmname_andid))
1569
1570 # Add PCI passthrough/SRIOV configurations
1571 vm_obj = None
1572 pci_devices_info = []
1573 sriov_net_info = []
1574 reserve_memory = False
1575
1576 for net in net_list:
1577 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1578 pci_devices_info.append(net)
1579 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
1580 sriov_net_info.append(net)
1581
1582 #Add PCI
1583 if len(pci_devices_info) > 0:
1584 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1585 vmname_andid ))
1586 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1587 pci_devices_info,
1588 vmname_andid)
1589 if PCI_devices_status:
1590 self.logger.info("Added PCI devives {} to VM {}".format(
1591 pci_devices_info,
1592 vmname_andid)
1593 )
1594 reserve_memory = True
1595 else:
1596 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1597 pci_devices_info,
1598 vmname_andid)
1599 )
1600
1601 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1602 # Modify vm disk
1603 if vm_disk:
1604 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1605 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1606 if result :
1607 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1608
1609 #Add new or existing disks to vApp
1610 if disk_list:
1611 added_existing_disk = False
1612 for disk in disk_list:
1613 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1614 image_id = disk['image_id']
1615 # Adding CD-ROM to VM
1616 # will revisit code once specification ready to support this feature
1617 self.insert_media_to_vm(vapp, image_id)
1618 elif "image_id" in disk and disk["image_id"] is not None:
1619 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1620 disk["image_id"] , vapp_uuid))
1621 self.add_existing_disk(catalogs=catalogs,
1622 image_id=disk["image_id"],
1623 size = disk["size"],
1624 template_name=templateName,
1625 vapp_uuid=vapp_uuid
1626 )
1627 added_existing_disk = True
1628 else:
1629 # Wait until the previously added existing disk is reflected in the vCD database/API
1630 if added_existing_disk:
1631 time.sleep(5)
1632 added_existing_disk = False
1633 self.add_new_disk(vapp_uuid, disk['size'])
1634
1635 if numas:
1636 # Assigning numa affinity setting
1637 for numa in numas:
1638 if 'paired-threads-id' in numa:
1639 paired_threads_id = numa['paired-threads-id']
1640 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1641
1642 # add NICs & connect to networks in netlist
1643 try:
1644 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1645 nicIndex = 0
1646 primary_nic_index = 0
1647 for net in net_list:
1648 # openmano uses network id in UUID format.
1649 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
1650 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1651 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1652
1653 if 'net_id' not in net:
1654 continue
1655
1656 # Using net_id as vim_id, i.e. the VIM interface id, as there is no separate VIM interface id
1657 # The same value will be returned in refresh_vms_status() as vim_interface_id
1658 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1659
1660 interface_net_id = net['net_id']
1661 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1662 interface_network_mode = net['use']
1663
1664 if interface_network_mode == 'mgmt':
1665 primary_nic_index = nicIndex
1666
1667 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1668 - DHCP (The IP address is obtained from a DHCP service.)
1669 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1670 - NONE (No IP addressing mode specified.)"""
1671
1672 if primary_netname is not None:
1673 nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
1674 if len(nets) == 1:
1675 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
1676
1677 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1678 task = vapp.connect_to_network(nets[0].name, nets[0].href)
1679 if type(task) is GenericTask:
1680 self.vca.block_until_completed(task)
1681 # connect network to VM - with all DHCP by default
1682
1683 type_list = ('PF', 'PCI-PASSTHROUGH', 'VF', 'SR-IOV', 'VFnotShared')
1684 if 'type' in net and net['type'] not in type_list:
1685 # fetching nic type from vnf
1686 if 'model' in net:
1687 nic_type = net['model']
1688 self.logger.info("new_vminstance(): adding network adapter "\
1689 "to a network {}".format(nets[0].name))
1690 self.add_network_adapter_to_vms(vapp, nets[0].name,
1691 primary_nic_index,
1692 nicIndex,
1693 net,
1694 nic_type=nic_type)
1695 else:
1696 self.logger.info("new_vminstance(): adding network adapter "\
1697 "to a network {}".format(nets[0].name))
1698 self.add_network_adapter_to_vms(vapp, nets[0].name,
1699 primary_nic_index,
1700 nicIndex,
1701 net)
1702 nicIndex += 1
1703
1704 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1705 # cloud-init for ssh-key injection
1706 if cloud_config:
1707 self.cloud_init(vapp,cloud_config)
1708
1709 # deploy and power on vm
1710 self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
1711 deploytask = vapp.deploy(powerOn=False)
1712 if type(deploytask) is GenericTask:
1713 self.vca.block_until_completed(deploytask)
1714
1715 # ############# Stub code for SRIOV #################
1716 #Add SRIOV
1717 # if len(sriov_net_info) > 0:
1718 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1719 # vmname_andid ))
1720 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1721 # sriov_net_info,
1722 # vmname_andid)
1723 # if sriov_status:
1724 # self.logger.info("Added SRIOV {} to VM {}".format(
1725 # sriov_net_info,
1726 # vmname_andid)
1727 # )
1728 # reserve_memory = True
1729 # else:
1730 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1731 # sriov_net_info,
1732 # vmname_andid)
1733 # )
1734
1735 # If VM has PCI devices or SRIOV reserve memory for VM
1736 if reserve_memory:
1737 memReserve = vm_obj.config.hardware.memoryMB
1738 spec = vim.vm.ConfigSpec()
1739 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1740 task = vm_obj.ReconfigVM_Task(spec=spec)
1741 if task:
1742 result = self.wait_for_vcenter_task(task, vcenter_conect)
1743 self.logger.info("Reserved memory {} MB for "
1744 "VM VM status: {}".format(str(memReserve), result))
1745 else:
1746 self.logger.info("Fail to reserved memory {} to VM {}".format(
1747 str(memReserve), str(vm_obj)))
1748
1749 self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
1750
1751 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1752 poweron_task = vapp.poweron()
1753 if type(poweron_task) is GenericTask:
1754 self.vca.block_until_completed(poweron_task)
1755
1756 except Exception as exp :
1757 # it might be the case that a specific mandatory entry in the dict is empty, or some other pyVcloud exception
1758 self.logger.debug("new_vminstance(): Failed to create new vm instance {} with exception {}"
1759 .format(name, exp))
1760 raise vimconn.vimconnException("new_vminstance(): Failed to create new vm instance {} with exception {}"
1761 .format(name, exp))
1762
1763 # check if the vApp is deployed; if so, return the vApp UUID, otherwise raise an exception
1764 wait_time = 0
1765 vapp_uuid = None
1766 while wait_time <= MAX_WAIT_TIME:
1767 try:
1768 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1769 except Exception as exp:
1770 raise vimconn.vimconnUnexpectedResponse(
1771 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1772 .format(vmname_andid, exp))
1773
1774 if vapp and vapp.me.deployed:
1775 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1776 break
1777 else:
1778 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1779 time.sleep(INTERVAL_TIME)
1780
1781 wait_time +=INTERVAL_TIME
1782
1783 if vapp_uuid is not None:
1784 return vapp_uuid, None
1785 else:
1786 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
1787
1788 ##
1789 ##
1790 ## based on current discussion
1791 ##
1792 ##
1793 ## server:
1794 # created: '2016-09-08T11:51:58'
1795 # description: simple-instance.linux1.1
1796 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1797 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1798 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1799 # status: ACTIVE
1800 # error_msg:
1801 # interfaces: …
1802 #
1803 def get_vminstance(self, vim_vm_uuid=None):
1804 """Returns the VM instance information from VIM"""
1805
1806 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1807
1808 vdc = self.get_vdc_details()
1809 if vdc is None:
1810 raise vimconn.vimconnConnectionException(
1811 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1812
1813 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1814 if not vm_info_dict:
1815 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1816 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1817
1818 status_key = vm_info_dict['status']
1819 error = ''
1820 try:
1821 vm_dict = {'created': vm_info_dict['created'],
1822 'description': vm_info_dict['name'],
1823 'status': vcdStatusCode2manoFormat[int(status_key)],
1824 'hostId': vm_info_dict['vmuuid'],
1825 'error_msg': error,
1826 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1827
1828 if 'interfaces' in vm_info_dict:
1829 vm_dict['interfaces'] = vm_info_dict['interfaces']
1830 else:
1831 vm_dict['interfaces'] = []
1832 except KeyError:
1833 vm_dict = {'created': '',
1834 'description': '',
1835 'status': vcdStatusCode2manoFormat[int(-1)],
1836 'hostId': vm_info_dict['vmuuid'],
1837 'error_msg': "Inconsistency state",
1838 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1839
1840 return vm_dict
1841
1842 def delete_vminstance(self, vm__vim_uuid, created_items=None):
1843 """Method poweroff and remove VM instance from vcloud director network.
1844
1845 Args:
1846 vm__vim_uuid: VM UUID
1847
1848 Returns:
1849 The instance identifier
1850 """
1851
1852 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1853
1854 vdc = self.get_vdc_details()
1855 if vdc is None:
1856 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1857 self.tenant_name))
1858 raise vimconn.vimconnException(
1859 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1860
1861 try:
1862 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1863 if vapp_name is None:
1864 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1865 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1866 else:
1867 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1868
1869 # Delete vApp and wait for status change if task executed and vApp is None.
1870 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1871
1872 if vapp:
1873 if vapp.me.deployed:
1874 self.logger.info("Powering off vApp {}".format(vapp_name))
1875 #Power off vApp
1876 powered_off = False
1877 wait_time = 0
1878 while wait_time <= MAX_WAIT_TIME:
1879 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1880 if not vapp:
1881 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1882 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1883
1884 power_off_task = vapp.poweroff()
1885 if type(power_off_task) is GenericTask:
1886 result = self.vca.block_until_completed(power_off_task)
1887 if result:
1888 powered_off = True
1889 break
1890 else:
1891 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1892 time.sleep(INTERVAL_TIME)
1893
1894 wait_time +=INTERVAL_TIME
1895 if not powered_off:
1896 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1897 else:
1898 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1899
1900 #Undeploy vApp
1901 self.logger.info("Undeploy vApp {}".format(vapp_name))
1902 wait_time = 0
1903 undeployed = False
1904 while wait_time <= MAX_WAIT_TIME:
1905 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1906 if not vapp:
1907 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1908 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1909 undeploy_task = vapp.undeploy(action='powerOff')
1910
1911 if type(undeploy_task) is GenericTask:
1912 result = self.vca.block_until_completed(undeploy_task)
1913 if result:
1914 undeployed = True
1915 break
1916 else:
1917 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1918 time.sleep(INTERVAL_TIME)
1919
1920 wait_time +=INTERVAL_TIME
1921
1922 if not undeployed:
1923 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1924
1925 # delete vapp
1926 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1927 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1928
1929 if vapp is not None:
1930 wait_time = 0
1931 result = False
1932
1933 while wait_time <= MAX_WAIT_TIME:
1934 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1935 if not vapp:
1936 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1937 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1938
1939 delete_task = vapp.delete()
1940
1941 if type(delete_task) is GenericTask:
1942 result = self.vca.block_until_completed(delete_task)
1944 if result:
1945 break
1946 else:
1947 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1948 time.sleep(INTERVAL_TIME)
1949
1950 wait_time +=INTERVAL_TIME
1951
1952 if not result:
1953 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1954
1955 except:
1956 self.logger.debug(traceback.format_exc())
1957 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1958
1959 if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
1960 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
1961 return vm__vim_uuid
1962 else:
1963 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1964
1965 def refresh_vms_status(self, vm_list):
1966 """Get the status of the virtual machines and their interfaces/ports
1967 Params: the list of VM identifiers
1968 Returns a dictionary with:
1969 vm_id: #VIM id of this Virtual Machine
1970 status: #Mandatory. Text with one of:
1971 # DELETED (not found at vim)
1972 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1973 # OTHER (Vim reported other status not understood)
1974 # ERROR (VIM indicates an ERROR status)
1975 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
1976 # CREATING (on building process), ERROR
1977 # ACTIVE:NoMgmtIP (active but none of its interfaces has an IP address)
1978 #
1979 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1980 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1981 interfaces:
1982 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1983 mac_address: #Text format XX:XX:XX:XX:XX:XX
1984 vim_net_id: #network id where this interface is connected
1985 vim_interface_id: #interface/port VIM id
1986 ip_address: #null, or text with IPv4, IPv6 address
1987 """
1988
1989 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
1990
1991 vdc = self.get_vdc_details()
1992 if vdc is None:
1993 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1994
1995 vms_dict = {}
1996 nsx_edge_list = []
1997 for vmuuid in vm_list:
1998 vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
1999 if vmname is not None:
2000
2001 try:
2002 vm_pci_details = self.get_vm_pci_details(vmuuid)
2003 the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
2004 vm_info = the_vapp.get_vms_details()
2005 vm_status = vm_info[0]['status']
2006 vm_info[0].update(vm_pci_details)
2007
2008 vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
2009 'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
2010 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2011
2012 # get networks
2013 vm_app_networks = the_vapp.get_vms_network_info()
2014 for vapp_network in vm_app_networks:
2015 for vm_network in vapp_network:
2016 if vm_network['name'] == vmname:
2017 #Assign IP Address based on MAC Address in NSX DHCP lease info
2018 if vm_network['ip'] is None:
2019 if not nsx_edge_list:
2020 nsx_edge_list = self.get_edge_details()
2021 if nsx_edge_list is None:
2022 raise vimconn.vimconnException("refresh_vms_status:"\
2023 "Failed to get edge details from NSX Manager")
2024 if vm_network['mac'] is not None:
2025 vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
2026
2027 vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
2028 interface = {"mac_address": vm_network['mac'],
2029 "vim_net_id": vm_net_id,
2030 "vim_interface_id": vm_net_id,
2031 'ip_address': vm_network['ip']}
2032 # interface['vim_info'] = yaml.safe_dump(vm_network)
2033 vm_dict["interfaces"].append(interface)
2034 # add a vm to vm dict
2035 vms_dict.setdefault(vmuuid, vm_dict)
2036 except Exception as exp:
2037 self.logger.debug("Error in response {}".format(exp))
2038 self.logger.debug(traceback.format_exc())
2039
2040 return vms_dict
2041
2042
2043 def get_edge_details(self):
2044 """Get the NSX edge list from NSX Manager
2045 Returns list of NSX edges
2046 """
2047 edge_list = []
2048 rheaders = {'Content-Type': 'application/xml'}
2049 nsx_api_url = '/api/4.0/edges'
2050
2051 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2052
2053 try:
2054 resp = requests.get(self.nsx_manager + nsx_api_url,
2055 auth = (self.nsx_user, self.nsx_password),
2056 verify = False, headers = rheaders)
2057 if resp.status_code == requests.codes.ok:
2058 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2059 for edge_pages in paged_Edge_List:
2060 if edge_pages.tag == 'edgePage':
2061 for edge_summary in edge_pages:
2062 if edge_summary.tag == 'pagingInfo':
2063 for element in edge_summary:
2064 if element.tag == 'totalCount' and element.text == '0':
2065 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2066 .format(self.nsx_manager))
2067
2068 if edge_summary.tag == 'edgeSummary':
2069 for element in edge_summary:
2070 if element.tag == 'id':
2071 edge_list.append(element.text)
2072 else:
2073 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2074 .format(self.nsx_manager))
2075
2076 if not edge_list:
2077 raise vimconn.vimconnException("get_edge_details: "\
2078 "No NSX edge details found: {}"
2079 .format(self.nsx_manager))
2080 else:
2081 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2082 return edge_list
2083 else:
2084 self.logger.debug("get_edge_details: "
2085 "Failed to get NSX edge details from NSX Manager: {}"
2086 .format(resp.content))
2087 return None
2088
2089 except Exception as exp:
2090 self.logger.debug("get_edge_details: "\
2091 "Failed to get NSX edge details from NSX Manager: {}"
2092 .format(exp))
2093 raise vimconn.vimconnException("get_edge_details: "\
2094 "Failed to get NSX edge details from NSX Manager: {}"
2095 .format(exp))
2096
2097
2098 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2099 """Get IP address details from NSX edges, using the MAC address
2100 PARAMS: nsx_edges : List of NSX edges
2101 mac_address : Find IP address corresponding to this MAC address
2102 Returns: IP address corresponding to the provided MAC address
2103 """
2104
2105 ip_addr = None
2106 rheaders = {'Content-Type': 'application/xml'}
2107
2108 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2109
2110 try:
2111 for edge in nsx_edges:
2112 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2113
2114 resp = requests.get(self.nsx_manager + nsx_api_url,
2115 auth = (self.nsx_user, self.nsx_password),
2116 verify = False, headers = rheaders)
2117
2118 if resp.status_code == requests.codes.ok:
2119 dhcp_leases = XmlElementTree.fromstring(resp.text)
2120 for child in dhcp_leases:
2121 if child.tag == 'dhcpLeaseInfo':
2122 dhcpLeaseInfo = child
2123 for leaseInfo in dhcpLeaseInfo:
2124 for elem in leaseInfo:
2125 if (elem.tag)=='macAddress':
2126 edge_mac_addr = elem.text
2127 if (elem.tag)=='ipAddress':
2128 ip_addr = elem.text
2129 if edge_mac_addr is not None:
2130 if edge_mac_addr == mac_address:
2131 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2132 .format(ip_addr, mac_address,edge))
2133 return ip_addr
2134 else:
2135 self.logger.debug("get_ipaddr_from_NSXedge: "\
2136 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2137 .format(resp.content))
2138
2139 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2140 return None
2141
2142 except XmlElementTree.ParseError as Err:
2143 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2144
2145
2146 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
2147 """Send and action over a VM instance from VIM
2148 Returns the vm_id if the action was successfully sent to the VIM"""
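# Illustrative sketch: action_dict carries a single action key; only the key is inspected by this
# connector. Hypothetical calls:
# self.action_vminstance('<vapp-uuid>', {'start': None})
# self.action_vminstance('<vapp-uuid>', {'shutdown': None})
# self.action_vminstance('<vapp-uuid>', {'pause': None})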
2149
2150 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2151 if vm__vim_uuid is None or action_dict is None:
2152 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2153
2154 vdc = self.get_vdc_details()
2155 if vdc is None:
2156 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2157
2158 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
2159 if vapp_name is None:
2160 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2161 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2162 else:
2163 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2164
2165 try:
2166 the_vapp = self.vca.get_vapp(vdc, vapp_name)
2167 # TODO fix all status
2168 if "start" in action_dict:
2169 vm_info = the_vapp.get_vms_details()
2170 vm_status = vm_info[0]['status']
2171 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2172 if vm_status == "Suspended" or vm_status == "Powered off":
2173 power_on_task = the_vapp.poweron()
2174 result = self.vca.block_until_completed(power_on_task)
2175 self.instance_actions_result("start", result, vapp_name)
2176 elif "rebuild" in action_dict:
2177 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2178 rebuild_task = the_vapp.deploy(powerOn=True)
2179 result = self.vca.block_until_completed(rebuild_task)
2180 self.instance_actions_result("rebuild", result, vapp_name)
2181 elif "pause" in action_dict:
2182 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2183 pause_task = the_vapp.undeploy(action='suspend')
2184 result = self.vca.block_until_completed(pause_task)
2185 self.instance_actions_result("pause", result, vapp_name)
2186 elif "resume" in action_dict:
2187 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2188 power_task = the_vapp.poweron()
2189 result = self.vca.block_until_completed(power_task)
2190 self.instance_actions_result("resume", result, vapp_name)
2191 elif "shutoff" in action_dict or "shutdown" in action_dict:
2192 action_name , value = action_dict.items()[0]
2193 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2194 power_off_task = the_vapp.undeploy(action='powerOff')
2195 result = self.vca.block_until_completed(power_off_task)
2196 if action_name == "shutdown":
2197 self.instance_actions_result("shutdown", result, vapp_name)
2198 else:
2199 self.instance_actions_result("shutoff", result, vapp_name)
2200 elif "forceOff" in action_dict:
2201 result = the_vapp.undeploy(action='force')
2202 self.instance_actions_result("forceOff", result, vapp_name)
2203 elif "reboot" in action_dict:
2204 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2205 reboot_task = the_vapp.reboot()
2206 else:
2207 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2208 return None
2209 except Exception as exp :
2210 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2211 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2212
2213 def instance_actions_result(self, action, result, vapp_name):
2214 if result:
2215 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2216 else:
2217 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2218
2219 def get_vminstance_console(self, vm_id, console_type="vnc"):
2220 """
2221 Get a console for the virtual machine
2222 Params:
2223 vm_id: uuid of the VM
2224 console_type, can be:
2225 "novnc" (by default), "xvpvnc" for VNC types,
2226 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2227 Returns dict with the console parameters:
2228 protocol: ssh, ftp, http, https, ...
2229 server: usually ip address
2230 port: the http, ssh, ... port
2231 suffix: extra text, e.g. the http path and query string
2232 """
2233 raise vimconn.vimconnNotImplemented("Should have implemented this")
2234
2235 # NOT USED METHODS in current version
2236
2237 def host_vim2gui(self, host, server_dict):
2238 """Transform host dictionary from VIM format to GUI format,
2239 and append to the server_dict
2240 """
2241 raise vimconn.vimconnNotImplemented("Should have implemented this")
2242
2243 def get_hosts_info(self):
2244 """Get the information of deployed hosts
2245 Returns the hosts content"""
2246 raise vimconn.vimconnNotImplemented("Should have implemented this")
2247
2248 def get_hosts(self, vim_tenant):
2249 """Get the hosts and deployed instances
2250 Returns the hosts content"""
2251 raise vimconn.vimconnNotImplemented("Should have implemented this")
2252
2253 def get_processor_rankings(self):
2254 """Get the processor rankings in the VIM database"""
2255 raise vimconn.vimconnNotImplemented("Should have implemented this")
2256
2257 def new_host(self, host_data):
2258 """Adds a new host to VIM"""
2259 '''Returns status code of the VIM response'''
2260 raise vimconn.vimconnNotImplemented("Should have implemented this")
2261
2262 def new_external_port(self, port_data):
2263 """Adds a external port to VIM"""
2264 '''Returns the port identifier'''
2265 raise vimconn.vimconnNotImplemented("Should have implemented this")
2266
2267 def new_external_network(self, net_name, net_type):
2268 """Adds a external network to VIM (shared)"""
2269 '''Returns the network identifier'''
2270 raise vimconn.vimconnNotImplemented("Should have implemented this")
2271
2272 def connect_port_network(self, port_id, network_id, admin=False):
2273 """Connects a external port to a network"""
2274 '''Returns status code of the VIM response'''
2275 raise vimconn.vimconnNotImplemented("Should have implemented this")
2276
2277 def new_vminstancefromJSON(self, vm_data):
2278 """Adds a VM instance to VIM"""
2279 '''Returns the instance identifier'''
2280 raise vimconn.vimconnNotImplemented("Should have implemented this")
2281
2282 def get_network_name_by_id(self, network_uuid=None):
2283 """Method gets vcloud director network named based on supplied uuid.
2284
2285 Args:
2286 network_uuid: network_id
2287
2288 Returns:
2289 The network name, or None if not found.
2290 """
2291
2292 if not network_uuid:
2293 return None
2294
2295 try:
2296 org_dict = self.get_org(self.org_uuid)
2297 if 'networks' in org_dict:
2298 org_network_dict = org_dict['networks']
2299 for net_uuid in org_network_dict:
2300 if net_uuid == network_uuid:
2301 return org_network_dict[net_uuid]
2302 except:
2303 self.logger.debug("Exception in get_network_name_by_id")
2304 self.logger.debug(traceback.format_exc())
2305
2306 return None
2307
2308 def get_network_id_by_name(self, network_name=None):
2309 """Method gets vcloud director network uuid based on supplied name.
2310
2311 Args:
2312 network_name: network_name
2313 Returns:
2314 The network uuid, or None if not found.
2316 """
2317
2318 if not network_name:
2319 self.logger.debug("get_network_id_by_name() : Network name is empty")
2320 return None
2321
2322 try:
2323 org_dict = self.get_org(self.org_uuid)
2324 if org_dict and 'networks' in org_dict:
2325 org_network_dict = org_dict['networks']
2326 for net_uuid,net_name in org_network_dict.iteritems():
2327 if net_name == network_name:
2328 return net_uuid
2329
2330 except KeyError as exp:
2331 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2332
2333 return None
2334
2335 def list_org_action(self):
2336 """
2337 Method leverages vCloud Director and queries the available organizations for a particular user
2338
2339 Args:
2340 None. Uses the active VCA connection (self.vca).
2342
2343 Returns:
2344 The XML response
2345 """
2346
2347 url_list = [self.vca.host, '/api/org']
2348 vm_list_rest_call = ''.join(url_list)
2349
2350 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2351 response = Http.get(url=vm_list_rest_call,
2352 headers=self.vca.vcloud_session.get_vcloud_headers(),
2353 verify=self.vca.verify,
2354 logger=self.vca.logger)
2355
2356 if response.status_code == 403:
2357 response = self.retry_rest('GET', vm_list_rest_call)
2358
2359 if response.status_code == requests.codes.ok:
2360 return response.content
2361
2362 return None
2363
2364 def get_org_action(self, org_uuid=None):
2365 """
2366 Method leverages vCloud Director and retrieves the available objects for an organization.
2367
2368 Args:
2369 org_uuid - is the uuid of the organization to query.
2371
2372 Returns:
2373 The XML response
2374 """
2375
2376 if org_uuid is None:
2377 return None
2378
2379 url_list = [self.vca.host, '/api/org/', org_uuid]
2380 vm_list_rest_call = ''.join(url_list)
2381
2382 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2383 response = Http.get(url=vm_list_rest_call,
2384 headers=self.vca.vcloud_session.get_vcloud_headers(),
2385 verify=self.vca.verify,
2386 logger=self.vca.logger)
2387
2388 #Retry login if session expired & retry sending request
2389 if response.status_code == 403:
2390 response = self.retry_rest('GET', vm_list_rest_call)
2391
2392 if response.status_code == requests.codes.ok:
2393 return response.content
2394
2395 return None
2396
2397 def get_org(self, org_uuid=None):
2398 """
2399 Method retrieves the details of an organization in vCloud Director
2400
2401 Args:
2402 org_uuid - is an organization uuid.
2403
2404 Returns:
2405 The dictionary with the following keys:
2406 "networks" - network list under the org
2407 "catalogs" - catalog list under the org
2408 "vdcs" - vdc list under the org
2409 """
2410
2411 org_dict = {}
2412
2413 if org_uuid is None:
2414 return org_dict
2415
2416 content = self.get_org_action(org_uuid=org_uuid)
2417 try:
2418 vdc_list = {}
2419 network_list = {}
2420 catalog_list = {}
2421 vm_list_xmlroot = XmlElementTree.fromstring(content)
2422 for child in vm_list_xmlroot:
2423 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2424 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2425 org_dict['vdcs'] = vdc_list
2426 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2427 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2428 org_dict['networks'] = network_list
2429 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2430 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2431 org_dict['catalogs'] = catalog_list
2432 except:
2433 pass
2434
2435 return org_dict
2436
2437 def get_org_list(self):
2438 """
2439 Method retrieves the available organizations in vCloud Director
2440
2441 Args:
2442 vca - is active VCA connection.
2443
2444 Returns:
2445 The dictionary, keyed by organization UUID, with the organization name as value
2446 """
2447
2448 org_dict = {}
2449
2450 content = self.list_org_action()
2451 try:
2452 vm_list_xmlroot = XmlElementTree.fromstring(content)
2453 for vm_xml in vm_list_xmlroot:
2454 if vm_xml.tag.split("}")[1] == 'Org':
2455 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2456 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2457 except:
2458 pass
2459
2460 return org_dict
2461
2462 def vms_view_action(self, vdc_name=None):
2463 """ Method leverages vCloud director vms query call
2464
2465 Args:
2466 vca - is active VCA connection.
2467 vdc_name - is a vdc name that will be used to query vms action
2468
2469 Returns:
2470 The XML response
2471 """
2472 vca = self.connect()
2473 if vdc_name is None:
2474 return None
2475
2476 url_list = [vca.host, '/api/vms/query']
2477 vm_list_rest_call = ''.join(url_list)
2478
2479 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2480 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2481 vca.vcloud_session.organization.Link)
2482 if len(refs) == 1:
2483 response = Http.get(url=vm_list_rest_call,
2484 headers=vca.vcloud_session.get_vcloud_headers(),
2485 verify=vca.verify,
2486 logger=vca.logger)
2487 if response.status_code == requests.codes.ok:
2488 return response.content
2489
2490 return None
2491
2492 def get_vapp_list(self, vdc_name=None):
2493 """
2494 Method retrieves the list of vApps deployed in vCloud Director and returns a dictionary
2495 containing all vApps deployed for the queried VDC.
2496 The dictionary is keyed by vApp UUID
2497
2498
2499 Args:
2500 vca - is active VCA connection.
2501 vdc_name - is a vdc name that will be used to query vms action
2502
2503 Returns:
2504 The dictionary, keyed by vApp UUID
2505 """
2506
2507 vapp_dict = {}
2508 if vdc_name is None:
2509 return vapp_dict
2510
2511 content = self.vms_view_action(vdc_name=vdc_name)
2512 try:
2513 vm_list_xmlroot = XmlElementTree.fromstring(content)
2514 for vm_xml in vm_list_xmlroot:
2515 if vm_xml.tag.split("}")[1] == 'VMRecord':
2516 if vm_xml.attrib['isVAppTemplate'] == 'true':
2517 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2518 if 'vappTemplate-' in rawuuid[0]:
2519 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2520 # vm and use raw UUID as key
2521 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2522 except:
2523 pass
2524
2525 return vapp_dict
2526
2527 def get_vm_list(self, vdc_name=None):
2528 """
2529 Method retrieves the list of VMs deployed in vCloud Director. It returns a dictionary
2530 containing all VMs deployed for the queried VDC.
2531 The dictionary is keyed by VM UUID
2532
2533
2534 Args:
2535 vca - is active VCA connection.
2536 vdc_name - is a vdc name that will be used to query vms action
2537
2538 Returns:
2539 The dictionary, keyed by VM UUID
2540 """
2541 vm_dict = {}
2542
2543 if vdc_name is None:
2544 return vm_dict
2545
2546 content = self.vms_view_action(vdc_name=vdc_name)
2547 try:
2548 vm_list_xmlroot = XmlElementTree.fromstring(content)
2549 for vm_xml in vm_list_xmlroot:
2550 if vm_xml.tag.split("}")[1] == 'VMRecord':
2551 if vm_xml.attrib['isVAppTemplate'] == 'false':
2552 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2553 if 'vm-' in rawuuid[0]:
2554 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2555 # vm and use raw UUID as key
2556 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2557 except:
2558 pass
2559
2560 return vm_dict
2561
2562 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2563 """
2564 Method retrieves a VM deployed in vCloud Director. It returns the VM attributes as a dictionary
2565 containing the matching VMs for the queried VDC.
2566 The dictionary is keyed by VM UUID
2567
2568
2569 Args:
2570 vca - is active VCA connection.
2571 vdc_name - is a vdc name that will be used to query vms action
2572
2573 Returns:
2574 The dictionary, keyed by VM UUID
2575 """
2576 vm_dict = {}
2577 vca = self.connect()
2578 if not vca:
2579 raise vimconn.vimconnConnectionException("self.connect() is failed")
2580
2581 if vdc_name is None:
2582 return vm_dict
2583
2584 content = self.vms_view_action(vdc_name=vdc_name)
2585 try:
2586 vm_list_xmlroot = XmlElementTree.fromstring(content)
2587 for vm_xml in vm_list_xmlroot:
2588 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2589 # lookup done by UUID
2590 if isuuid:
2591 if vapp_name in vm_xml.attrib['container']:
2592 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2593 if 'vm-' in rawuuid[0]:
2594 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2595 break
2596 # lookup done by Name
2597 else:
2598 if vapp_name in vm_xml.attrib['name']:
2599 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2600 if 'vm-' in rawuuid[0]:
2601 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2602 break
2603 except:
2604 pass
2605
2606 return vm_dict
2607
2608 def get_network_action(self, network_uuid=None):
2609 """
2610 Method leverages vCloud Director and queries a network based on the network uuid
2611
2612 Args:
2613 vca - is active VCA connection.
2614 network_uuid - is a network uuid
2615
2616 Returns:
2617 The XML response
2618 """
2619
2620 if network_uuid is None:
2621 return None
2622
2623 url_list = [self.vca.host, '/api/network/', network_uuid]
2624 vm_list_rest_call = ''.join(url_list)
2625
2626 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2627 response = Http.get(url=vm_list_rest_call,
2628 headers=self.vca.vcloud_session.get_vcloud_headers(),
2629 verify=self.vca.verify,
2630 logger=self.vca.logger)
2631
2632 #Retry login if session expired & retry sending request
2633 if response.status_code == 403:
2634 response = self.retry_rest('GET', vm_list_rest_call)
2635
2636 if response.status_code == requests.codes.ok:
2637 return response.content
2638
2639 return None
2640
2641 def get_vcd_network(self, network_uuid=None):
2642 """
2643 Method retrieves available network from vCloud Director
2644
2645 Args:
2646 network_uuid - is VCD network UUID
2647
2648 Each element is serialized as a key : value pair
2649 
2650 The following keys are available for access, e.g. network_configuration['Gateway']
2651 <Configuration>
2652 <IpScopes>
2653 <IpScope>
2654 <IsInherited>true</IsInherited>
2655 <Gateway>172.16.252.100</Gateway>
2656 <Netmask>255.255.255.0</Netmask>
2657 <Dns1>172.16.254.201</Dns1>
2658 <Dns2>172.16.254.202</Dns2>
2659 <DnsSuffix>vmwarelab.edu</DnsSuffix>
2660 <IsEnabled>true</IsEnabled>
2661 <IpRanges>
2662 <IpRange>
2663 <StartAddress>172.16.252.1</StartAddress>
2664 <EndAddress>172.16.252.99</EndAddress>
2665 </IpRange>
2666 </IpRanges>
2667 </IpScope>
2668 </IpScopes>
2669 <FenceMode>bridged</FenceMode>
2670
2671 Returns:
2672 The network configuration dictionary
2673 """
2674
2675 network_configuration = {}
2676 if network_uuid is None:
2677 return network_uuid
2678
2679 try:
2680 content = self.get_network_action(network_uuid=network_uuid)
2681 vm_list_xmlroot = XmlElementTree.fromstring(content)
2682
2683 network_configuration['status'] = vm_list_xmlroot.get("status")
2684 network_configuration['name'] = vm_list_xmlroot.get("name")
2685 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
2686
2687 for child in vm_list_xmlroot:
2688 if child.tag.split("}")[1] == 'IsShared':
2689 network_configuration['isShared'] = child.text.strip()
2690 if child.tag.split("}")[1] == 'Configuration':
2691 for configuration in child.iter():
2692 tagKey = configuration.tag.split("}")[1].strip()
2693 if tagKey != "":
2694 network_configuration[tagKey] = configuration.text.strip()
2695 return network_configuration
2696 except Exception as exp :
2697 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
2698 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2699
2700 return network_configuration
2701
2702 def delete_network_action(self, network_uuid=None):
2703 """
2704 Method deletes the given network from vCloud Director
2705
2706 Args:
2707 network_uuid - is a network uuid that client wish to delete
2708
2709 Returns:
2710 True if the delete request was accepted (HTTP 202), otherwise False
2711 """
2712
2713 vca = self.connect_as_admin()
2714 if not vca:
2715 raise vimconn.vimconnConnectionException("self.connect() is failed")
2716 if network_uuid is None:
2717 return False
2718
2719 url_list = [vca.host, '/api/admin/network/', network_uuid]
2720 vm_list_rest_call = ''.join(url_list)
2721
2722 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2723 response = Http.delete(url=vm_list_rest_call,
2724 headers=vca.vcloud_session.get_vcloud_headers(),
2725 verify=vca.verify,
2726 logger=vca.logger)
2727
2728 if response.status_code == 202:
2729 return True
2730
2731 return False
2732
2733 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2734 ip_profile=None, isshared='true'):
2735 """
2736 Method creates a network in vCloud Director
2737
2738 Args:
2739 network_name - is network name to be created.
2740 net_type - can be 'bridge','data','ptp','mgmt'.
2741 ip_profile is a dict containing the IP parameters of the network
2742 isshared - is a boolean
2743 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2744 It is an optional attribute; by default, if no parent network is indicated, the first available one will be used.
2745
2746 Returns:
2747 The return network uuid or return None
2748 """
2749
2750 new_network_name = [network_name, '-', str(uuid.uuid4())]
2751 content = self.create_network_rest(network_name=''.join(new_network_name),
2752 ip_profile=ip_profile,
2753 net_type=net_type,
2754 parent_network_uuid=parent_network_uuid,
2755 isshared=isshared)
2756 if content is None:
2757 self.logger.debug("Failed create network {}.".format(network_name))
2758 return None
2759
2760 try:
2761 vm_list_xmlroot = XmlElementTree.fromstring(content)
2762 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2763 if len(vcd_uuid) == 4:
2764 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2765 return vcd_uuid[3]
2766 except:
2767 self.logger.debug("Failed create network {}".format(network_name))
2768 return None
2769
2770 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2771 ip_profile=None, isshared='true'):
2772 """
2773 Method creates a network in vCloud Director via the REST API
2774
2775 Args:
2776 network_name - is network name to be created.
2777 net_type - can be 'bridge','data','ptp','mgmt'.
2778 ip_profile is a dict containing the IP parameters of the network
2779 isshared - is a boolean
2780 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2781 It is an optional attribute; by default, if no parent network is indicated, the first available one will be used.
2782
2783 Returns:
2784 The return network uuid or return None
2785 """
2786
2787 vca = self.connect_as_admin()
2788 if not vca:
2789 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2790 if network_name is None:
2791 return None
2792
2793 url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
2794 vm_list_rest_call = ''.join(url_list)
2795 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2796 response = Http.get(url=vm_list_rest_call,
2797 headers=vca.vcloud_session.get_vcloud_headers(),
2798 verify=vca.verify,
2799 logger=vca.logger)
2800
2801 provider_network = None
2802 available_networks = None
2803 add_vdc_rest_url = None
2804
2805 if response.status_code != requests.codes.ok:
2806 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2807 response.status_code))
2808 return None
2809 else:
2810 try:
2811 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2812 for child in vm_list_xmlroot:
2813 if child.tag.split("}")[1] == 'ProviderVdcReference':
2814 provider_network = child.attrib.get('href')
2815 # application/vnd.vmware.admin.providervdc+xml
2816 if child.tag.split("}")[1] == 'Link':
2817 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
2818 and child.attrib.get('rel') == 'add':
2819 add_vdc_rest_url = child.attrib.get('href')
2820 except:
2821 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2822 self.logger.debug("Respond body {}".format(response.content))
2823 return None
2824
2825 # find pvdc provided available network
2826 response = Http.get(url=provider_network,
2827 headers=vca.vcloud_session.get_vcloud_headers(),
2828 verify=vca.verify,
2829 logger=vca.logger)
2830 if response.status_code != requests.codes.ok:
2831 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2832 response.status_code))
2833 return None
2834
2835 # available_networks.split("/")[-1]
2836
2837 if parent_network_uuid is None:
2838 try:
2839 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2840 for child in vm_list_xmlroot.iter():
2841 if child.tag.split("}")[1] == 'AvailableNetworks':
2842 for networks in child.iter():
2843 # application/vnd.vmware.admin.network+xml
2844 if networks.attrib.get('href') is not None:
2845 available_networks = networks.attrib.get('href')
2846 break
2847 except:
2848 return None
2849
2850 try:
2851 #Configure IP profile of the network
2852 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
2853
2854 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
2855 subnet_rand = random.randint(0, 255)
2856 ip_base = "192.168.{}.".format(subnet_rand)
2857 ip_profile['subnet_address'] = ip_base + "0/24"
2858 else:
2859 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
2860
2861 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
2862 ip_profile['gateway_address']=ip_base + "1"
2863 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
2864 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
2865 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
2866 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
2867 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
2868 ip_profile['dhcp_start_address']=ip_base + "3"
2869 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
2870 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
2871 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
2872 ip_profile['dns_address']=ip_base + "2"
2873
2874 gateway_address=ip_profile['gateway_address']
2875 dhcp_count=int(ip_profile['dhcp_count'])
2876 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
2877
2878 if ip_profile['dhcp_enabled']==True:
2879 dhcp_enabled='true'
2880 else:
2881 dhcp_enabled='false'
2882 dhcp_start_address=ip_profile['dhcp_start_address']
2883
2884 #derive dhcp_end_address from dhcp_start_address & dhcp_count
2885 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
2886 end_ip_int += dhcp_count - 1
2887 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
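# Worked example (hypothetical profile): with dhcp_start_address '192.168.5.3' and dhcp_count 50,
# end_ip_int = int(netaddr.IPAddress('192.168.5.3')) + 49, so dhcp_end_address becomes '192.168.5.52'.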
2888
2889 ip_version=ip_profile['ip_version']
2890 dns_address=ip_profile['dns_address']
2891 except KeyError as exp:
2892 self.logger.debug("Create Network REST: Key error {}".format(exp))
2893 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
2894
2895 # either use client provided UUID or search for a first available
2896 # if both are not defined we return none
2897 if parent_network_uuid is not None:
2898 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
2899 add_vdc_rest_url = ''.join(url_list)
2900
2901 #Creating all networks as Direct Org VDC type networks.
2902 #Unused in case of Underlay (data/ptp) network interface.
2903 fence_mode="bridged"
2904 is_inherited='false'
2905 dns_list = dns_address.split(";")
2906 dns1 = dns_list[0]
2907 dns2_text = ""
2908 if len(dns_list) >= 2:
2909 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
2910 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2911 <Description>Openmano created</Description>
2912 <Configuration>
2913 <IpScopes>
2914 <IpScope>
2915 <IsInherited>{1:s}</IsInherited>
2916 <Gateway>{2:s}</Gateway>
2917 <Netmask>{3:s}</Netmask>
2918 <Dns1>{4:s}</Dns1>{5:s}
2919 <IsEnabled>{6:s}</IsEnabled>
2920 <IpRanges>
2921 <IpRange>
2922 <StartAddress>{7:s}</StartAddress>
2923 <EndAddress>{8:s}</EndAddress>
2924 </IpRange>
2925 </IpRanges>
2926 </IpScope>
2927 </IpScopes>
2928 <ParentNetwork href="{9:s}"/>
2929 <FenceMode>{10:s}</FenceMode>
2930 </Configuration>
2931 <IsShared>{11:s}</IsShared>
2932 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
2933 subnet_address, dns1, dns2_text, dhcp_enabled,
2934 dhcp_start_address, dhcp_end_address, available_networks,
2935 fence_mode, isshared)
2936
2937 headers = vca.vcloud_session.get_vcloud_headers()
2938 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
2939 try:
2940 response = Http.post(url=add_vdc_rest_url,
2941 headers=headers,
2942 data=data,
2943 verify=vca.verify,
2944 logger=vca.logger)
2945
2946 if response.status_code != 201:
2947 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
2948 .format(response.status_code,response.content))
2949 else:
2950 network = networkType.parseString(response.content, True)
2951 create_nw_task = network.get_Tasks().get_Task()[0]
2952
2953 # if we all ok we respond with content after network creation completes
2954 # otherwise by default return None
2955 if create_nw_task is not None:
2956 self.logger.debug("Create Network REST : Waiting for Network creation complete")
2957 status = vca.block_until_completed(create_nw_task)
2958 if status:
2959 return response.content
2960 else:
2961 self.logger.debug("create_network_rest task failed. Network Create response : {}"
2962 .format(response.content))
2963 except Exception as exp:
2964 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
2965
2966 return None
2967
2968 def convert_cidr_to_netmask(self, cidr_ip=None):
2969 """
2970 Method converts a CIDR prefix length to dotted-decimal netmask format
2971 Args:
2972 cidr_ip : CIDR IP address
2973 Returns:
2974 netmask : Converted netmask
2975 """
2976 if cidr_ip is not None:
2977 if '/' in cidr_ip:
2978 network, net_bits = cidr_ip.split('/')
2979 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2980 else:
2981 netmask = cidr_ip
2982 return netmask
2983 return None
2984
2985 def get_provider_rest(self, vca=None):
2986 """
2987 Method gets the provider VDC view from vCloud Director
2988 
2989 Args:
2990 vca - is an active VCA connection.
2991 
2992 Returns:
2993 The XML content of the response, or None
2996 """
2997
2998 url_list = [vca.host, '/api/admin']
2999 response = Http.get(url=''.join(url_list),
3000 headers=vca.vcloud_session.get_vcloud_headers(),
3001 verify=vca.verify,
3002 logger=vca.logger)
3003
3004 if response.status_code == requests.codes.ok:
3005 return response.content
3006 return None
3007
3008 def create_vdc(self, vdc_name=None):
3009
3010 vdc_dict = {}
3011
3012 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
3013 if xml_content is not None:
3014 try:
3015 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
3016 for child in task_resp_xmlroot:
3017 if child.tag.split("}")[1] == 'Owner':
3018 vdc_id = child.attrib.get('href').split("/")[-1]
3019 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
3020 return vdc_dict
3021 except:
3022 self.logger.debug("Respond body {}".format(xml_content))
3023
3024 return None
3025
3026 def create_vdc_from_tmpl_rest(self, vdc_name=None):
3027 """
3028 Method creates a VDC in vCloud Director based on a VDC template.
3029 It uses a pre-defined template that must be named openmano.
3030
3031 Args:
3032 vdc_name - name of a new vdc.
3033
3034 Returns:
3035 The XML content of the response, or None
3036 """
3037
3038 self.logger.info("Creating new vdc {}".format(vdc_name))
3039 vca = self.connect()
3040 if not vca:
3041 raise vimconn.vimconnConnectionException("self.connect() is failed")
3042 if vdc_name is None:
3043 return None
3044
3045 url_list = [vca.host, '/api/vdcTemplates']
3046 vm_list_rest_call = ''.join(url_list)
3047 response = Http.get(url=vm_list_rest_call,
3048 headers=vca.vcloud_session.get_vcloud_headers(),
3049 verify=vca.verify,
3050 logger=vca.logger)
3051
3052 # container url to a template
3053 vdc_template_ref = None
3054 try:
3055 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3056 for child in vm_list_xmlroot:
3057 # application/vnd.vmware.admin.providervdc+xml
3058 # we need to find a template from which we instantiate the VDC
3059 if child.tag.split("}")[1] == 'VdcTemplate':
3060 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
3061 vdc_template_ref = child.attrib.get('href')
3062 except:
3063 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3064 self.logger.debug("Response body {}".format(response.content))
3065 return None
3066
3067 # if we didn't find the required pre-defined template, return None
3068 if vdc_template_ref is None:
3069 return None
3070
3071 try:
3072 # instantiate vdc
3073 url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
3074 vm_list_rest_call = ''.join(url_list)
3075 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3076 <Source href="{1:s}"></Source>
3077 <Description>openmano</Description>
3078 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
3079 headers = vca.vcloud_session.get_vcloud_headers()
3080 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
3081 response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
3082 logger=vca.logger)
3083
3084 vdc_task = taskType.parseString(response.content, True)
3085 if type(vdc_task) is GenericTask:
3086 self.vca.block_until_completed(vdc_task)
3087
3088 # on success return the response content, otherwise None by default
3089 if response.status_code >= 200 and response.status_code < 300:
3090 return response.content
3091 return None
3092 except:
3093 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3094 self.logger.debug("Response body {}".format(response.content))
3095
3096 return None
3097
3098 def create_vdc_rest(self, vdc_name=None):
3099 """
3100 Method creates a vdc in vCloud Director via the admin REST API
3101 
3102 Args:
3103 vdc_name - name of the vdc to be created.
3104 An available provider vdc reference is used and the vdc is
3105 created with a ReservationPool allocation model.
3106 
3107 Returns:
3108 The XML content of the response (201 Created), or None
3109 """
3110
3111 self.logger.info("Creating new vdc {}".format(vdc_name))
3112
3113 vca = self.connect_as_admin()
3114 if not vca:
3115 raise vimconn.vimconnConnectionException("self.connect_as_admin() failed")
3116 if vdc_name is None:
3117 return None
3118
3119 url_list = [vca.host, '/api/admin/org/', self.org_uuid]
3120 vm_list_rest_call = ''.join(url_list)
3121 if vca.vcloud_session and vca.vcloud_session.organization:
3122 response = Http.get(url=vm_list_rest_call,
3123 headers=vca.vcloud_session.get_vcloud_headers(),
3124 verify=vca.verify,
3125 logger=vca.logger)
3126
3127 provider_vdc_ref = None
3128 add_vdc_rest_url = None
3129 available_networks = None
3130
3131 if response.status_code != requests.codes.ok:
3132 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3133 response.status_code))
3134 return None
3135 else:
3136 try:
3137 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3138 for child in vm_list_xmlroot:
3139 # find the 'add' link of type application/vnd.vmware.admin.createVdcParams+xml
3140 if child.tag.split("}")[1] == 'Link':
3141 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
3142 and child.attrib.get('rel') == 'add':
3143 add_vdc_rest_url = child.attrib.get('href')
3144 except:
3145 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3146 self.logger.debug("Response body {}".format(response.content))
3147 return None
3148
3149 response = self.get_provider_rest(vca=vca)
3150 try:
3151 vm_list_xmlroot = XmlElementTree.fromstring(response)
3152 for child in vm_list_xmlroot:
3153 if child.tag.split("}")[1] == 'ProviderVdcReferences':
3154 for sub_child in child:
3155 provider_vdc_ref = sub_child.attrib.get('href')
3156 except:
3157 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3158 self.logger.debug("Response body {}".format(response))
3159 return None
3160
3161 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
3162 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
3163 <AllocationModel>ReservationPool</AllocationModel>
3164 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
3165 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
3166 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
3167 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
3168 <ProviderVdcReference
3169 name="Main Provider"
3170 href="{2:s}" />
3171 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
3172 escape(vdc_name),
3173 provider_vdc_ref)
3174
3175 headers = vca.vcloud_session.get_vcloud_headers()
3176 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
3177 response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
3178 logger=vca.logger)
3179
3180 # on success (201 Created) return the response content, otherwise None
3181 if response.status_code == 201:
3182 return response.content
3183 return None
3184
3185 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
3186 """
3187 Method retrieves vApp details from vCloud Director
3188 
3189 Args:
3190 vapp_uuid - vApp identifier.
3191 need_admin_access - when True the query is made with an admin session.
3192 Returns:
3193 A dict with the parsed vApp details (empty dict on failure)
3194 """
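# Illustrative shape of the returned dict (keys depend on the vApp state, values
# are placeholders, not taken from a real vCD response):
#   {'created': '<timestamp>', 'status': '4', 'name': '<vm name>', 'vmuuid': '<uuid>',
#    'interfaces': [{'network': '<net name>', ...}],
#    'vm_virtual_hardware': {'disk_size': '<MB>', 'disk_edit_href': '<url>'}}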
3195
3196 parsed_respond = {}
3197 vca = None
3198
3199 if need_admin_access:
3200 vca = self.connect_as_admin()
3201 else:
3202 vca = self.vca
3203
3204 if not vca:
3205 raise vimconn.vimconnConnectionException("Failed to get a vCloud Director connection")
3206 if vapp_uuid is None:
3207 return None
3208
3209 url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
3210 get_vapp_restcall = ''.join(url_list)
3211
3212 if vca.vcloud_session and vca.vcloud_session.organization:
3213 response = Http.get(url=get_vapp_restcall,
3214 headers=vca.vcloud_session.get_vcloud_headers(),
3215 verify=vca.verify,
3216 logger=vca.logger)
3217
3218 if response.status_code == 403:
3219 if not need_admin_access:
3220 response = self.retry_rest('GET', get_vapp_restcall)
3221
3222 if response.status_code != requests.codes.ok:
3223 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
3224 response.status_code))
3225 return parsed_respond
3226
3227 try:
3228 xmlroot_respond = XmlElementTree.fromstring(response.content)
3229 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
3230
3231 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
3232 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
3233 'vmw': 'http://www.vmware.com/schema/ovf',
3234 'vm': 'http://www.vmware.com/vcloud/v1.5',
3235 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
3236 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
3237 "xmlns":"http://www.vmware.com/vcloud/v1.5"
3238 }
3239
3240 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
3241 if created_section is not None:
3242 parsed_respond['created'] = created_section.text
3243
3244 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
3245 if network_section is not None and 'networkName' in network_section.attrib:
3246 parsed_respond['networkname'] = network_section.attrib['networkName']
3247
3248 ipscopes_section = \
3249 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
3250 namespaces)
3251 if ipscopes_section is not None:
3252 for ipscope in ipscopes_section:
3253 for scope in ipscope:
3254 tag_key = scope.tag.split("}")[1]
3255 if tag_key == 'IpRanges':
3256 ip_ranges = scope.getchildren()
3257 for ipblock in ip_ranges:
3258 for block in ipblock:
3259 parsed_respond[block.tag.split("}")[1]] = block.text
3260 else:
3261 parsed_respond[tag_key] = scope.text
3262
3263 # parse children section for other attrib
3264 children_section = xmlroot_respond.find('vm:Children/', namespaces)
3265 if children_section is not None:
3266 parsed_respond['name'] = children_section.attrib['name']
3267 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
3268 if "nestedHypervisorEnabled" in children_section.attrib else None
3269 parsed_respond['deployed'] = children_section.attrib['deployed']
3270 parsed_respond['status'] = children_section.attrib['status']
3271 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
3272 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
3273 nic_list = []
3274 for adapters in network_adapter:
3275 adapter_key = adapters.tag.split("}")[1]
3276 if adapter_key == 'PrimaryNetworkConnectionIndex':
3277 parsed_respond['primarynetwork'] = adapters.text
3278 if adapter_key == 'NetworkConnection':
3279 vnic = {}
3280 if 'network' in adapters.attrib:
3281 vnic['network'] = adapters.attrib['network']
3282 for adapter in adapters:
3283 setting_key = adapter.tag.split("}")[1]
3284 vnic[setting_key] = adapter.text
3285 nic_list.append(vnic)
3286
3287 for link in children_section:
3288 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3289 if link.attrib['rel'] == 'screen:acquireTicket':
3290 parsed_respond['acquireTicket'] = link.attrib
3291 if link.attrib['rel'] == 'screen:acquireMksTicket':
3292 parsed_respond['acquireMksTicket'] = link.attrib
3293
3294 parsed_respond['interfaces'] = nic_list
3295 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
3296 if vCloud_extension_section is not None:
3297 vm_vcenter_info = {}
3298 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
3299 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
3300 if vmext is not None:
3301 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
3302 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
3303
3304 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
3305 vm_virtual_hardware_info = {}
3306 if virtual_hardware_section is not None:
3307 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
3308 if item.find("rasd:Description",namespaces).text == "Hard disk":
3309 disk_size = item.find("rasd:HostResource" ,namespaces
3310 ).attrib["{"+namespaces['vm']+"}capacity"]
3311
3312 vm_virtual_hardware_info["disk_size"]= disk_size
3313 break
3314
3315 for link in virtual_hardware_section:
3316 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3317 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
3318 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
3319 break
3320
3321 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
3322 except Exception as exp :
3323 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
3324 return parsed_respond
3325
3326 def acuire_console(self, vm_uuid=None):
3327
3328 if vm_uuid is None:
3329 return None
3330
3331 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3332 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
3333 console_dict = vm_dict['acquireTicket']
3334 console_rest_call = console_dict['href']
3335
3336 response = Http.post(url=console_rest_call,
3337 headers=self.vca.vcloud_session.get_vcloud_headers(),
3338 verify=self.vca.verify,
3339 logger=self.vca.logger)
3340 if response.status_code == 403:
3341 response = self.retry_rest('POST', console_rest_call)
3342
3343 if response.status_code == requests.codes.ok:
3344 return response.content
3345
3346 return None
3347
3348 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3349 """
3350 Method modifies the VM disk size to match the flavor
3351 
3352 Args:
3353 vapp_uuid - vApp identifier.
3354 flavor_disk - disk size in GB as specified in VNFD (flavor)
3355 
3356 Returns:
3357 Status of the resize (True when no resize is needed), or None on failure
3358 """
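# Example (illustrative numbers): a 40 GB flavor disk becomes 40 * 1024 = 40960 MB,
# which is then compared against the current disk capacity reported by vCD in MB.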
3359 status = None
3360 try:
3361 #Flavor disk is in GB convert it into MB
3362 flavor_disk = int(flavor_disk) * 1024
3363 vm_details = self.get_vapp_details_rest(vapp_uuid)
3364 if vm_details:
3365 vm_name = vm_details["name"]
3366 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3367
3368 if vm_details and "vm_virtual_hardware" in vm_details:
3369 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3370 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3371
3372 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3373
3374 if flavor_disk > vm_disk:
3375 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3376 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3377 vm_disk, flavor_disk ))
3378 else:
3379 status = True
3380 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3381
3382 return status
3383 except Exception as exp:
3384 self.logger.info("Error occurred while modifying disk size {}".format(exp))
3385
3386
3387 def modify_vm_disk_rest(self, disk_href , disk_size):
3388 """
3389 Method modifies the VM disk size through the vCD REST API
3390 
3391 Args:
3392 disk_href - vCD API URL to GET and PUT disk data
3393 disk_size - new disk size in MB
3394 
3395 Returns:
3396 Status of the disk modification task, or None on failure
3397 """
3398 if disk_href is None or disk_size is None:
3399 return None
3400
3401 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3402 response = Http.get(url=disk_href,
3403 headers=self.vca.vcloud_session.get_vcloud_headers(),
3404 verify=self.vca.verify,
3405 logger=self.vca.logger)
3406
3407 if response.status_code == 403:
3408 response = self.retry_rest('GET', disk_href)
3409
3410 if response.status_code != requests.codes.ok:
3411 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
3412 response.status_code))
3413 return None
3414 try:
3415 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
3416 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
3417 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
3418
3419 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
3420 if item.find("rasd:Description",namespaces).text == "Hard disk":
3421 disk_item = item.find("rasd:HostResource" ,namespaces )
3422 if disk_item is not None:
3423 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
3424 break
3425
3426 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
3427 xml_declaration=True)
3428
3429 #Send PUT request to modify disk size
3430 headers = self.vca.vcloud_session.get_vcloud_headers()
3431 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
3432
3433 response = Http.put(url=disk_href,
3434 data=data,
3435 headers=headers,
3436 verify=self.vca.verify, logger=self.logger)
3437
3438 if response.status_code == 403:
3439 add_headers = {'Content-Type': headers['Content-Type']}
3440 response = self.retry_rest('PUT', disk_href, add_headers, data)
3441
3442 if response.status_code != 202:
3443 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
3444 response.status_code))
3445 else:
3446 modify_disk_task = taskType.parseString(response.content, True)
3447 if type(modify_disk_task) is GenericTask:
3448 status = self.vca.block_until_completed(modify_disk_task)
3449 return status
3450
3451 return None
3452
3453 except Exception as exp :
3454 self.logger.info("Error occurred calling rest api for modifying disk size {}".format(exp))
3455 return None
3456
3457 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3458 """
3459 Method to attach pci devices to VM
3460
3461 Args:
3462 vapp_uuid - uuid of vApp/VM
3463 pci_devices - pci devices information as specified in VNFD (flavor)
3464 
3465 Returns:
3466 The status of the add PCI device task, the vm object and
3467 the vcenter_conect object
3468 """
3469 vm_obj = None
3470 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3471 vcenter_conect, content = self.get_vcenter_content()
3472 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3473
3474 if vm_moref_id:
3475 try:
3476 no_of_pci_devices = len(pci_devices)
3477 if no_of_pci_devices > 0:
3478 #Get VM and its host
3479 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3480 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3481 if host_obj and vm_obj:
3482 #get PCI devices from the host on which the vapp is currently installed
3483 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3484
3485 if avilable_pci_devices is None:
3486 #find other hosts with active pci devices
3487 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3488 content,
3489 no_of_pci_devices
3490 )
3491
3492 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3493 #Migrate vm to the host where PCI devices are available
3494 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3495 task = self.relocate_vm(new_host_obj, vm_obj)
3496 if task is not None:
3497 result = self.wait_for_vcenter_task(task, vcenter_conect)
3498 self.logger.info("Migrate VM status: {}".format(result))
3499 host_obj = new_host_obj
3500 else:
3501 self.logger.info("Failed to migrate VM {}: relocation task could not be created".format(vmname_andid))
3502 raise vimconn.vimconnNotFoundException(
3503 "Fail to migrate VM : {} to host {}".format(
3504 vmname_andid,
3505 new_host_obj)
3506 )
3507
3508 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3509 #Add PCI devices one by one
3510 for pci_device in avilable_pci_devices:
3511 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3512 if task:
3513 status= self.wait_for_vcenter_task(task, vcenter_conect)
3514 if status:
3515 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3516 else:
3517 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3518 return True, vm_obj, vcenter_conect
3519 else:
3520 self.logger.error("Currently there is no host with"\
3521 " {} available PCI devices required for VM {}".format(
3522 no_of_pci_devices,
3523 vmname_andid)
3524 )
3525 raise vimconn.vimconnNotFoundException(
3526 "Currently there is no host with {} "\
3527 "available PCI devices required for VM {}".format(
3528 no_of_pci_devices,
3529 vmname_andid))
3530 else:
3531 self.logger.debug("No information about PCI devices {}".format(pci_devices))
3532
3533 except vmodl.MethodFault as error:
3534 self.logger.error("Error occurred while adding PCI devices: {}".format(error))
3535 return None, vm_obj, vcenter_conect
3536
3537 def get_vm_obj(self, content, mob_id):
3538 """
3539 Method to get the vSphere VM object associated with a given moref ID
3540 
3541 Args:
3542 content - vCenter content object
3543 mob_id - managed object reference (moref) ID of the VM
3544
3545 Returns:
3546 VM and host object
3547 """
3548 vm_obj = None
3549 host_obj = None
3550 try :
3551 container = content.viewManager.CreateContainerView(content.rootFolder,
3552 [vim.VirtualMachine], True
3553 )
3554 for vm in container.view:
3555 mobID = vm._GetMoId()
3556 if mobID == mob_id:
3557 vm_obj = vm
3558 host_obj = vm_obj.runtime.host
3559 break
3560 except Exception as exp:
3561 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3562 return host_obj, vm_obj
3563
3564 def get_pci_devices(self, host, need_devices):
3565 """
3566 Method to get the details of pci devices on given host
3567 Args:
3568 host - vSphere host object
3569 need_devices - number of pci devices needed on host
3570
3571 Returns:
3572 array of pci devices
3573 """
3574 all_devices = []
3575 all_device_ids = []
3576 used_devices_ids = []
3577
3578 try:
3579 if host:
3580 pciPassthruInfo = host.config.pciPassthruInfo
3581 pciDevies = host.hardware.pciDevice
3582
3583 for pci_status in pciPassthruInfo:
3584 if pci_status.passthruActive:
3585 for device in pciDevies:
3586 if device.id == pci_status.id:
3587 all_device_ids.append(device.id)
3588 all_devices.append(device)
3589
3590 #check if devices are in use
3591 avalible_devices = all_devices
3592 for vm in host.vm:
3593 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3594 vm_devices = vm.config.hardware.device
3595 for device in vm_devices:
3596 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3597 if device.backing.id in all_device_ids:
3598 for use_device in avalible_devices:
3599 if use_device.id == device.backing.id:
3600 avalible_devices.remove(use_device)
3601 used_devices_ids.append(device.backing.id)
3602 self.logger.debug("Device {} from devices {}"\
3603 " is in use".format(device.backing.id,
3604 device)
3605 )
3606 if len(avalible_devices) < need_devices:
3607 self.logger.debug("Host {} doesn't have the {} active PCI devices required".format(host,
3608 need_devices))
3609 self.logger.debug("found only {} devices {}".format(len(avalible_devices),
3610 avalible_devices))
3611 return None
3612 else:
3613 required_devices = avalible_devices[:need_devices]
3614 self.logger.info("Found {} PCI devices on host {}, only {} required".format(
3615 len(avalible_devices),
3616 host,
3617 need_devices))
3618 self.logger.info("Returning {} devices as {}".format(need_devices,
3619 required_devices ))
3620 return required_devices
3621
3622 except Exception as exp:
3623 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3624
3625 return None
3626
3627 def get_host_and_PCIdevices(self, content, need_devices):
3628 """
3629 Method to get pci device details across all hosts
3630
3631 Args:
3632 content - vCenter content object
3633 need_devices - number of pci devices needed on host
3634
3635 Returns:
3636 array of pci devices and host object
3637 """
3638 host_obj = None
3639 pci_device_objs = None
3640 try:
3641 if content:
3642 container = content.viewManager.CreateContainerView(content.rootFolder,
3643 [vim.HostSystem], True)
3644 for host in container.view:
3645 devices = self.get_pci_devices(host, need_devices)
3646 if devices:
3647 host_obj = host
3648 pci_device_objs = devices
3649 break
3650 except Exception as exp:
3651 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3652
3653 return host_obj,pci_device_objs
3654
3655 def relocate_vm(self, dest_host, vm) :
3656 """
3657 Method to relocate a VM to a new host
3658
3659 Args:
3660 dest_host - vSphere host object
3661 vm - vSphere VM object
3662
3663 Returns:
3664 task object
3665 """
3666 task = None
3667 try:
3668 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3669 task = vm.Relocate(relocate_spec)
3670 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3671 except Exception as exp:
3672 self.logger.error("Error occurred while relocating VM {} to new host {}: {}".format(
3673 vm, dest_host, exp))
3674 return task
3675
3676 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3677 """
3678 Waits and provides updates on a vSphere task
3679 """
3680 while task.info.state == vim.TaskInfo.State.running:
3681 time.sleep(2)
3682
3683 if task.info.state == vim.TaskInfo.State.success:
3684 if task.info.result is not None and not hideResult:
3685 self.logger.info('{} completed successfully, result: {}'.format(
3686 actionName,
3687 task.info.result))
3688 else:
3689 self.logger.info('Task {} completed successfully.'.format(actionName))
3690 else:
3691 self.logger.error('{} did not complete successfully: {} '.format(
3692 actionName,
3693 task.info.error)
3694 )
3695
3696 return task.info.result
3697
3698 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3699 """
3700 Method to add pci device in given VM
3701
3702 Args:
3703 host_object - vSphere host object
3704 vm_object - vSphere VM object
3705 host_pci_dev - host_pci_dev must be one of the devices from the
3706 host_object.hardware.pciDevice list
3707 which is configured as a PCI passthrough device
3708
3709 Returns:
3710 task object
3711 """
3712 task = None
3713 if vm_object and host_object and host_pci_dev:
3714 try :
3715 #Add PCI device to VM
3716 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3717 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3718
3719 if host_pci_dev.id not in systemid_by_pciid:
3720 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3721 return None
3722
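# The host PCI device id is reduced to its low 16 bits and rendered as hex, e.g.
# (illustrative) a deviceId of 0x10fb yields the string '10fb'.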
3723 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3724 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3725 id=host_pci_dev.id,
3726 systemId=systemid_by_pciid[host_pci_dev.id],
3727 vendorId=host_pci_dev.vendorId,
3728 deviceName=host_pci_dev.deviceName)
3729
3730 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3731
3732 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3733 new_device_config.operation = "add"
3734 vmConfigSpec = vim.vm.ConfigSpec()
3735 vmConfigSpec.deviceChange = [new_device_config]
3736
3737 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3738 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3739 host_pci_dev, vm_object, host_object)
3740 )
3741 except Exception as exp:
3742 self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
3743 host_pci_dev,
3744 vm_object,
3745 exp))
3746 return task
3747
3748 def get_vm_vcenter_info(self):
3749 """
3750 Method to get the vCenter access details configured for this tenant
3751 
3752 Args:
3753 None (values are taken from the datacenter --config)
3754 
3755 Returns:
3756 dict with the vCenter ip, port, user and password
3757 """
3758 vm_vcenter_info = {}
3759
3760 if self.vcenter_ip is not None:
3761 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3762 else:
3763 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3764 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3765 if self.vcenter_port is not None:
3766 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3767 else:
3768 raise vimconn.vimconnException(message="vCenter port is not provided."\
3769 " Please provide vCenter port while attaching datacenter to tenant in --config")
3770 if self.vcenter_user is not None:
3771 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3772 else:
3773 raise vimconn.vimconnException(message="vCenter user is not provided."\
3774 " Please provide vCenter user while attaching datacenter to tenant in --config")
3775
3776 if self.vcenter_password is not None:
3777 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3778 else:
3779 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3780 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3781
3782 return vm_vcenter_info
3783
3784
3785 def get_vm_pci_details(self, vmuuid):
3786 """
3787 Method to get VM PCI device details from vCenter
3788
3789 Args:
3790 vmuuid - uuid of the vApp/VM
3791 
3792 Returns:
3793 dict of PCI devices attached to the VM
3794
3795 """
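# Illustrative shape of the returned dict (values are placeholders):
#   {'host_name': '<esxi hostname>', 'host_ip': '<esxi vnic ip>',
#    '<device label>': {'devide_id': '<backing id>', 'pciSlotNumber': <slot>}}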
3796 vm_pci_devices_info = {}
3797 try:
3798 vcenter_conect, content = self.get_vcenter_content()
3799 vm_moref_id = self.get_vm_moref_id(vmuuid)
3800 if vm_moref_id:
3801 #Get VM and its host
3802 if content:
3803 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3804 if host_obj and vm_obj:
3805 vm_pci_devices_info["host_name"]= host_obj.name
3806 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3807 for device in vm_obj.config.hardware.device:
3808 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3809 device_details={'devide_id':device.backing.id,
3810 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3811 }
3812 vm_pci_devices_info[device.deviceInfo.label] = device_details
3813 else:
3814 self.logger.error("Can not connect to vCenter while getting "\
3815 "PCI devices information")
3816 return vm_pci_devices_info
3817 except Exception as exp:
3818 self.logger.error("Error occurred while getting VM information"\
3819 " for VM : {}".format(exp))
3820 raise vimconn.vimconnException(message=exp)
3821
3822 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
3823 """
3824 Method to add a network adapter of the given type to the VMs of a vApp
3825 Args :
3826 vapp - vApp object; network_name - name of the network
3827 primary_nic_index - int value for primary nic index
3828 nicIndex - int value for nic index
3829 net - per-interface network dict (may carry 'floating_ip' / 'ip_address'); nic_type - NIC adapter model (optional)
3830 Returns:
3831 None
3832 """
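# IP allocation mode selected below: a 'floating_ip' request maps to POOL, an
# explicit 'ip_address' to MANUAL, and anything else falls back to DHCP.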
3833
3834 try:
3835 ip_address = None
3836 floating_ip = False
3837 if 'floating_ip' in net: floating_ip = net['floating_ip']
3838
3839 # Stub for ip_address feature
3840 if 'ip_address' in net: ip_address = net['ip_address']
3841
3842 if floating_ip:
3843 allocation_mode = "POOL"
3844 elif ip_address:
3845 allocation_mode = "MANUAL"
3846 else:
3847 allocation_mode = "DHCP"
3848
3849 if not nic_type:
3850 for vms in vapp._get_vms():
3851 vm_id = (vms.id).split(':')[-1]
3852
3853 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3854
3855 response = Http.get(url=url_rest_call,
3856 headers=self.vca.vcloud_session.get_vcloud_headers(),
3857 verify=self.vca.verify,
3858 logger=self.vca.logger)
3859
3860 if response.status_code == 403:
3861 response = self.retry_rest('GET', url_rest_call)
3862
3863 if response.status_code != 200:
3864 self.logger.error("REST call {} failed reason : {}"\
3865 "status code : {}".format(url_rest_call,
3866 response.content,
3867 response.status_code))
3868 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3869 "network connection section")
3870
3871 data = response.content
3872 if '<PrimaryNetworkConnectionIndex>' not in data:
3873 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3874 <NetworkConnection network="{}">
3875 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3876 <IsConnected>true</IsConnected>
3877 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3878 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3879 allocation_mode)
3880 # Stub for ip_address feature
3881 if ip_address:
3882 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3883 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3884
3885 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3886 else:
3887 new_item = """<NetworkConnection network="{}">
3888 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3889 <IsConnected>true</IsConnected>
3890 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3891 </NetworkConnection>""".format(network_name, nicIndex,
3892 allocation_mode)
3893 # Stub for ip_address feature
3894 if ip_address:
3895 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3896 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3897
3898 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3899
3900 headers = self.vca.vcloud_session.get_vcloud_headers()
3901 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3902 response = Http.put(url=url_rest_call, headers=headers, data=data,
3903 verify=self.vca.verify,
3904 logger=self.vca.logger)
3905
3906 if response.status_code == 403:
3907 add_headers = {'Content-Type': headers['Content-Type']}
3908 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3909
3910 if response.status_code != 202:
3911 self.logger.error("REST call {} failed reason : {}"\
3912 "status code : {} ".format(url_rest_call,
3913 response.content,
3914 response.status_code))
3915 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3916 "network connection section")
3917 else:
3918 nic_task = taskType.parseString(response.content, True)
3919 if isinstance(nic_task, GenericTask):
3920 self.vca.block_until_completed(nic_task)
3921 self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
3922 "default NIC type".format(vm_id))
3923 else:
3924 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
3925 "connect NIC type".format(vm_id))
3926 else:
3927 for vms in vapp._get_vms():
3928 vm_id = (vms.id).split(':')[-1]
3929
3930 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3931
3932 response = Http.get(url=url_rest_call,
3933 headers=self.vca.vcloud_session.get_vcloud_headers(),
3934 verify=self.vca.verify,
3935 logger=self.vca.logger)
3936
3937 if response.status_code == 403:
3938 response = self.retry_rest('GET', url_rest_call)
3939
3940 if response.status_code != 200:
3941 self.logger.error("REST call {} failed reason : {}"\
3942 "status code : {}".format(url_rest_call,
3943 response.content,
3944 response.status_code))
3945 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3946 "network connection section")
3947 data = response.content
3948 if '<PrimaryNetworkConnectionIndex>' not in data:
3949 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3950 <NetworkConnection network="{}">
3951 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3952 <IsConnected>true</IsConnected>
3953 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3954 <NetworkAdapterType>{}</NetworkAdapterType>
3955 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3956 allocation_mode, nic_type)
3957 # Stub for ip_address feature
3958 if ip_address:
3959 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3960 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3961
3962 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3963 else:
3964 new_item = """<NetworkConnection network="{}">
3965 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3966 <IsConnected>true</IsConnected>
3967 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3968 <NetworkAdapterType>{}</NetworkAdapterType>
3969 </NetworkConnection>""".format(network_name, nicIndex,
3970 allocation_mode, nic_type)
3971 # Stub for ip_address feature
3972 if ip_address:
3973 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3974 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3975
3976 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3977
3978 headers = self.vca.vcloud_session.get_vcloud_headers()
3979 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3980 response = Http.put(url=url_rest_call, headers=headers, data=data,
3981 verify=self.vca.verify,
3982 logger=self.vca.logger)
3983
3984 if response.status_code == 403:
3985 add_headers = {'Content-Type': headers['Content-Type']}
3986 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3987
3988 if response.status_code != 202:
3989 self.logger.error("REST call {} failed reason : {}"\
3990 "status code : {}".format(url_rest_call,
3991 response.content,
3992 response.status_code))
3993 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3994 "network connection section")
3995 else:
3996 nic_task = taskType.parseString(response.content, True)
3997 if isinstance(nic_task, GenericTask):
3998 self.vca.block_until_completed(nic_task)
3999 self.logger.info("add_network_adapter_to_vms(): VM {} "\
4000 "connected to NIC type {}".format(vm_id, nic_type))
4001 else:
4002 self.logger.error("add_network_adapter_to_vms(): VM {} "\
4003 "failed to connect NIC type {}".format(vm_id, nic_type))
4004 except Exception as exp:
4005 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
4006 "while adding Network adapter")
4007 raise vimconn.vimconnException(message=exp)
4008
4009
4010 def set_numa_affinity(self, vmuuid, paired_threads_id):
4011 """
4012 Method to assign numa affinity in the vm configuration parameters
4013 Args :
4014 vmuuid - vm uuid
4015 paired_threads_id - one or more virtual processor
4016 numbers
4017 Returns:
4018 None (a vimconnException is raised on failure)
4019 """
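# The affinity is written as the 'numa.nodeAffinity' extraConfig option of the VM,
# e.g. (illustrative) paired_threads_id "0,1" results in numa.nodeAffinity = "0,1".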
4020 try:
4021 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
4022 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
4023 context = None
4024 if hasattr(ssl, '_create_unverified_context'):
4025 context = ssl._create_unverified_context()
4026 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
4027 pwd=self.passwd, port=int(vm_vcenter_port),
4028 sslContext=context)
4029 atexit.register(Disconnect, vcenter_conect)
4030 content = vcenter_conect.RetrieveContent()
4031
4032 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
4033 if vm_obj:
4034 config_spec = vim.vm.ConfigSpec()
4035 config_spec.extraConfig = []
4036 opt = vim.option.OptionValue()
4037 opt.key = 'numa.nodeAffinity'
4038 opt.value = str(paired_threads_id)
4039 config_spec.extraConfig.append(opt)
4040 task = vm_obj.ReconfigVM_Task(config_spec)
4041 if task:
4042 result = self.wait_for_vcenter_task(task, vcenter_conect)
4043 extra_config = vm_obj.config.extraConfig
4044 flag = False
4045 for opts in extra_config:
4046 if 'numa.nodeAffinity' in opts.key:
4047 flag = True
4048 self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
4049 "value {} for vm {}".format(opt.value, vm_obj))
4050 if flag:
4051 return
4052 else:
4053 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
4054 except Exception as exp:
4055 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
4056 "for VM {} : {}".format(vm_obj, vm_moref_id))
4057 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
4058 "affinity".format(exp))
4059
4060
4061 def cloud_init(self, vapp, cloud_config):
4062 """
4063 Method to inject ssh-key
4064 vapp - vapp object
4065 cloud_config a dictionary with:
4066 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
4067 'users': (optional) list of users to be inserted, each item is a dict with:
4068 'name': (mandatory) user name,
4069 'key-pairs': (optional) list of strings with the public key to be inserted to the user
4070 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
4071 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
4072 'config-files': (optional). List of files to be transferred. Each item is a dict with:
4073 'dest': (mandatory) string with the destination absolute path
4074 'encoding': (optional, by default text). Can be one of:
4075 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
4076 'content' (mandatory): string with the content of the file
4077 'permissions': (optional) string with file permissions, typically octal notation '0644'
4078 'owner': (optional) file owner, string with the format 'owner:group'
4079 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
4080 """
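# Illustrative cloud_config (values are placeholders, not defaults):
#   {'key-pairs': ['ssh-rsa AAAA... user@host'],
#    'users': [{'name': 'osm', 'key-pairs': ['ssh-rsa BBBB... osm@host']}]}
# Only 'key-pairs' and 'users' are consumed here; the other documented keys are not
# used by this guest-customization path.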
4081 try:
4082 if not isinstance(cloud_config, dict):
4083 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
4084 else:
4085 key_pairs = []
4086 userdata = []
4087 if "key-pairs" in cloud_config:
4088 key_pairs = cloud_config["key-pairs"]
4089
4090 if "users" in cloud_config:
4091 userdata = cloud_config["users"]
4092
4093 self.logger.debug("cloud_init : Guest os customization started..")
4094 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
4095 self.guest_customization(vapp, customize_script)
4096
4097 except Exception as exp:
4098 self.logger.error("cloud_init : exception occurred while injecting "\
4099 "ssh-key")
4100 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
4101 "ssh-key".format(exp))
4102
4103 def format_script(self, key_pairs=[], users_list=[]):
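# Builds the bash guest-customization script run in the "precustomization" phase at
# first boot. Illustrative result for key_pairs=['ssh-rsa AAAA... user@host'] and an
# empty users_list: a script that creates /root/.ssh if needed and appends that key
# to /root/.ssh/authorized_keys.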
4104 bash_script = """
4105 #!/bin/bash
4106 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4107 if [ "$1" = "precustomization" ];then
4108 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4109 """
4110
4111 keys = "\n".join(key_pairs)
4112 if keys:
4113 keys_data = """
4114 if [ ! -d /root/.ssh ];then
4115 mkdir /root/.ssh
4116 chown root:root /root/.ssh
4117 chmod 700 /root/.ssh
4118 touch /root/.ssh/authorized_keys
4119 chown root:root /root/.ssh/authorized_keys
4120 chmod 600 /root/.ssh/authorized_keys
4121 # make centos with selinux happy
4122 which restorecon && restorecon -Rv /root/.ssh
4123 else
4124 touch /root/.ssh/authorized_keys
4125 chown root:root /root/.ssh/authorized_keys
4126 chmod 600 /root/.ssh/authorized_keys
4127 fi
4128 echo '{key}' >> /root/.ssh/authorized_keys
4129 """.format(key=keys)
4130
4131 bash_script+= keys_data
4132
4133 for user in users_list:
4134 if 'name' in user: user_name = user['name']
4135 if 'key-pairs' in user:
4136 user_keys = "\n".join(user['key-pairs'])
4137 else:
4138 user_keys = None
4139
4140 add_user_name = """
4141 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
4142 """.format(user_name=user_name)
4143
4144 bash_script+= add_user_name
4145
4146 if user_keys:
4147 user_keys_data = """
4148 mkdir /home/{user_name}/.ssh
4149 chown {user_name}:{user_name} /home/{user_name}/.ssh
4150 chmod 700 /home/{user_name}/.ssh
4151 touch /home/{user_name}/.ssh/authorized_keys
4152 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
4153 chmod 600 /home/{user_name}/.ssh/authorized_keys
4154 # make centos with selinux happy
4155 which restorecon && restorecon -Rv /home/{user_name}/.ssh
4156 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
4157 """.format(user_name=user_name,user_key=user_keys)
4158
4159 bash_script+= user_keys_data
4160
4161 return bash_script+"\n\tfi"
4162
4163 def guest_customization(self, vapp, customize_script):
4164 """
4165 Method to customize guest os
4166 vapp - Vapp object
4167 customize_script - Customize script to be run at first boot of VM.
4168 """
4169 for vm in vapp._get_vms():
4170 vm_name = vm.name
4171 task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
4172 if isinstance(task, GenericTask):
4173 self.vca.block_until_completed(task)
4174 self.logger.info("guest_customization : customized guest os task "\
4175 "completed for VM {}".format(vm_name))
4176 else:
4177 self.logger.error("guest_customization : task for customized guest os"\
4178 " failed for VM {}".format(vm_name))
4179 raise vimconn.vimconnException("guest_customization : failed to perform"\
4180 " guest os customization on VM {}".format(vm_name))
4181
4182 def add_new_disk(self, vapp_uuid, disk_size):
4183 """
4184 Method to create an empty vm disk
4185
4186 Args:
4187 vapp_uuid - is vapp identifier.
4188 disk_size - size of disk to be created in GB
4189
4190 Returns:
4191 None
4192 """
4193 status = False
4194 vm_details = None
4195 try:
4196 #Disk size in GB, convert it into MB
4197 if disk_size is not None:
4198 disk_size_mb = int(disk_size) * 1024
4199 vm_details = self.get_vapp_details_rest(vapp_uuid)
4200
4201 if vm_details and "vm_virtual_hardware" in vm_details:
4202 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4203 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4204 status = self.add_new_disk_rest(disk_href, disk_size_mb)
4205
4206 except Exception as exp:
4207 msg = "Error occurred while creating new disk {}.".format(exp)
4208 self.rollback_newvm(vapp_uuid, msg)
4209
4210 if status:
4211 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4212 else:
4213 #If failed to add disk, delete VM
4214 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4215 self.rollback_newvm(vapp_uuid, msg)
4216
4217
4218 def add_new_disk_rest(self, disk_href, disk_size_mb):
4219 """
4220 Retrieves the vApp disks section and adds a new empty disk
4221 
4222 Args:
4223 disk_href: Disk section href used to add the disk
4224 disk_size_mb: Disk size in MB
4225
4226 Returns: Status of add new disk task
4227 """
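# Note: the new RASD Item below reuses the busType/busSubType of the existing hard
# disk (so it attaches to the same controller) and takes the next free InstanceID
# (max existing InstanceID + 1); the capacity attribute is expressed in MB.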
4228 status = False
4229 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
4230 response = Http.get(url=disk_href,
4231 headers=self.vca.vcloud_session.get_vcloud_headers(),
4232 verify=self.vca.verify,
4233 logger=self.vca.logger)
4234
4235 if response.status_code == 403:
4236 response = self.retry_rest('GET', disk_href)
4237
4238 if response.status_code != requests.codes.ok:
4239 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
4240 .format(disk_href, response.status_code))
4241 return status
4242 try:
4243 #Find bus type & max of instance IDs assigned to disks
4244 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4245 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4246 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4247 instance_id = 0
4248 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4249 if item.find("rasd:Description",namespaces).text == "Hard disk":
4250 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
4251 if inst_id > instance_id:
4252 instance_id = inst_id
4253 disk_item = item.find("rasd:HostResource" ,namespaces)
4254 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
4255 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
4256
4257 instance_id = instance_id + 1
4258 new_item = """<Item>
4259 <rasd:Description>Hard disk</rasd:Description>
4260 <rasd:ElementName>New disk</rasd:ElementName>
4261 <rasd:HostResource
4262 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
4263 vcloud:capacity="{}"
4264 vcloud:busSubType="{}"
4265 vcloud:busType="{}"></rasd:HostResource>
4266 <rasd:InstanceID>{}</rasd:InstanceID>
4267 <rasd:ResourceType>17</rasd:ResourceType>
4268 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
4269
4270 new_data = response.content
4271 #Add new item at the bottom
4272 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
4273
4274 # Send PUT request to modify virtual hardware section with new disk
4275 headers = self.vca.vcloud_session.get_vcloud_headers()
4276 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4277
4278 response = Http.put(url=disk_href,
4279 data=new_data,
4280 headers=headers,
4281 verify=self.vca.verify, logger=self.logger)
4282
4283 if response.status_code == 403:
4284 add_headers = {'Content-Type': headers['Content-Type']}
4285 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
4286
4287 if response.status_code != 202:
4288 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
4289 .format(disk_href, response.status_code, response.content))
4290 else:
4291 add_disk_task = taskType.parseString(response.content, True)
4292 if type(add_disk_task) is GenericTask:
4293 status = self.vca.block_until_completed(add_disk_task)
4294 if not status:
4295 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
4296
4297 except Exception as exp:
4298 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
4299
4300 return status
4301
4302
4303 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
4304 """
4305 Method to add existing disk to vm
4306 Args :
4307 catalogs - List of VDC catalogs
4308 image_id - Catalog ID; size - requested disk size in GB
4309 template_name - Name of template in catalog
4310 vapp_uuid - UUID of vApp
4311 Returns:
4312 None
4313 """
4314 disk_info = None
4315 vcenter_conect, content = self.get_vcenter_content()
4316 #find moref-id of vm in image
4317 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
4318 image_id=image_id,
4319 )
4320
4321 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
4322 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
4323 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
4324 if catalog_vm_moref_id:
4325 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
4326 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
4327 if catalog_vm_obj:
4328 #find existing disk
4329 disk_info = self.find_disk(catalog_vm_obj)
4330 else:
4331 exp_msg = "No VM with image id {} found".format(image_id)
4332 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4333 else:
4334 exp_msg = "No Image found with image ID {} ".format(image_id)
4335 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4336
4337 if disk_info:
4338 self.logger.info("Existing disk_info : {}".format(disk_info))
4339 #get VM
4340 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4341 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
4342 if vm_obj:
4343 status = self.add_disk(vcenter_conect=vcenter_conect,
4344 vm=vm_obj,
4345 disk_info=disk_info,
4346 size=size,
4347 vapp_uuid=vapp_uuid
4348 )
4349 if status:
4350 self.logger.info("Disk from image id {} added to {}".format(image_id,
4351 vm_obj.config.name)
4352 )
4353 else:
4354 msg = "No disk found with image id {} to add in VM {}".format(
4355 image_id,
4356 vm_obj.config.name)
4357 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4358
4359
4360 def find_disk(self, vm_obj):
4361 """
4362 Method to find details of existing disk in VM
4363 Args :
4364 vm_obj - vCenter object of the VM whose first virtual
4365 disk details are returned
4366 Returns:
4367 disk_info : dict of disk details
4368 """
4369 disk_info = {}
4370 if vm_obj:
4371 try:
4372 devices = vm_obj.config.hardware.device
4373 for device in devices:
4374 if type(device) is vim.vm.device.VirtualDisk:
4375 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4376 disk_info["full_path"] = device.backing.fileName
4377 disk_info["datastore"] = device.backing.datastore
4378 disk_info["capacityKB"] = device.capacityInKB
4379 break
4380 except Exception as exp:
4381 self.logger.error("find_disk() : exception occurred while "\
4382 "getting existing disk details :{}".format(exp))
4383 return disk_info
4384
4385
4386 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4387 """
4388 Method to add existing disk in VM
4389 Args :
4390 vcenter_conect - vCenter connection object
4391 vm - vCenter vm object; vapp_uuid - vApp UUID (used for rollback on failure)
4392 size - requested disk size in GB; disk_info : dict of existing disk details
4393 Returns:
4394 status : status of add disk task
4395 """
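# Example (illustrative): size=40 GB -> sizeKB = 40 * 1024 * 1024 = 41943040 KB; the
# larger of sizeKB and the source disk's capacityKB is used for the attached disk.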
4396 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4397 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4398 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4399 if size is not None:
4400 #Convert size from GB to KB
4401 sizeKB = int(size) * 1024 * 1024
4402 #compare size of existing disk and user given size; assign whichever is greater
4403 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4404 sizeKB, capacityKB))
4405 if sizeKB > capacityKB:
4406 capacityKB = sizeKB
4407
4408 if datastore and fullpath and capacityKB:
4409 try:
4410 spec = vim.vm.ConfigSpec()
4411 # get all disks on a VM, set unit_number to the next available
4412 unit_number = 0
4413 for dev in vm.config.hardware.device:
4414 if hasattr(dev.backing, 'fileName'):
4415 unit_number = int(dev.unitNumber) + 1
4416 # unit_number 7 reserved for scsi controller
4417 if unit_number == 7:
4418 unit_number += 1
4419 if isinstance(dev, vim.vm.device.VirtualDisk):
4420 #vim.vm.device.VirtualSCSIController
4421 controller_key = dev.controllerKey
4422
4423 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4424 unit_number, controller_key))
4425 # add disk here
4426 dev_changes = []
4427 disk_spec = vim.vm.device.VirtualDeviceSpec()
4428 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4429 disk_spec.device = vim.vm.device.VirtualDisk()
4430 disk_spec.device.backing = \
4431 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4432 disk_spec.device.backing.thinProvisioned = True
4433 disk_spec.device.backing.diskMode = 'persistent'
4434 disk_spec.device.backing.datastore = datastore
4435 disk_spec.device.backing.fileName = fullpath
4436
4437 disk_spec.device.unitNumber = unit_number
4438 disk_spec.device.capacityInKB = capacityKB
4439 disk_spec.device.controllerKey = controller_key
4440 dev_changes.append(disk_spec)
4441 spec.deviceChange = dev_changes
4442 task = vm.ReconfigVM_Task(spec=spec)
4443 status = self.wait_for_vcenter_task(task, vcenter_conect)
4444 return status
4445 except Exception as exp:
4446 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4447 "{} to vm {}".format(exp,
4448 fullpath,
4449 vm.config.name)
4450 self.rollback_newvm(vapp_uuid, exp_msg)
4451 else:
4452 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4453 self.rollback_newvm(vapp_uuid, msg)
4454
4455
4456 def get_vcenter_content(self):
4457 """
4458 Get the vsphere content object
4459 """
4460 try:
4461 vm_vcenter_info = self.get_vm_vcenter_info()
4462 except Exception as exp:
4463 self.logger.error("Error occurred while getting vCenter information"\
4464 " for VM : {}".format(exp))
4465 raise vimconn.vimconnException(message=exp)
4466
4467 context = None
4468 if hasattr(ssl, '_create_unverified_context'):
4469 context = ssl._create_unverified_context()
4470
4471 vcenter_conect = SmartConnect(
4472 host=vm_vcenter_info["vm_vcenter_ip"],
4473 user=vm_vcenter_info["vm_vcenter_user"],
4474 pwd=vm_vcenter_info["vm_vcenter_password"],
4475 port=int(vm_vcenter_info["vm_vcenter_port"]),
4476 sslContext=context
4477 )
4478 atexit.register(Disconnect, vcenter_conect)
4479 content = vcenter_conect.RetrieveContent()
4480 return vcenter_conect, content
4481
4482
4483 def get_vm_moref_id(self, vapp_uuid):
4484 """
4485 Get the moref_id of given VM
4486 """
4487 try:
4488 if vapp_uuid:
4489 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4490 if vm_details and "vm_vcenter_info" in vm_details:
4491 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4492
4493 return vm_moref_id
4494
4495 except Exception as exp:
4496 self.logger.error("Error occurred while getting VM moref ID "\
4497 " for VM : {}".format(exp))
4498 return None
4499
4500
4501 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
4502 """
4503 Method to get vApp template details
4504 Args :
4505 catalogs - list of VDC catalogs
4506 image_id - Catalog ID to find
4507 template_name : template name in catalog
4508 Returns:
4509 parsed_response : dict of vApp template details
4510 """
4511 parsed_response = {}
4512
4513 vca = self.connect_as_admin()
4514 if not vca:
4515 raise vimconn.vimconnConnectionException("self.connect_as_admin() failed")
4516
4517 try:
4518 catalog = self.get_catalog_obj(image_id, catalogs)
4519 if catalog:
4520 template_name = self.get_catalogbyid(image_id, catalogs)
4521 catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
4522 if len(catalog_items) == 1:
4523 response = Http.get(catalog_items[0].get_href(),
4524 headers=vca.vcloud_session.get_vcloud_headers(),
4525 verify=vca.verify,
4526 logger=vca.logger)
4527 catalogItem = XmlElementTree.fromstring(response.content)
4528 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
4529 vapp_template_href = entity.get("href")
4530 #get vapp details and parse moref id
4531
4532 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4533 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4534 'vmw': 'http://www.vmware.com/schema/ovf',
4535 'vm': 'http://www.vmware.com/vcloud/v1.5',
4536 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4537 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
4538 'xmlns':"http://www.vmware.com/vcloud/v1.5"
4539 }
4540
4541 if vca.vcloud_session and vca.vcloud_session.organization:
4542 response = Http.get(url=vapp_tempalte_href,
4543 headers=vca.vcloud_session.get_vcloud_headers(),
4544 verify=vca.verify,
4545 logger=vca.logger
4546 )
4547
4548 if response.status_code != requests.codes.ok:
4549 self.logger.debug("REST API call {} failed. Return status code {}".format(
4550 vapp_tempalte_href, response.status_code))
4551
4552 else:
4553 xmlroot_respond = XmlElementTree.fromstring(response.content)
4554 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4555 if children_section is not None:
4556 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4557 if vCloud_extension_section is not None:
4558 vm_vcenter_info = {}
4559 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4560 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4561 if vmext is not None:
4562 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4563 parsed_response["vm_vcenter_info"]= vm_vcenter_info
4564
4565 except Exception as exp:
4566 self.logger.info("Error occurred while calling REST API to get vApp details: {}".format(exp))
4567
4568 return parsed_response
4569
4570
4571 def rollback_newvm(self, vapp_uuid, msg, exp_type="Generic"):
4572 """
4573 Method to delete vApp
4574 Args :
4575 vapp_uuid - vApp UUID
4576 msg - Error message to be logged
4577 exp_type : Exception type
4578 Returns:
4579 None
4580 """
4581 if vapp_uuid:
4582 status = self.delete_vminstance(vapp_uuid)
4583 else:
4584 msg = "No vApp ID"
4585 self.logger.error(msg)
4586 if exp_type == "Generic":
4587 raise vimconn.vimconnException(msg)
4588 elif exp_type == "NotFound":
4589 raise vimconn.vimconnNotFoundException(message=msg)
4590
4591 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4592 """
4593 Method to attach SRIOV adapters to VM
4594
4595 Args:
4596 vapp_uuid - uuid of vApp/VM
4597 sriov_nets - SRIOV devices information as specified in VNFD (flavor)
4598 vmname_andid - vmname
4599
4600 Returns:
4601 Status of the add-SRIOV-adapter task, the vm object and the
4602 vcenter_conect object
4603 """
4604 vm_obj = None
4605 vcenter_conect, content = self.get_vcenter_content()
4606 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4607
4608 if vm_moref_id:
4609 try:
4610 no_of_sriov_devices = len(sriov_nets)
4611 if no_of_sriov_devices > 0:
4612 #Get VM and its host
4613 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4614 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4615 if host_obj and vm_obj:
4616 #get SRIOV devices from the host on which the vApp is currently running
4617 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4618 no_of_sriov_devices,
4619 )
4620
4621 if len(avilable_sriov_devices) == 0:
4622 #find other hosts with active pci devices
4623 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4624 content,
4625 no_of_sriov_devices,
4626 )
4627
4628 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4629 #Migrate vm to the host where SRIOV devices are available
4630 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4631 new_host_obj))
4632 task = self.relocate_vm(new_host_obj, vm_obj)
4633 if task is not None:
4634 result = self.wait_for_vcenter_task(task, vcenter_conect)
4635 self.logger.info("Migrate VM status: {}".format(result))
4636 host_obj = new_host_obj
4637 else:
4638 self.logger.error("Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj))
4639 raise vimconn.vimconnNotFoundException(
4640 "Fail to migrate VM : {} to host {}".format(
4641 vmname_andid,
4642 new_host_obj)
4643 )
4644
4645 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4646 #Add SRIOV devices one by one
4647 for sriov_net in sriov_nets:
4648 network_name = sriov_net.get('net_id')
4649 dvs_portgr_name = self.create_dvPort_group(network_name)
4650 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
4651 #add vlan ID ,Modify portgroup for vlan ID
4652 self.configure_vlanID(content, vcenter_conect, network_name)
4653
4654 task = self.add_sriov_to_vm(content,
4655 vm_obj,
4656 host_obj,
4657 network_name,
4658 avilable_sriov_devices[0]
4659 )
4660 if task:
4661 status= self.wait_for_vcenter_task(task, vcenter_conect)
4662 if status:
4663 self.logger.info("Added {} SRIOV adapter(s) to VM {}".format(
4664 no_of_sriov_devices,
4665 str(vm_obj)))
4666 else:
4667 self.logger.error("Failed to add {} SRIOV adapter(s) to VM {}".format(
4668 no_of_sriov_devices,
4669 str(vm_obj)))
4670 raise vimconn.vimconnUnexpectedResponse(
4671 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
4672 )
4673 return True, vm_obj, vcenter_conect
4674 else:
4675 self.logger.error("Currently there is no host with"\
4676 " the {} available SRIOV "\
4677 "VFs required for VM {}".format(
4678 no_of_sriov_devices,
4679 vmname_andid)
4680 )
4681 raise vimconn.vimconnNotFoundException(
4682 "Currently there is no host with {} "\
4683 "number of avaialble SRIOV devices required for VM {}".format(
4684 no_of_sriov_devices,
4685 vmname_andid))
4686 else:
4687 self.logger.debug("No information about SRIOV devices {}".format(sriov_nets))
4688
4689 except vmodl.MethodFault as error:
4690 self.logger.error("Error occurred while adding SRIOV: {}".format(error))
4691 return None, vm_obj, vcenter_conect
4692
4693
4694 def get_sriov_devices(self,host, no_of_vfs):
4695 """
4696 Method to get the details of SRIOV devices on given host
4697 Args:
4698 host - vSphere host object
4699 no_of_vfs - number of VFs needed on host
4700
4701 Returns:
4702 array of SRIOV devices
4703 """
4704 sriovInfo=[]
4705 if host:
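# Scan the host's PCI passthrough devices and return the first active SR-IOV device
# that exposes at least no_of_vfs virtual functions.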
4706 for device in host.config.pciPassthruInfo:
4707 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4708 if device.numVirtualFunction >= no_of_vfs:
4709 sriovInfo.append(device)
4710 break
4711 return sriovInfo
4712
4713
4714 def get_host_and_sriov_devices(self, content, no_of_vfs):
4715 """
4716 Method to get the details of SRIOV devices on all hosts
4717
4718 Args:
4719 content - vCenter content object
4720 no_of_vfs - number of pci VFs needed on host
4721
4722 Returns:
4723 array of SRIOV devices and host object
4724 """
4725 host_obj = None
4726 sriov_device_objs = None
4727 try:
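# Walk every HostSystem in the vCenter inventory and stop at the first host that can
# supply the requested number of SR-IOV virtual functions.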
4728 if content:
4729 container = content.viewManager.CreateContainerView(content.rootFolder,
4730 [vim.HostSystem], True)
4731 for host in container.view:
4732 devices = self.get_sriov_devices(host, no_of_vfs)
4733 if devices:
4734 host_obj = host
4735 sriov_device_objs = devices
4736 break
4737 except Exception as exp:
4738 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4739
4740 return host_obj,sriov_device_objs
4741
4742
4743 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
4744 """
4745 Method to add SRIOV adapter to vm
4746
4747 Args:
4748 host_obj - vSphere host object
4749 vm_obj - vSphere vm object
4750 content - vCenter content object
4751 network_name - name of distributed virtual portgroup
4752 sriov_device - SRIOV device info
4753
4754 Returns:
4755 task object
4756 """
4757 devices = []
4758 vnic_label = "sriov nic"
4759 try:
4760 dvs_portgr = self.get_dvport_group(network_name)
4761 network_name = dvs_portgr.name
4762 nic = vim.vm.device.VirtualDeviceSpec()
4763 # VM device
4764 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4765 nic.device = vim.vm.device.VirtualSriovEthernetCard()
4766 nic.device.addressType = 'assigned'
4767 #nic.device.key = 13016
4768 nic.device.deviceInfo = vim.Description()
4769 nic.device.deviceInfo.label = vnic_label
4770 nic.device.deviceInfo.summary = network_name
4771 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
4772
4773 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
4774 nic.device.backing.deviceName = network_name
4775 nic.device.backing.useAutoDetect = False
4776 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
4777 nic.device.connectable.startConnected = True
4778 nic.device.connectable.allowGuestControl = True
4779
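# SR-IOV backing: bind the virtual NIC to the physical function (PF) of the selected SR-IOV device.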
4780 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
4781 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
4782 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
4783
4784 devices.append(nic)
4785 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
4786 task = vm_obj.ReconfigVM_Task(vmconf)
4787 return task
4788 except Exception as exp:
4789 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
4790 return None
4791
4792
4793 def create_dvPort_group(self, network_name):
4794 """
4795 Method to create distributed virtual portgroup
4796
4797 Args:
4798 network_name - name of network/portgroup
4799
4800 Returns:
4801 portgroup key
4802 """
4803 try:
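# Append a UUID suffix so that each SR-IOV attachment gets a uniquely named portgroup.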
4804 new_network_name = [network_name, '-', str(uuid.uuid4())]
4805 network_name=''.join(new_network_name)
4806 vcenter_conect, content = self.get_vcenter_content()
4807
4808 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4809 if dv_switch:
4810 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4811 dv_pg_spec.name = network_name
4812
4813 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4814 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
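# Default security policy for the new portgroup: promiscuous mode, forged transmits and
# MAC address changes are all disabled.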
4815 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4816 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4817 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4818 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4819
4820 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4821 self.wait_for_vcenter_task(task, vcenter_conect)
4822
4823 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4824 if dvPort_group:
4825 self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
4826 return dvPort_group.key
4827 else:
4828 self.logger.debug("No distributed virtual switch found with name {}".format(network_name))
4829
4830 except Exception as exp:
4831 self.logger.error("Error occurred while creating distributed virtual port group {}"\
4832 " : {}".format(network_name, exp))
4833 return None
4834
4835 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4836 """
4837 Method to reconfigure distributed virtual portgroup
4838
4839 Args:
4840 dvPort_group_name - name of distributed virtual portgroup
4841 content - vCenter content object
4842 config_info - distributed virtual portgroup configuration
4843
4844 Returns:
4845 task object
4846 """
4847 try:
4848 dvPort_group = self.get_dvport_group(dvPort_group_name)
4849 if dvPort_group:
4850 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4851 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4852 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4853 if "vlanID" in config_info:
4854 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4855 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4856
4857 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4858 return task
4859 else:
4860 return None
4861 except Exception as exp:
4862 self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
4863 " : {}".format(dvPort_group_name, exp))
4864 return None
4865
4866
4867 def destroy_dvport_group(self , dvPort_group_name):
4868 """
4869 Method to destroy distributed virtual portgroup
4870
4871 Args:
4872 dvPort_group_name - name of distributed virtual portgroup
4873
4874 Returns:
4875 Status of the destroy task if the portgroup was deleted successfully, otherwise None
4876 """
4877 vcenter_conect, content = self.get_vcenter_content()
4878 try:
4879 status = None
4880 dvPort_group = self.get_dvport_group(dvPort_group_name)
4881 if dvPort_group:
4882 task = dvPort_group.Destroy_Task()
4883 status = self.wait_for_vcenter_task(task, vcenter_conect)
4884 return status
4885 except vmodl.MethodFault as exp:
4886 self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
4887 exp, dvPort_group_name))
4888 return None
4889
4890
4891 def get_dvport_group(self, dvPort_group_name):
4892 """
4893 Method to get distributed virtual portgroup
4894
4895 Args:
4896 dvPort_group_name - key of the distributed virtual portgroup
4897
4898 Returns:
4899 portgroup object
4900 """
4901 vcenter_conect, content = self.get_vcenter_content()
4902 dvPort_group = None
4903 try:
4904 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4905 for item in container.view:
4906 if item.key == dvPort_group_name:
4907 dvPort_group = item
4908 break
4909 return dvPort_group
4910 except vmodl.MethodFault as exp:
4911 self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
4912 exp, dvPort_group_name))
4913 return None
4914
4915 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4916 """
4917 Method to get the vlan ID of a distributed virtual portgroup
4918
4919 Args:
4920 dvPort_group_name - name of distributed virtual portgroup
4921
4922 Returns:
4923 vlan ID
4924 """
4925 vlanId = None
4926 try:
4927 dvPort_group = self.get_dvport_group(dvPort_group_name)
4928 if dvPort_group:
4929 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4930 except vmodl.MethodFault as exp:
4931 self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
4932 exp, dvPort_group_name))
4933 return vlanId
4934
4935
4936 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4937 """
4938 Method to configure the vlan ID of a distributed virtual portgroup
4939
4940 Args:
4941 dvPort_group_name - name of distributed virtual portgroup
4942
4943 Returns:
4944 None
4945 """
4946 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4947 if vlanID == 0:
4948 #configure vlanID
4949 vlanID = self.genrate_vlanID(dvPort_group_name)
4950 config = {"vlanID":vlanID}
4951 task = self.reconfig_portgroup(content, dvPort_group_name,
4952 config_info=config)
4953 if task:
4954 status= self.wait_for_vcenter_task(task, vcenter_conect)
4955 if status:
4956 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4957 dvPort_group_name,vlanID))
4958 else:
4959 self.logger.error("Failed to reconfigure portgroup {} for vlan ID {}".format(
4960 dvPort_group_name, vlanID))
4961
4962
4963 def genrate_vlanID(self, network_name):
4964 """
4965 Method to get unused vlanID
4966 Args:
4967 network_name - name of network/portgroup
4968 Returns:
4969 vlanID
4970 """
4971 vlan_id = None
4972 used_ids = []
4973 if self.config.get('vlanID_range') is None:
4974 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4975 "in the config before creating an SRIOV network with a vlan tag")
4976 if "used_vlanIDs" not in self.persistent_info:
4977 self.persistent_info["used_vlanIDs"] = {}
4978 else:
4979 used_ids = self.persistent_info["used_vlanIDs"].values()
4980
4981 for vlanID_range in self.config.get('vlanID_range'):
4982 start_vlanid, end_vlanid = vlanID_range.split("-")
4983 if int(start_vlanid) > int(end_vlanid):
4984 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4985 vlanID_range))
4986
4987 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4988 if id not in used_ids:
4989 vlan_id = id
4990 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4991 return vlan_id
4992 if vlan_id is None:
4993 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4994
4995
4996 def get_obj(self, content, vimtype, name):
4997 """
4998 Get the vsphere object associated with a given text name
4999 """
5000 obj = None
5001 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
5002 for item in container.view:
5003 if item.name == name:
5004 obj = item
5005 break
5006 return obj
5007
5008
5009 def insert_media_to_vm(self, vapp, image_id):
5010 """
5011 Method to insert media CD-ROM (ISO image) from catalog to vm.
5012 vapp - vapp object to get vm id
5013 image_id - image id of the CD-ROM (ISO) to be inserted into the vm
5014 """
5015 # create connection object
5016 vca = self.connect()
5017 try:
5018 # fetching catalog details
5019 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
5020 response = Http.get(url=rest_url,
5021 headers=vca.vcloud_session.get_vcloud_headers(),
5022 verify=vca.verify,
5023 logger=vca.logger)
5024
5025 if response.status_code != 200:
5026 self.logger.error("REST call {} failed reason : {}"\
5027 " status code : {}".format(rest_url,
5028 response.content,
5029 response.status_code))
5030 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
5031 "catalog details")
5032 # searching iso name and id
5033 iso_name,media_id = self.get_media_details(vca, response.content)
5034
5035 if iso_name and media_id:
5036 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5037 <ns6:MediaInsertOrEjectParams
5038 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
5039 <ns6:Media
5040 type="application/vnd.vmware.vcloud.media+xml"
5041 name="{}.iso"
5042 id="urn:vcloud:media:{}"
5043 href="https://{}/api/media/{}"/>
5044 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
5045 vca.host,media_id)
5046
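# vCloud Director answers the insertMedia action with 202 Accepted and a Task; the task
# returned for each VM is polled to completion below.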
5047 for vms in vapp._get_vms():
5048 vm_id = (vms.id).split(':')[-1]
5049
5050 headers = vca.vcloud_session.get_vcloud_headers()
5051 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
5052 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
5053
5054 response = Http.post(url=rest_url,
5055 headers=headers,
5056 data=data,
5057 verify=vca.verify,
5058 logger=vca.logger)
5059
5060 if response.status_code != 202:
5061 self.logger.error("Failed to insert CD-ROM to vm")
5062 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
5063 "ISO image to vm")
5064 else:
5065 task = taskType.parseString(response.content, True)
5066 if isinstance(task, GenericTask):
5067 vca.block_until_completed(task)
5068 self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
5069 " image to vm {}".format(vm_id))
5070 except Exception as exp:
5071 self.logger.error("insert_media_to_vm() : exception occurred "\
5072 "while inserting media CD-ROM")
5073 raise vimconn.vimconnException(message=exp)
5074
5075
5076 def get_media_details(self, vca, content):
5077 """
5078 Method to get catalog item details
5079 vca - connection object
5080 content - Catalog details
5081 Return - Media name, media id
5082 """
5083 cataloghref_list = []
5084 try:
5085 if content:
5086 vm_list_xmlroot = XmlElementTree.fromstring(content)
5087 for child in vm_list_xmlroot.iter():
5088 if 'CatalogItem' in child.tag:
5089 cataloghref_list.append(child.attrib.get('href'))
5090 if cataloghref_list:
5091 for href in cataloghref_list:
5092 if href:
5093 response = Http.get(url=href,
5094 headers=vca.vcloud_session.get_vcloud_headers(),
5095 verify=vca.verify,
5096 logger=vca.logger)
5097 if response.status_code != 200:
5098 self.logger.error("REST call {} failed reason : {}"\
5099 "status code : {}".format(href,
5100 response.content,
5101 response.status_code))
5102 raise vimconn.vimconnException("get_media_details : Failed to get "\
5103 "catalogitem details")
5104 list_xmlroot = XmlElementTree.fromstring(response.content)
5105 for child in list_xmlroot.iter():
5106 if 'Entity' in child.tag:
5107 if 'media' in child.attrib.get('href'):
5108 name = child.attrib.get('name')
5109 media_id = child.attrib.get('href').split('/').pop()
5110 return name,media_id
5111 else:
5112 self.logger.debug("Media name and id not found")
5113 return False,False
5114 except Exception as exp:
5115 self.logger.error("get_media_details : exception occurred "\
5116 "getting media details")
5117 raise vimconn.vimconnException(message=exp)
5118
5119
5120 def retry_rest(self, method, url, add_headers=None, data=None):
5121 """ Method to get Token & retry respective REST request
5122 Args:
5123 method - HTTP method; one of 'GET', 'PUT', 'POST' or 'DELETE'
5124 url - request url to be used
5125 add_headers - Additional headers (optional)
5126 data - Request payload data to be passed in request
5127 Returns:
5128 response - Response of request
5129 """
5130 response = None
5131
5132 #Get token
5133 self.get_token()
5134
5135 headers=self.vca.vcloud_session.get_vcloud_headers()
5136
5137 if add_headers:
5138 headers.update(add_headers)
5139
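# Dispatch the request to pyvcloud's Http helpers, reusing the refreshed vCloud session headers.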
5140 if method == 'GET':
5141 response = Http.get(url=url,
5142 headers=headers,
5143 verify=self.vca.verify,
5144 logger=self.vca.logger)
5145 elif method == 'PUT':
5146 response = Http.put(url=url,
5147 data=data,
5148 headers=headers,
5149 verify=self.vca.verify,
5150 logger=self.vca.logger)
5151 elif method == 'POST':
5152 response = Http.post(url=url,
5153 headers=headers,
5154 data=data,
5155 verify=self.vca.verify,
5156 logger=self.vca.logger)
5157 elif method == 'DELETE':
5158 response = Http.delete(url=url,
5159 headers=headers,
5160 verify=self.vca.verify,
5161 logger=self.vca.logger)
5162 return response
5163
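# Usage sketch for retry_rest (the URL below is hypothetical):
#   response = self.retry_rest('GET', '{}/api/vApp/vapp-{}'.format(self.vca.host, vapp_id))
#   if response.status_code == requests.codes.ok:
#       ...  # parse response.content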
5164
5165 def get_token(self):
5166 """ Generate a new token if expired
5167
5168 Returns:
5169 None. On success the refreshed VCA object is stored in self.vca and can later be used to connect to vCloud Director as admin for the VDC.
5170 """
5171 vca = None
5172
5173 try:
5174 self.logger.debug("Generating a new token for org {} as user {}".format(self.org_name,
5175 self.user))
5177 vca = VCA(host=self.url,
5178 username=self.user,
5179 service_type=STANDALONE,
5180 version=VCAVERSION,
5181 verify=False,
5182 log=False)
5183
5184 result = vca.login(password=self.passwd, org=self.org_name)
5185 if result is True:
5186 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
5187 if result is True:
5188 self.logger.info(
5189 "Successfully generated token for vcloud direct org: {} as user: {}".format(self.org_name, self.user))
5190 #Update vca
5191 self.vca = vca
5192 return
5193
5194 except Exception:
5195 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
5196 "{} as user: {}".format(self.org_name, self.user))
5197
5198 if not vca or not result:
5199 raise vimconn.vimconnConnectionException("self.connect() failed while reconnecting")
5200
5201
5202 def get_vdc_details(self):
5203 """ Get VDC details using pyVcloud Lib
5204
5205 Returns vdc object
5206 """
5207 vdc = self.vca.get_vdc(self.tenant_name)
5208
5209 #Retry once, if failed by refreshing token
5210 if vdc is None:
5211 self.get_token()
5212 vdc = self.vca.get_vdc(self.tenant_name)
5213
5214 return vdc
5215
5216