Merge branch 'vio' into v2.0
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
# global variable for vcd connector type (standalone vCloud Director, not vCHS/vCA)
STANDALONE = 'standalone'

# keys used for the in-memory flavor dicts (see vimconnector.flavorlist)
FLAVOR_RAM_KEY = 'ram'
FLAVOR_VCPUS_KEY = 'vcpus'
FLAVOR_DISK_KEY = 'disk'
# defaults applied when a network is created without an explicit ip_profile
DEFAULT_IP_PROFILE = {'dhcp_count':50,
                      'dhcp_enabled':True,
                      'ip_version':"IPv4"
                      }
# global variables for task-polling wait time (seconds)
INTERVAL_TIME = 5
MAX_WAIT_TIME = 1800

# vCloud API version string passed to pyvcloud's VCA client
VCAVERSION = '5.9'

__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
__date__ = "$12-Jan-2017 11:09:29$"
__version__ = '0.1'

# vCloud Director vApp status codes, for reference:
# -1: "Could not be created",
# 0: "Unresolved",
# 1: "Resolved",
# 2: "Deployed",
# 3: "Suspended",
# 4: "Powered on",
# 5: "Waiting for user input",
# 6: "Unknown state",
# 7: "Unrecognized state",
# 8: "Powered off",
# 9: "Inconsistent state",
# 10: "Children do not all have the same status",
# 11: "Upload initiated, OVF descriptor pending",
# 12: "Upload initiated, copying contents",
# 13: "Upload initiated , disk contents pending",
# 14: "Upload has been quarantined",
# 15: "Upload quarantine period has expired"

# mapping of vCD numeric status codes (above) to MANO status strings
vcdStatusCode2manoFormat = {4: 'ACTIVE',
                            7: 'PAUSED',
                            3: 'SUSPENDED',
                            8: 'INACTIVE',
                            12: 'BUILD',
                            -1: 'ERROR',
                            14: 'DELETED'}

# network status strings pass through unchanged to MANO
netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
                        'ERROR': 'ERROR', 'DELETED': 'DELETED'
                        }
120
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
128 Constructor create vmware connector to vCloud director.
129
130 By default construct doesn't validate connection state. So client can create object with None arguments.
131 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
132
133 a) It initialize organization UUID
134 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
135
136 Args:
137 uuid - is organization uuid.
138 name - is organization name that must be presented in vCloud director.
139 tenant_id - is VDC uuid it must be presented in vCloud director
140 tenant_name - is VDC name.
141 url - is hostname or ip address of vCloud director
142 url_admin - same as above.
143 user - is user that administrator for organization. Caller must make sure that
144 username has right privileges.
145
146 password - is password for a user.
147
148 VMware connector also requires PVDC administrative privileges and separate account.
149 This variables must be passed via config argument dict contains keys
150
151 dict['admin_username']
152 dict['admin_password']
153 config - Provide NSX and vCenter information
154
155 Returns:
156 Nothing.
157 """
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
217 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
233 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 if index == 'tenant_id':
243 return self.tenant_id
244 if index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 if index == 'tenant_id':
269 self.tenant_id = value
270 if index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
    def connect_as_admin(self):
        """Connect to vCloud director as the provider-VDC (System org) admin user.

        Certain actions (organization creation, provider network creation, ...)
        can be done only by the provider vdc admin user; credentials come from
        config['admin_username'] / config['admin_password'].

        Returns:
            VCA: a logged-in pyvcloud client bound to the 'System' organization.

        Raises:
            vimconn.vimconnConnectionException: when the password login fails.
        """

        self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))

        # STANDALONE service type = plain vCloud Director endpoint;
        # TLS certificate verification is disabled (verify=False).
        vca_admin = VCA(host=self.url,
                        username=self.admin_user,
                        service_type=STANDALONE,
                        version=VCAVERSION,
                        verify=False,
                        log=False)
        # First login with the password to obtain a session token.
        result = vca_admin.login(password=self.admin_password, org='System')
        if not result:
            raise vimconn.vimconnConnectionException(
                "Can't connect to a vCloud director as: {}".format(self.admin_user))
        # Second login with the token binds the session to the org URL.
        result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
        if result is True:
            self.logger.info(
                "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))

        # NOTE(review): a failed token re-login is not treated as fatal here;
        # vca_admin is returned regardless — confirm this is intended.
        return vca_admin
316
317 def connect(self):
318 """ Method connect as normal user to vCloud director.
319
320 Returns:
321 The return vca object that letter can be used to connect to vCloud director as admin for VDC
322 """
323
324 try:
325 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
326 self.user,
327 self.org_name))
328 vca = VCA(host=self.url,
329 username=self.user,
330 service_type=STANDALONE,
331 version=VCAVERSION,
332 verify=False,
333 log=False)
334
335 result = vca.login(password=self.passwd, org=self.org_name)
336 if not result:
337 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
338 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
339 if result is True:
340 self.logger.info(
341 "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
342
343 except:
344 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
345 "{} as user: {}".format(self.org_name, self.user))
346
347 return vca
348
    def init_organization(self):
        """Initialize the organization UUID and the VDC (tenant) parameters.

        At bare minimum the client must provide an organization name present in
        vCloud director, and a VDC. The Org UUID and the VDC UUID/name are
        resolved at run time from the live vCloud director inventory.

        On any failure the method logs the traceback and leaves self.org_uuid
        as None (best-effort initialization; it does not raise to the caller).

        Raises:
            vimconn.vimconnConnectionException: when self.connect() returns a falsy client.
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")

        self.vca = vca
        try:
            if self.org_uuid is None:
                org_dict = self.get_org_list()
                # org_dict maps org UUID -> org name; find ours by name.
                for org in org_dict:
                    # we set org UUID at the init phase but we can do it only when we have valid credential.
                    if org_dict[org] == self.org_name:
                        self.org_uuid = org
                        self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
                        break
                else:
                    # for/else: no break happened -> org name not found.
                    raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))

            # if well good we require for org details
            org_details_dict = self.get_org(org_uuid=self.org_uuid)

            # we have two case if we want to initialize VDC ID or VDC name at run time
            # case one: tenant_name provided but no tenant id -> resolve the id.
            if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
                vdcs_dict = org_details_dict['vdcs']
                for vdc in vdcs_dict:
                    if vdcs_dict[vdc] == self.tenant_name:
                        self.tenant_id = vdc
                        self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
                                                                                                self.org_name))
                        break
                else:
                    raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
            # case two we have tenant_id but we don't have tenant name so we find and set it.
            if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
                vdcs_dict = org_details_dict['vdcs']
                for vdc in vdcs_dict:
                    if vdc == self.tenant_id:
                        self.tenant_name = vdcs_dict[vdc]
                        self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
                                                                                                self.org_name))
                        break
                else:
                    raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
            self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
        except:
            # Deliberate best-effort: any failure (including the raises above)
            # is logged and org_uuid is reset, without propagating to the caller.
            self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
            self.logger.debug(traceback.format_exc())
            self.org_uuid = None
408
409 def new_tenant(self, tenant_name=None, tenant_description=None):
410 """ Method adds a new tenant to VIM with this name.
411 This action requires access to create VDC action in vCloud director.
412
413 Args:
414 tenant_name is tenant_name to be created.
415 tenant_description not used for this call
416
417 Return:
418 returns the tenant identifier in UUID format.
419 If action is failed method will throw vimconn.vimconnException method
420 """
421 vdc_task = self.create_vdc(vdc_name=tenant_name)
422 if vdc_task is not None:
423 vdc_uuid, value = vdc_task.popitem()
424 self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
425 return vdc_uuid
426 else:
427 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
428
    def delete_tenant(self, tenant_id=None):
        """Delete a tenant (org VDC) from the VIM.

        Args:
            tenant_id: uuid of the tenant (VDC) to be deleted.

        Returns:
            The tenant identifier (uuid) on success.

        Raises:
            vimconn.vimconnConnectionException: when the admin login fails.
            vimconn.vimconnNotFoundException: when the VDC cannot be fetched or tenant_id is None.
            vimconn.vimconnException: when the DELETE call is rejected.
        """
        # VDC removal requires provider (System org) admin credentials.
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")

        if tenant_id is not None:
            if vca.vcloud_session and vca.vcloud_session.organization:
                # Get OrgVDC: fetch the VDC XML to discover its 'remove' link.
                url_list = [self.vca.host, '/api/vdc/', tenant_id]
                orgvdc_herf = ''.join(url_list)
                response = Http.get(url=orgvdc_herf,
                                    headers=vca.vcloud_session.get_vcloud_headers(),
                                    verify=vca.verify,
                                    logger=vca.logger)

                if response.status_code != requests.codes.ok:
                    self.logger.debug("delete_tenant():GET REST API call {} failed. "\
                                      "Return status code {}".format(orgvdc_herf,
                                                                     response.status_code))
                    raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))

                # Parse the response and build a prefix->uri namespace map
                # (lxml needs explicit namespaces for find()).
                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
                vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
                # force recursive removal of the VDC and everything inside it
                vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'

                # Delete OrgVDC via the discovered 'remove' link.
                response = Http.delete(url=vdc_remove_href,
                                       headers=vca.vcloud_session.get_vcloud_headers(),
                                       verify=vca.verify,
                                       logger=vca.logger)

                # 202 Accepted: deletion is asynchronous; wait for the task.
                if response.status_code == 202:
                    delete_vdc_task = taskType.parseString(response.content, True)
                    if type(delete_vdc_task) is GenericTask:
                        self.vca.block_until_completed(delete_vdc_task)
                    self.logger.info("Deleted tenant with ID {}".format(tenant_id))
                    return tenant_id
                else:
                    self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
                                      "Return status code {}".format(vdc_remove_href,
                                                                     response.status_code))
                    raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
        else:
            self.logger.debug("delete_tenant():Incorrect tenant ID  {}".format(tenant_id))
            raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
484
485
486 def get_tenant_list(self, filter_dict={}):
487 """Obtain tenants of VIM
488 filter_dict can contain the following keys:
489 name: filter by tenant name
490 id: filter by tenant uuid/id
491 <other VIM specific>
492 Returns the tenant list of dictionaries:
493 [{'name':'<name>, 'id':'<id>, ...}, ...]
494
495 """
496 org_dict = self.get_org(self.org_uuid)
497 vdcs_dict = org_dict['vdcs']
498
499 vdclist = []
500 try:
501 for k in vdcs_dict:
502 entry = {'name': vdcs_dict[k], 'id': k}
503 # if caller didn't specify dictionary we return all tenants.
504 if filter_dict is not None and filter_dict:
505 filtered_entry = entry.copy()
506 filtered_dict = set(entry.keys()) - set(filter_dict)
507 for unwanted_key in filtered_dict: del entry[unwanted_key]
508 if filter_dict == entry:
509 vdclist.append(filtered_entry)
510 else:
511 vdclist.append(entry)
512 except:
513 self.logger.debug("Error in get_tenant_list()")
514 self.logger.debug(traceback.format_exc())
515 raise vimconn.vimconnException("Incorrect state. {}")
516
517 return vdclist
518
519 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
520 """Adds a tenant network to VIM
521 net_name is the name
522 net_type can be 'bridge','data'.'ptp'.
523 ip_profile is a dict containing the IP parameters of the network
524 shared is a boolean
525 Returns the network identifier"""
526
527 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
528 .format(net_name, net_type, ip_profile, shared))
529
530 isshared = 'false'
531 if shared:
532 isshared = 'true'
533
534 # ############# Stub code for SRIOV #################
535 # if net_type == "data" or net_type == "ptp":
536 # if self.config.get('dv_switch_name') == None:
537 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
538 # network_uuid = self.create_dvPort_group(net_name)
539
540 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
541 ip_profile=ip_profile, isshared=isshared)
542 if network_uuid is not None:
543 return network_uuid
544 else:
545 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
546
547 def get_vcd_network_list(self):
548 """ Method available organization for a logged in tenant
549
550 Returns:
551 The return vca object that letter can be used to connect to vcloud direct as admin
552 """
553
554 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
555
556 if not self.tenant_name:
557 raise vimconn.vimconnConnectionException("Tenant name is empty.")
558
559 vdc = self.get_vdc_details()
560 if vdc is None:
561 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
562
563 vdc_uuid = vdc.get_id().split(":")[3]
564 networks = self.vca.get_networks(vdc.get_name())
565 network_list = []
566 try:
567 for network in networks:
568 filter_dict = {}
569 netid = network.get_id().split(":")
570 if len(netid) != 4:
571 continue
572
573 filter_dict["name"] = network.get_name()
574 filter_dict["id"] = netid[3]
575 filter_dict["shared"] = network.get_IsShared()
576 filter_dict["tenant_id"] = vdc_uuid
577 if network.get_status() == 1:
578 filter_dict["admin_state_up"] = True
579 else:
580 filter_dict["admin_state_up"] = False
581 filter_dict["status"] = "ACTIVE"
582 filter_dict["type"] = "bridge"
583 network_list.append(filter_dict)
584 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
585 except:
586 self.logger.debug("Error in get_vcd_network_list")
587 self.logger.debug(traceback.format_exc())
588 pass
589
590 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
591 return network_list
592
593 def get_network_list(self, filter_dict={}):
594 """Obtain tenant networks of VIM
595 Filter_dict can be:
596 name: network name OR/AND
597 id: network uuid OR/AND
598 shared: boolean OR/AND
599 tenant_id: tenant OR/AND
600 admin_state_up: boolean
601 status: 'ACTIVE'
602
603 [{key : value , key : value}]
604
605 Returns the network list of dictionaries:
606 [{<the fields at Filter_dict plus some VIM specific>}, ...]
607 List can be empty
608 """
609
610 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
611
612 if not self.tenant_name:
613 raise vimconn.vimconnConnectionException("Tenant name is empty.")
614
615 vdc = self.get_vdc_details()
616 if vdc is None:
617 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
618
619 try:
620 vdcid = vdc.get_id().split(":")[3]
621 networks = self.vca.get_networks(vdc.get_name())
622 network_list = []
623
624 for network in networks:
625 filter_entry = {}
626 net_uuid = network.get_id().split(":")
627 if len(net_uuid) != 4:
628 continue
629 else:
630 net_uuid = net_uuid[3]
631 # create dict entry
632 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
633 vdcid,
634 network.get_name()))
635 filter_entry["name"] = network.get_name()
636 filter_entry["id"] = net_uuid
637 filter_entry["shared"] = network.get_IsShared()
638 filter_entry["tenant_id"] = vdcid
639 if network.get_status() == 1:
640 filter_entry["admin_state_up"] = True
641 else:
642 filter_entry["admin_state_up"] = False
643 filter_entry["status"] = "ACTIVE"
644 filter_entry["type"] = "bridge"
645 filtered_entry = filter_entry.copy()
646
647 if filter_dict is not None and filter_dict:
648 # we remove all the key : value we don't care and match only
649 # respected field
650 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
651 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
652 if filter_dict == filter_entry:
653 network_list.append(filtered_entry)
654 else:
655 network_list.append(filtered_entry)
656 except:
657 self.logger.debug("Error in get_vcd_network_list")
658 self.logger.debug(traceback.format_exc())
659
660 self.logger.debug("Returning {}".format(network_list))
661 return network_list
662
663 def get_network(self, net_id):
664 """Method obtains network details of net_id VIM network
665 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
666
667 try:
668 vdc = self.get_vdc_details()
669 vdc_id = vdc.get_id().split(":")[3]
670
671 networks = self.vca.get_networks(vdc.get_name())
672 filter_dict = {}
673
674 for network in networks:
675 vdc_network_id = network.get_id().split(":")
676 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
677 filter_dict["name"] = network.get_name()
678 filter_dict["id"] = vdc_network_id[3]
679 filter_dict["shared"] = network.get_IsShared()
680 filter_dict["tenant_id"] = vdc_id
681 if network.get_status() == 1:
682 filter_dict["admin_state_up"] = True
683 else:
684 filter_dict["admin_state_up"] = False
685 filter_dict["status"] = "ACTIVE"
686 filter_dict["type"] = "bridge"
687 self.logger.debug("Returning {}".format(filter_dict))
688 return filter_dict
689 except:
690 self.logger.debug("Error in get_network")
691 self.logger.debug(traceback.format_exc())
692
693 return filter_dict
694
695 def delete_network(self, net_id):
696 """
697 Method Deletes a tenant network from VIM, provide the network id.
698
699 Returns the network identifier or raise an exception
700 """
701
702 # ############# Stub code for SRIOV #################
703 # dvport_group = self.get_dvport_group(net_id)
704 # if dvport_group:
705 # #delete portgroup
706 # status = self.destroy_dvport_group(net_id)
707 # if status:
708 # # Remove vlanID from persistent info
709 # if net_id in self.persistent_info["used_vlanIDs"]:
710 # del self.persistent_info["used_vlanIDs"][net_id]
711 #
712 # return net_id
713
714 vcd_network = self.get_vcd_network(network_uuid=net_id)
715 if vcd_network is not None and vcd_network:
716 if self.delete_network_action(network_uuid=net_id):
717 return net_id
718 else:
719 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
720
721 def refresh_nets_status(self, net_list):
722 """Get the status of the networks
723 Params: the list of network identifiers
724 Returns a dictionary with:
725 net_id: #VIM id of this network
726 status: #Mandatory. Text with one of:
727 # DELETED (not found at vim)
728 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
729 # OTHER (Vim reported other status not understood)
730 # ERROR (VIM indicates an ERROR status)
731 # ACTIVE, INACTIVE, DOWN (admin down),
732 # BUILD (on building process)
733 #
734 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
735 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
736
737 """
738
739 dict_entry = {}
740 try:
741 for net in net_list:
742 errormsg = ''
743 vcd_network = self.get_vcd_network(network_uuid=net)
744 if vcd_network is not None and vcd_network:
745 if vcd_network['status'] == '1':
746 status = 'ACTIVE'
747 else:
748 status = 'DOWN'
749 else:
750 status = 'DELETED'
751 errormsg = 'Network not found.'
752
753 dict_entry[net] = {'status': status, 'error_msg': errormsg,
754 'vim_info': yaml.safe_dump(vcd_network)}
755 except:
756 self.logger.debug("Error in refresh_nets_status")
757 self.logger.debug(traceback.format_exc())
758
759 return dict_entry
760
761 def get_flavor(self, flavor_id):
762 """Obtain flavor details from the VIM
763 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
764 """
765 if flavor_id not in vimconnector.flavorlist:
766 raise vimconn.vimconnNotFoundException("Flavor not found.")
767 return vimconnector.flavorlist[flavor_id]
768
769 def new_flavor(self, flavor_data):
770 """Adds a tenant flavor to VIM
771 flavor_data contains a dictionary with information, keys:
772 name: flavor name
773 ram: memory (cloud type) in MBytes
774 vpcus: cpus (cloud type)
775 extended: EPA parameters
776 - numas: #items requested in same NUMA
777 memory: number of 1G huge pages memory
778 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
779 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
780 - name: interface name
781 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
782 bandwidth: X Gbps; requested guarantee bandwidth
783 vpci: requested virtual PCI address
784 disk: disk size
785 is_public:
786 #TODO to concrete
787 Returns the flavor identifier"""
788
789 # generate a new uuid put to internal dict and return it.
790 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
791 new_flavor=flavor_data
792 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
793 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
794 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
795
796 if not isinstance(ram, int):
797 raise vimconn.vimconnException("Non-integer value for ram")
798 elif not isinstance(cpu, int):
799 raise vimconn.vimconnException("Non-integer value for cpu")
800 elif not isinstance(disk, int):
801 raise vimconn.vimconnException("Non-integer value for disk")
802
803 extended_flv = flavor_data.get("extended")
804 if extended_flv:
805 numas=extended_flv.get("numas")
806 if numas:
807 for numa in numas:
808 #overwrite ram and vcpus
809 ram = numa['memory']*1024
810 if 'paired-threads' in numa:
811 cpu = numa['paired-threads']*2
812 elif 'cores' in numa:
813 cpu = numa['cores']
814 elif 'threads' in numa:
815 cpu = numa['threads']
816
817 new_flavor[FLAVOR_RAM_KEY] = ram
818 new_flavor[FLAVOR_VCPUS_KEY] = cpu
819 new_flavor[FLAVOR_DISK_KEY] = disk
820 # generate a new uuid put to internal dict and return it.
821 flavor_id = uuid.uuid4()
822 vimconnector.flavorlist[str(flavor_id)] = new_flavor
823 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
824
825 return str(flavor_id)
826
827 def delete_flavor(self, flavor_id):
828 """Deletes a tenant flavor from VIM identify by its id
829
830 Returns the used id or raise an exception
831 """
832 if flavor_id not in vimconnector.flavorlist:
833 raise vimconn.vimconnNotFoundException("Flavor not found.")
834
835 vimconnector.flavorlist.pop(flavor_id, None)
836 return flavor_id
837
838 def new_image(self, image_dict):
839 """
840 Adds a tenant image to VIM
841 Returns:
842 200, image-id if the image is created
843 <0, message if there is an error
844 """
845
846 return self.get_image_id_from_path(image_dict['location'])
847
    def delete_image(self, image_id):
        """Delete a tenant image (vCloud catalog and its items) from the VIM.

        Args:
            image_id: ID (catalog uuid) of the image to be deleted.

        Returns:
            The image identifier in UUID format on success.

        Raises:
            vimconn.vimconnConnectionException: when the admin login fails.
            vimconn.vimconnNotFoundException: when the catalog / a catalog item cannot be fetched.
            vimconn.vimconnException: when a DELETE call is rejected.
        """
        # Catalog removal requires provider (System org) admin credentials.
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        # Get Catalog details
        url_list = [self.vca.host, '/api/catalog/', image_id]
        catalog_herf = ''.join(url_list)
        response = Http.get(url=catalog_herf,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)

        if response.status_code != requests.codes.ok:
            self.logger.debug("delete_image():GET REST API call {} failed. "\
                              "Return status code {}".format(catalog_herf,
                                                             response.status_code))
            raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))

        # Build a prefix->uri namespace map; lxml needs explicit namespaces for find().
        lxmlroot_respond = lxmlElementTree.fromstring(response.content)
        namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
        namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

        # Every CatalogItem must be deleted before the catalog itself can go.
        catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
        catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
        for catalogItem in catalogItems:
            catalogItem_href = catalogItem.attrib['href']

            # GET details of the catalogItem to discover its 'remove' link.
            response = Http.get(url=catalogItem_href,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code != requests.codes.ok:
                self.logger.debug("delete_image():GET REST API call {} failed. "\
                                  "Return status code {}".format(catalog_herf,
                                                                 response.status_code))
                raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
                                                                                                catalogItem,
                                                                                                image_id))

            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
            catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']

            # Remove catalogItem
            response = Http.delete(url= catalogitem_remove_href,
                                   headers=vca.vcloud_session.get_vcloud_headers(),
                                   verify=vca.verify,
                                   logger=vca.logger)
            if response.status_code == requests.codes.no_content:
                self.logger.debug("Deleted Catalog item {}".format(catalogItem))
            else:
                raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))

        # Remove the (now empty) catalog via the admin API.
        url_list = [self.vca.host, '/api/admin/catalog/', image_id]
        catalog_remove_herf = ''.join(url_list)
        response = Http.delete(url= catalog_remove_herf,
                               headers=vca.vcloud_session.get_vcloud_headers(),
                               verify=vca.verify,
                               logger=vca.logger)

        if response.status_code == requests.codes.no_content:
            self.logger.debug("Deleted Catalog {}".format(image_id))
            return image_id
        else:
            raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
924
925
926 def catalog_exists(self, catalog_name, catalogs):
927 """
928
929 :param catalog_name:
930 :param catalogs:
931 :return:
932 """
933 for catalog in catalogs:
934 if catalog.name == catalog_name:
935 return True
936 return False
937
938 def create_vimcatalog(self, vca=None, catalog_name=None):
939 """ Create new catalog entry in vCloud director.
940
941 Args
942 vca: vCloud director.
943 catalog_name catalog that client wish to create. Note no validation done for a name.
944 Client must make sure that provide valid string representation.
945
946 Return (bool) True if catalog created.
947
948 """
949 try:
950 task = vca.create_catalog(catalog_name, catalog_name)
951 result = vca.block_until_completed(task)
952 if not result:
953 return False
954 catalogs = vca.get_catalogs()
955 except:
956 return False
957 return self.catalog_exists(catalog_name, catalogs)
958
    # noinspection PyIncorrectDocstring
    def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
                   description='', progress=False, chunk_bytes=128 * 1024):
        """
        Uploads a OVF file to a vCloud catalog

        The flow is: create a vApp template in the target catalog (which makes
        vCD parse the OVF descriptor), PUT the OVF file, then PUT every
        remaining file (VMDKs) listed by the template in fixed-size chunks.

        :param chunk_bytes: (int) size of each chunk PUT while uploading the VMDK payload.
        :param progress: (bool) when True a console progress bar is rendered per file.
        :param description: (str) free-form description stored with the vApp template.
        :param image_name: (str) accepted for interface compatibility; not used in the body.
        :param vca: vCloud director connection object.
        :param catalog_name: (str): The name of the catalog to upload the media.
        :param media_file_name: (str): The name of the local media file to upload.
        :return: (bool) True if the media file was successfully uploaded, false otherwise.
        """
        # NOTE(review): the return value of isfile() is discarded; a missing
        # file only surfaces via os.stat() raising OSError just below.
        os.path.isfile(media_file_name)
        statinfo = os.stat(media_file_name)

        #  find a catalog entry where we upload OVF.
        #  create vApp Template and check the status if vCD able to read OVF it will respond with appropriate
        #  status change.
        #  if VCD can parse OVF we upload VMDK file
        try:
            for catalog in vca.get_catalogs():
                if catalog_name != catalog.name:
                    continue
                # locate the catalog's "add media" link
                # NOTE(review): len() on a filter object requires Python 2 semantics.
                link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
                                           link.get_rel() == 'add', catalog.get_Link())
                assert len(link) == 1
                data = """
                <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
                """ % (escape(catalog_name), escape(description))
                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
                response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
                if response.status_code == requests.codes.created:
                    catalogItem = XmlElementTree.fromstring(response.content)
                    # the created catalog item references the vApp template entity
                    entity = [child for child in catalogItem if
                              child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                    href = entity.get('href')
                    template = href
                    response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify, logger=self.logger)

                    if response.status_code == requests.codes.ok:
                        # upload the OVF descriptor itself first
                        media = mediaType.parseString(response.content, True)
                        link = filter(lambda link: link.get_rel() == 'upload:default',
                                      media.get_Files().get_File()[0].get_Link())[0]
                        headers = vca.vcloud_session.get_vcloud_headers()
                        headers['Content-Type'] = 'Content-Type text/xml'
                        response = Http.put(link.get_href(),
                                            data=open(media_file_name, 'rb'),
                                            headers=headers,
                                            verify=vca.verify, logger=self.logger)
                        if response.status_code != requests.codes.ok:
                            self.logger.debug(
                                "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
                                                                                                      media_file_name))
                            return False

                    # TODO fix this with aync block
                    time.sleep(5)

                    self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))

                    # uploading VMDK file
                    # check status of OVF upload and upload remaining files.
                    response = Http.get(template,
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=self.logger)

                    if response.status_code == requests.codes.ok:
                        media = mediaType.parseString(response.content, True)
                        number_of_files = len(media.get_Files().get_File())
                        for index in xrange(0, number_of_files):
                            links_list = filter(lambda link: link.get_rel() == 'upload:default',
                                                media.get_Files().get_File()[index].get_Link())
                            for link in links_list:
                                # we skip ovf since it already uploaded.
                                if 'ovf' in link.get_href():
                                    continue
                                # The OVF file and VMDK must be in a same directory
                                head, tail = os.path.split(media_file_name)
                                file_vmdk = head + '/' + link.get_href().split("/")[-1]
                                if not os.path.isfile(file_vmdk):
                                    return False
                                statinfo = os.stat(file_vmdk)
                                if statinfo.st_size == 0:
                                    return False
                                hrefvmdk = link.get_href()

                                if progress:
                                    print("Uploading file: {}".format(file_vmdk))
                                if progress:
                                    widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
                                               FileTransferSpeed()]
                                    progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()

                                # chunked PUT of the VMDK payload
                                bytes_transferred = 0
                                f = open(file_vmdk, 'rb')
                                while bytes_transferred < statinfo.st_size:
                                    my_bytes = f.read(chunk_bytes)
                                    if len(my_bytes) <= chunk_bytes:
                                        headers = vca.vcloud_session.get_vcloud_headers()
                                        # NOTE(review): the range end is len(my_bytes) - 1 regardless of
                                        # bytes_transferred, which looks wrong for every chunk after the
                                        # first (should presumably be bytes_transferred + len(my_bytes) - 1)
                                        # — TODO confirm against the vCD transfer API.
                                        headers['Content-Range'] = 'bytes %s-%s/%s' % (
                                            bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
                                        headers['Content-Length'] = str(len(my_bytes))
                                        response = Http.put(hrefvmdk,
                                                            headers=headers,
                                                            data=my_bytes,
                                                            verify=vca.verify,
                                                            logger=None)

                                        if response.status_code == requests.codes.ok:
                                            bytes_transferred += len(my_bytes)
                                            if progress:
                                                progress_bar.update(bytes_transferred)
                                        else:
                                            self.logger.debug(
                                                'file upload failed with error: [%s] %s' % (response.status_code,
                                                                                            response.content))

                                            f.close()
                                            return False
                                f.close()
                                if progress:
                                    progress_bar.finish()
                                time.sleep(10)
                        return True
                    else:
                        self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
                                          format(catalog_name, media_file_name))
                        return False
        except Exception as exp:
            self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
                              .format(catalog_name,media_file_name, exp))
            raise vimconn.vimconnException(
                "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
                .format(catalog_name,media_file_name, exp))

        # no catalog matched catalog_name
        self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
        return False
1102
1103 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1104 """Upload media file"""
1105 # TODO add named parameters for readability
1106
1107 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1108 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1109
1110 def validate_uuid4(self, uuid_string=None):
1111 """ Method validate correct format of UUID.
1112
1113 Return: true if string represent valid uuid
1114 """
1115 try:
1116 val = uuid.UUID(uuid_string, version=4)
1117 except ValueError:
1118 return False
1119 return True
1120
1121 def get_catalogid(self, catalog_name=None, catalogs=None):
1122 """ Method check catalog and return catalog ID in UUID format.
1123
1124 Args
1125 catalog_name: catalog name as string
1126 catalogs: list of catalogs.
1127
1128 Return: catalogs uuid
1129 """
1130
1131 for catalog in catalogs:
1132 if catalog.name == catalog_name:
1133 catalog_id = catalog.get_id().split(":")
1134 return catalog_id[3]
1135 return None
1136
1137 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1138 """ Method check catalog and return catalog name lookup done by catalog UUID.
1139
1140 Args
1141 catalog_name: catalog name as string
1142 catalogs: list of catalogs.
1143
1144 Return: catalogs name or None
1145 """
1146
1147 if not self.validate_uuid4(uuid_string=catalog_uuid):
1148 return None
1149
1150 for catalog in catalogs:
1151 catalog_id = catalog.get_id().split(":")[3]
1152 if catalog_id == catalog_uuid:
1153 return catalog.name
1154 return None
1155
1156 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1157 """ Method check catalog and return catalog name lookup done by catalog UUID.
1158
1159 Args
1160 catalog_name: catalog name as string
1161 catalogs: list of catalogs.
1162
1163 Return: catalogs name or None
1164 """
1165
1166 if not self.validate_uuid4(uuid_string=catalog_uuid):
1167 return None
1168
1169 for catalog in catalogs:
1170 catalog_id = catalog.get_id().split(":")[3]
1171 if catalog_id == catalog_uuid:
1172 return catalog
1173 return None
1174
    def get_image_id_from_path(self, path=None, progress=False):
        """ Method upload OVF image to vCloud director.

        Each OVF image represented as single catalog entry in vcloud director.
        The method check for existing catalog entry. The check done by file name without file extension.

        if given catalog name already present method will respond with existing catalog uuid otherwise
        it will create new catalog entry and upload OVF file to newly created catalog.

        If method can't create catalog entry or upload a file it will throw exception.

        Method accept boolean flag progress that will output progress bar. It useful method
        for standalone upload use case. In case to test large file upload.

        Args
            path: - valid path to OVF file.
            progress - boolean progress bar show progress bar.

        Return: if image uploaded correct method will provide image catalog UUID.

        Raises:
            vimconn.vimconnException on missing/unreadable path, non-OVF input,
            or any catalog creation / upload failure.
        """

        if not path:
            raise vimconn.vimconnException("Image path can't be None.")

        if not os.path.isfile(path):
            raise vimconn.vimconnException("Can't read file. File not found.")

        if not os.access(path, os.R_OK):
            raise vimconn.vimconnException("Can't read file. Check file permission to read.")

        self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))

        dirpath, filename = os.path.split(path)
        flname, file_extension = os.path.splitext(path)
        if file_extension != '.ovf':
            self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
            raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")

        catalog_name = os.path.splitext(filename)[0]
        # NOTE(review): the md5 is computed over the *path string*, not the
        # file contents, so the same image at a different path produces a
        # different catalog name — TODO confirm this is intentional.
        catalog_md5_name = hashlib.md5(path).hexdigest()
        self.logger.debug("File name {} Catalog Name {} file path {} "
                          "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))

        try:
            catalogs = self.vca.get_catalogs()
        except Exception as exp:
            self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
            raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))

        if len(catalogs) == 0:
            # no catalogs at all: create one and upload the image into it
            self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
            result = self.create_vimcatalog(self.vca, catalog_md5_name)
            if not result:
                raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
            result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
                                          media_name=filename, medial_file_name=path, progress=progress)
            if not result:
                raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
            return self.get_catalogid(catalog_name, self.vca.get_catalogs())
        else:
            for catalog in catalogs:
                # search for existing catalog if we find same name we return ID
                # TODO optimize this
                if catalog.name == catalog_md5_name:
                    self.logger.debug("Found existing catalog entry for {} "
                                      "catalog id {}".format(catalog_name,
                                                             self.get_catalogid(catalog_md5_name, catalogs)))
                    return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())

        # if we didn't find existing catalog we create a new one and upload image.
        self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
        result = self.create_vimcatalog(self.vca, catalog_md5_name)
        if not result:
            raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))

        result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
                                      media_name=filename, medial_file_name=path, progress=progress)
        if not result:
            raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))

        return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1256
1257 def get_image_list(self, filter_dict={}):
1258 '''Obtain tenant images from VIM
1259 Filter_dict can be:
1260 name: image name
1261 id: image uuid
1262 checksum: image checksum
1263 location: image path
1264 Returns the image list of dictionaries:
1265 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1266 List can be empty
1267 '''
1268
1269 try:
1270 image_list = []
1271 catalogs = self.vca.get_catalogs()
1272 if len(catalogs) == 0:
1273 return image_list
1274 else:
1275 for catalog in catalogs:
1276 catalog_uuid = catalog.get_id().split(":")[3]
1277 name = catalog.name
1278 filtered_dict = {}
1279 if filter_dict.get("name") and filter_dict["name"] != name:
1280 continue
1281 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1282 continue
1283 filtered_dict ["name"] = name
1284 filtered_dict ["id"] = catalog_uuid
1285 image_list.append(filtered_dict)
1286
1287 self.logger.debug("List of already created catalog items: {}".format(image_list))
1288 return image_list
1289 except Exception as exp:
1290 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1291
    def get_vappid(self, vdc=None, vapp_name=None):
        """ Method takes vdc object and vApp name and returns vapp uuid or None

        Args:
            vdc: The VDC object.
            vapp_name: is application vappp name identifier

        Returns:
            The vApp UUID string on a unique match, None when no (or more than
            one) entity matches, False when the lookup raised an exception.
        """
        if vdc is None or vapp_name is None:
            return None
        # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
        try:
            # NOTE(review): len() on filter() relies on Python 2 semantics
            # (filter returns a list); under Python 3 this raises TypeError
            # and falls into the except branch.
            refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
                          vdc.ResourceEntities.ResourceEntity)
            if len(refs) == 1:
                # strip everything up to and including "vapp-" from the href
                return refs[0].href.split("vapp")[1][1:]
        except Exception as e:
            self.logger.exception(e)
            # NOTE(review): error path returns False while the no-match path
            # returns None — callers must treat both as falsy.
            return False
        return None
1314
1315 def check_vapp(self, vdc=None, vapp_uuid=None):
1316 """ Method Method returns True or False if vapp deployed in vCloud director
1317
1318 Args:
1319 vca: Connector to VCA
1320 vdc: The VDC object.
1321 vappid: vappid is application identifier
1322
1323 Returns:
1324 The return True if vApp deployed
1325 :param vdc:
1326 :param vapp_uuid:
1327 """
1328 try:
1329 refs = filter(lambda ref:
1330 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1331 vdc.ResourceEntities.ResourceEntity)
1332 for ref in refs:
1333 vappid = ref.href.split("vapp")[1][1:]
1334 # find vapp with respected vapp uuid
1335 if vappid == vapp_uuid:
1336 return True
1337 except Exception as e:
1338 self.logger.exception(e)
1339 return False
1340 return False
1341
    def get_namebyvappid(self, vdc=None, vapp_uuid=None):
        """Method returns vApp name from vCD and lookup done by vapp_id.

        Args:
            vdc: The VDC object.
            vapp_uuid: vApp identifier (bare UUID, without the href prefix).

        Returns:
            The vApp name read from the entity's XML, otherwise None (also on
            any lookup/parse error).
        """

        try:
            # consider only vApp entities in the VDC
            refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
                          vdc.ResourceEntities.ResourceEntity)
            for ref in refs:
                # we care only about UUID the rest doesn't matter
                vappid = ref.href.split("vapp")[1][1:]
                if vappid == vapp_uuid:
                    # fetch the full vApp record to read its display name
                    response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
                                        logger=self.logger)

                    #Retry login if session expired & retry sending request
                    if response.status_code == 403:
                        response = self.retry_rest('GET', ref.href)

                    tree = XmlElementTree.fromstring(response.content)
                    return tree.attrib['name']
        except Exception as e:
            self.logger.exception(e)
            return None
        return None
1374
    def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
                       cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
        """Adds a VM instance to VIM
        Params:
            start: indicates if VM must start or boot in pause mode. Ignored
            image_id,flavor_id: image and flavor uuid
            net_list: list of interfaces, each one is a dictionary with:
                name:
                net_id: network uuid to connect
                vpci: virtual vcpi to assign
                model: interface model, virtio, e2000, ...
                mac_address:
                use: 'data', 'bridge', 'mgmt'
                type: 'virtual', 'PF', 'VF', 'VFnotShared'
                vim_id: filled/added by this function
            cloud_config: can be a text script to be passed directly to cloud-init,
                or an object to inject users and ssh keys with format:
                    key-pairs: [] list of keys to install to the default user
                    users: [{ name, key-pairs: []}] list of users to add with their key-pair
            #TODO ip, security groups
        Returns >=0, the instance identifier
            <0, error_text
        """
        # NOTE(review): mutable default argument net_list={} is shared across
        # calls; it is only iterated here, but keep it in mind when editing.

        self.logger.info("Creating new instance for entry {}".format(name))
        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
                                    description, start, image_id, flavor_id, net_list, cloud_config, disk_list))

        #new vm name = vmname + tenant_id + uuid
        new_vm_name = [name, '-', str(uuid.uuid4())]
        vmname_andid = ''.join(new_vm_name)

        # if vm already deployed we return existing uuid
        # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
        # if vapp_uuid is not None:
        #     return vapp_uuid

        # we check for presence of VDC, Catalog entry and Flavor.
        vdc = self.get_vdc_details()
        if vdc is None:
            raise vimconn.vimconnNotFoundException(
                "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
        catalogs = self.vca.get_catalogs()
        if catalogs is None:
            #Retry once, if failed by refreshing token
            self.get_token()
            catalogs = self.vca.get_catalogs()
            if catalogs is None:
                raise vimconn.vimconnNotFoundException(
                    "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))

        catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
        if catalog_hash_name:
            self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
        else:
            raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
                                                   "(Failed retrieve catalog information {})".format(name, image_id))


        # Set vCPU and Memory based on flavor.
        vm_cpus = None
        vm_memory = None
        vm_disk = None
        numas = None

        if flavor_id is not None:
            if flavor_id not in vimconnector.flavorlist:
                raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
                                                       "Failed retrieve flavor information "
                                                       "flavor id {}".format(name, flavor_id))
            else:
                try:
                    flavor = vimconnector.flavorlist[flavor_id]
                    vm_cpus = flavor[FLAVOR_VCPUS_KEY]
                    vm_memory = flavor[FLAVOR_RAM_KEY]
                    vm_disk = flavor[FLAVOR_DISK_KEY]
                    extended = flavor.get("extended", None)
                    if extended:
                        numas=extended.get("numas", None)

                except Exception as exp:
                    raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))

        # image upload creates template name as catalog name space Template.
        templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
        # NOTE(review): power_on is computed but never used below (the vApp is
        # deployed with powerOn=False and powered on explicitly) — TODO confirm
        # it is intentionally unused.
        power_on = 'false'
        if start:
            power_on = 'true'

        # client must provide at least one entry in net_list if not we report error
        #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
        #If no mgmt, then the 1st NN in netlist is considered as primary net.
        primary_net = None
        primary_netname = None
        network_mode = 'bridged'
        if net_list is not None and len(net_list) > 0:
            for net in net_list:
                if 'use' in net and net['use'] == 'mgmt':
                    primary_net = net
            if primary_net is None:
                primary_net = net_list[0]

            try:
                primary_net_id = primary_net['net_id']
                network_dict = self.get_vcd_network(network_uuid=primary_net_id)
                if 'name' in network_dict:
                    primary_netname = network_dict['name']

            except KeyError:
                raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
        else:
            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))

        # use: 'data', 'bridge', 'mgmt'
        # create vApp. Set vcpu and ram based on flavor id.
        try:
            # one silent retry with a refreshed token when the first attempt fails
            for retry in (1,2):
                vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
                                                self.get_catalogbyid(image_id, catalogs),
                                                network_name=None,  # None while creating vapp
                                                network_mode=network_mode,
                                                vm_name=vmname_andid,
                                                vm_cpus=vm_cpus,  # can be None if flavor is None
                                                vm_memory=vm_memory)  # can be None if flavor is None

                if not vapptask and retry==1:
                    self.get_token() # Retry getting token
                    continue
                else:
                    break

            if vapptask is None or vapptask is False:
                raise vimconn.vimconnUnexpectedResponse(
                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
            if type(vapptask) is VappTask:
                self.vca.block_until_completed(vapptask)

        except Exception as exp:
            raise vimconn.vimconnUnexpectedResponse(
                "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))

        # we should have now vapp in undeployed state.
        try:
            vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)

        except Exception as exp:
            raise vimconn.vimconnUnexpectedResponse(
                "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
                .format(vmname_andid, exp))

        if vapp_uuid is None:
            raise vimconn.vimconnUnexpectedResponse(
                "new_vminstance(): Failed to retrieve vApp {} after creation".format(
                    vmname_andid))

        #Add PCI passthrough/SRIOV configurations
        vm_obj = None
        pci_devices_info = []
        sriov_net_info = []
        reserve_memory = False

        # classify requested interfaces: PF -> PCI passthrough, VF -> SRIOV
        for net in net_list:
            if net["type"]=="PF":
                pci_devices_info.append(net)
            elif (net["type"]=="VF" or net["type"]=="VFnotShared") and 'net_id'in net:
                sriov_net_info.append(net)

        #Add PCI
        if len(pci_devices_info) > 0:
            self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
                                                                            vmname_andid ))
            PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
                                                                              pci_devices_info,
                                                                              vmname_andid)
            if PCI_devices_status:
                self.logger.info("Added PCI devives {} to VM {}".format(
                                                            pci_devices_info,
                                                            vmname_andid)
                                 )
                reserve_memory = True
            else:
                self.logger.info("Fail to add PCI devives {} to VM {}".format(
                                                            pci_devices_info,
                                                            vmname_andid)
                                 )

        vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
        # Modify vm disk
        if vm_disk:
            #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
            result = self.modify_vm_disk(vapp_uuid, vm_disk)
            if result :
                self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))

        #Add new or existing disks to vApp
        if disk_list:
            added_existing_disk = False
            for disk in disk_list:
                if 'device_type' in disk and disk['device_type'] == 'cdrom':
                    image_id = disk['image_id']
                    # Adding CD-ROM to VM
                    # will revisit code once specification ready to support this feature
                    self.insert_media_to_vm(vapp, image_id)
                elif "image_id" in disk and disk["image_id"] is not None:
                    self.logger.debug("Adding existing disk from image {} to vm {} ".format(
                                                                    disk["image_id"] , vapp_uuid))
                    self.add_existing_disk(catalogs=catalogs,
                                           image_id=disk["image_id"],
                                           size = disk["size"],
                                           template_name=templateName,
                                           vapp_uuid=vapp_uuid
                                           )
                    added_existing_disk = True
                else:
                    #Wait till added existing disk gets reflected into vCD database/API
                    if added_existing_disk:
                        time.sleep(5)
                        added_existing_disk = False
                    self.add_new_disk(vapp_uuid, disk['size'])

        if numas:
            # Assigning numa affinity setting
            for numa in numas:
                if 'paired-threads-id' in numa:
                    paired_threads_id = numa['paired-threads-id']
                    self.set_numa_affinity(vapp_uuid, paired_threads_id)

        # add NICs & connect to networks in netlist
        try:
            self.logger.info("Request to connect VM to a network: {}".format(net_list))
            nicIndex = 0
            primary_nic_index = 0
            for net in net_list:
                # openmano uses network id in UUID format.
                # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
                # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
                #   'vpci': '0000:00:11.0', 'name': 'eth0'}]

                if 'net_id' not in net:
                    continue

                interface_net_id = net['net_id']
                interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
                interface_network_mode = net['use']

                if interface_network_mode == 'mgmt':
                    primary_nic_index = nicIndex

                """- POOL (A static IP address is allocated automatically from a pool of addresses.)
                 - DHCP (The IP address is obtained from a DHCP service.)
                 - MANUAL (The IP address is assigned manually in the IpAddress element.)
                 - NONE (No IP addressing mode specified.)"""

                if primary_netname is not None:
                    # NOTE(review): len() on filter() relies on Python 2 list
                    # semantics here.
                    nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
                    if len(nets) == 1:
                        self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))

                        vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
                        task = vapp.connect_to_network(nets[0].name, nets[0].href)
                        if type(task) is GenericTask:
                            self.vca.block_until_completed(task)
                        # connect network to VM - with all DHCP by default

                        # PF/VF interfaces were handled above; only attach a vCD
                        # network adapter for regular virtual NICs
                        type_list = ['PF','VF','VFnotShared']
                        if 'type' in net and net['type'] not in type_list:
                            # fetching nic type from vnf
                            if 'model' in net:
                                nic_type = net['model']
                                self.logger.info("new_vminstance(): adding network adapter "\
                                                          "to a network {}".format(nets[0].name))
                                self.add_network_adapter_to_vms(vapp, nets[0].name,
                                                                primary_nic_index,
                                                                nicIndex,
                                                                net,
                                                                nic_type=nic_type)
                            else:
                                self.logger.info("new_vminstance(): adding network adapter "\
                                                         "to a network {}".format(nets[0].name))
                                self.add_network_adapter_to_vms(vapp, nets[0].name,
                                                                primary_nic_index,
                                                                nicIndex,
                                                                net)
                nicIndex += 1

            vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
            # cloud-init for ssh-key injection
            if cloud_config:
                self.cloud_init(vapp,cloud_config)

            # deploy and power on vm
            self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
            deploytask = vapp.deploy(powerOn=False)
            if type(deploytask) is GenericTask:
                self.vca.block_until_completed(deploytask)

            # ############# Stub code for SRIOV #################
            #Add SRIOV
            # if len(sriov_net_info) > 0:
            #     self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
            #                                                                         vmname_andid ))
            #     sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
            #                                                           sriov_net_info,
            #                                                           vmname_andid)
            #     if sriov_status:
            #         self.logger.info("Added SRIOV {} to VM {}".format(
            #                                                     sriov_net_info,
            #                                                     vmname_andid)
            #                          )
            #         reserve_memory = True
            #     else:
            #         self.logger.info("Fail to add SRIOV {} to VM {}".format(
            #                                                     sriov_net_info,
            #                                                     vmname_andid)
            #                          )

            # If VM has PCI devices or SRIOV reserve memory for VM
            if reserve_memory:
                # reserve all configured memory so PCI passthrough works;
                # vm_obj/vcenter_conect were set by add_pci_devices() above
                memReserve = vm_obj.config.hardware.memoryMB
                spec = vim.vm.ConfigSpec()
                spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
                task = vm_obj.ReconfigVM_Task(spec=spec)
                if task:
                    result = self.wait_for_vcenter_task(task, vcenter_conect)
                    self.logger.info("Reserved memmoery {} MB for "\
                                     "VM VM status: {}".format(str(memReserve),result))
                else:
                    self.logger.info("Fail to reserved memmoery {} to VM {}".format(
                                                                str(memReserve),str(vm_obj)))

            self.logger.debug("new_vminstance(): power on vApp {} ".format(name))

            vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
            poweron_task = vapp.poweron()
            if type(poweron_task) is GenericTask:
                self.vca.block_until_completed(poweron_task)

        except Exception as exp :
            # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
            self.logger.debug("new_vminstance(): Failed create new vm instance {} with exception {}"
                              .format(name, exp))
            raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
                                           .format(name, exp))

        # check if vApp deployed and if that the case return vApp UUID otherwise -1
        wait_time = 0
        vapp_uuid = None
        while wait_time <= MAX_WAIT_TIME:
            try:
                vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
            except Exception as exp:
                raise vimconn.vimconnUnexpectedResponse(
                    "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
                    .format(vmname_andid, exp))

            if vapp and vapp.me.deployed:
                vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
                break
            else:
                self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
                time.sleep(INTERVAL_TIME)

            wait_time +=INTERVAL_TIME

        if vapp_uuid is not None:
            return vapp_uuid
        else:
            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
1743
1744 ##
1745 ##
1746 ## based on current discussion
1747 ##
1748 ##
1749 ## server:
1750 # created: '2016-09-08T11:51:58'
1751 # description: simple-instance.linux1.1
1752 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1753 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1754 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1755 # status: ACTIVE
1756 # error_msg:
1757 # interfaces: …
1758 #
1759 def get_vminstance(self, vim_vm_uuid=None):
1760 """Returns the VM instance information from VIM"""
1761
1762 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1763
1764 vdc = self.get_vdc_details()
1765 if vdc is None:
1766 raise vimconn.vimconnConnectionException(
1767 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1768
1769 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1770 if not vm_info_dict:
1771 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1772 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1773
1774 status_key = vm_info_dict['status']
1775 error = ''
1776 try:
1777 vm_dict = {'created': vm_info_dict['created'],
1778 'description': vm_info_dict['name'],
1779 'status': vcdStatusCode2manoFormat[int(status_key)],
1780 'hostId': vm_info_dict['vmuuid'],
1781 'error_msg': error,
1782 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1783
1784 if 'interfaces' in vm_info_dict:
1785 vm_dict['interfaces'] = vm_info_dict['interfaces']
1786 else:
1787 vm_dict['interfaces'] = []
1788 except KeyError:
1789 vm_dict = {'created': '',
1790 'description': '',
1791 'status': vcdStatusCode2manoFormat[int(-1)],
1792 'hostId': vm_info_dict['vmuuid'],
1793 'error_msg': "Inconsistency state",
1794 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1795
1796 return vm_dict
1797
1798 def delete_vminstance(self, vm__vim_uuid):
1799 """Method poweroff and remove VM instance from vcloud director network.
1800
1801 Args:
1802 vm__vim_uuid: VM UUID
1803
1804 Returns:
1805 Returns the instance identifier
1806 """
1807
1808 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1809
1810 vdc = self.get_vdc_details()
1811 if vdc is None:
1812 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1813 self.tenant_name))
1814 raise vimconn.vimconnException(
1815 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1816
1817 try:
1818 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1819 if vapp_name is None:
1820 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1821 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1822 else:
1823 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1824
1825 # Delete vApp and wait for status change if task executed and vApp is None.
1826 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1827
1828 if vapp:
1829 if vapp.me.deployed:
1830 self.logger.info("Powering off vApp {}".format(vapp_name))
1831 #Power off vApp
1832 powered_off = False
1833 wait_time = 0
1834 while wait_time <= MAX_WAIT_TIME:
1835 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1836 if not vapp:
1837 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1838 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1839
1840 power_off_task = vapp.poweroff()
1841 if type(power_off_task) is GenericTask:
1842 result = self.vca.block_until_completed(power_off_task)
1843 if result:
1844 powered_off = True
1845 break
1846 else:
1847 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1848 time.sleep(INTERVAL_TIME)
1849
1850 wait_time +=INTERVAL_TIME
1851 if not powered_off:
1852 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1853 else:
1854 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1855
1856 #Undeploy vApp
1857 self.logger.info("Undeploy vApp {}".format(vapp_name))
1858 wait_time = 0
1859 undeployed = False
1860 while wait_time <= MAX_WAIT_TIME:
1861 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1862 if not vapp:
1863 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1864 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1865 undeploy_task = vapp.undeploy(action='powerOff')
1866
1867 if type(undeploy_task) is GenericTask:
1868 result = self.vca.block_until_completed(undeploy_task)
1869 if result:
1870 undeployed = True
1871 break
1872 else:
1873 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1874 time.sleep(INTERVAL_TIME)
1875
1876 wait_time +=INTERVAL_TIME
1877
1878 if not undeployed:
1879 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1880
1881 # delete vapp
1882 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1883 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1884
1885 if vapp is not None:
1886 wait_time = 0
1887 result = False
1888
1889 while wait_time <= MAX_WAIT_TIME:
1890 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1891 if not vapp:
1892 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1893 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1894
1895 delete_task = vapp.delete()
1896
1897 if type(delete_task) is GenericTask:
1898 self.vca.block_until_completed(delete_task)
1899 result = self.vca.block_until_completed(delete_task)
1900 if result:
1901 break
1902 else:
1903 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1904 time.sleep(INTERVAL_TIME)
1905
1906 wait_time +=INTERVAL_TIME
1907
1908 if not result:
1909 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1910
1911 except:
1912 self.logger.debug(traceback.format_exc())
1913 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1914
1915 if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
1916 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
1917 return vm__vim_uuid
1918 else:
1919 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1920
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id: #VIM id of this Virtual Machine
                status: #Mandatory. Text with one of:
                # DELETED (not found at vim)
                # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                # OTHER (Vim reported other status not understood)
                # ERROR (VIM indicates an ERROR status)
                # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                # CREATING (on building process), ERROR
                # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                #
                error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
                     mac_address: #Text format XX:XX:XX:XX:XX:XX
                     vim_net_id: #network id where this interface is connected
                     vim_interface_id: #interface/port VIM id
                     ip_address: #null, or text with IPv4, IPv6 address
        """

        self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))

        vdc = self.get_vdc_details()
        if vdc is None:
            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))

        vms_dict = {}
        # NSX edge list is fetched lazily at most once and reused for all VMs
        # in this refresh cycle (only needed when a NIC has no IP reported).
        nsx_edge_list = []
        for vmuuid in vm_list:
            vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
            if vmname is not None:

                try:
                    vm_pci_details = self.get_vm_pci_details(vmuuid)
                    the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
                    vm_info = the_vapp.get_vms_details()
                    # NOTE(review): vm_status is computed but never used below.
                    vm_status = vm_info[0]['status']
                    # Merge PCI passthrough details into the raw vim_info blob.
                    vm_info[0].update(vm_pci_details)

                    vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                               'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                               'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}

                    # get networks
                    vm_app_networks = the_vapp.get_vms_network_info()
                    for vapp_network in vm_app_networks:
                        for vm_network in vapp_network:
                            if vm_network['name'] == vmname:
                                #Assign IP Address based on MAC Address in NSX DHCP lease info
                                if vm_network['ip'] is None:
                                    if not nsx_edge_list:
                                        nsx_edge_list = self.get_edge_details()
                                        if nsx_edge_list is None:
                                            raise vimconn.vimconnException("refresh_vms_status:"\
                                                                           "Failed to get edge details from NSX Manager")
                                    if vm_network['mac'] is not None:
                                        vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])

                                vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
                                interface = {"mac_address": vm_network['mac'],
                                             "vim_net_id": vm_net_id,
                                             "vim_interface_id": vm_net_id,
                                             'ip_address': vm_network['ip']}
                                # interface['vim_info'] = yaml.safe_dump(vm_network)
                                vm_dict["interfaces"].append(interface)
                    # add a vm to vm dict
                    vms_dict.setdefault(vmuuid, vm_dict)
                except Exception as exp:
                    # Best-effort per VM: a failure on one VM is logged and the
                    # remaining VMs are still refreshed.
                    self.logger.debug("Error in response {}".format(exp))
                    self.logger.debug(traceback.format_exc())

        return vms_dict
1997
1998
    def get_edge_details(self):
        """Get the NSX edge list from NSX Manager
        Returns list of NSX edges

        Raises:
            vimconnException: when NSX Manager reports no edges or the
                request/parse fails.
        """
        edge_list = []
        rheaders = {'Content-Type': 'application/xml'}
        nsx_api_url = '/api/4.0/edges'

        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))

        try:
            # NOTE(review): certificate verification is disabled for the NSX
            # Manager endpoint — presumably a lab/self-signed setup; confirm.
            resp = requests.get(self.nsx_manager + nsx_api_url,
                                auth = (self.nsx_user, self.nsx_password),
                                verify = False, headers = rheaders)
            if resp.status_code == requests.codes.ok:
                paged_Edge_List = XmlElementTree.fromstring(resp.text)
                for edge_pages in paged_Edge_List:
                    if edge_pages.tag == 'edgePage':
                        for edge_summary in edge_pages:
                            # A totalCount of 0 means the page exists but
                            # contains no edges at all.
                            if edge_summary.tag == 'pagingInfo':
                                for element in edge_summary:
                                    if element.tag == 'totalCount' and element.text == '0':
                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
                                                                       .format(self.nsx_manager))

                            # Collect the edge identifier of every summary.
                            if edge_summary.tag == 'edgeSummary':
                                for element in edge_summary:
                                    if element.tag == 'id':
                                        edge_list.append(element.text)
                    else:
                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
                                                       .format(self.nsx_manager))

                if not edge_list:
                    raise vimconn.vimconnException("get_edge_details: "\
                                                   "No NSX edge details found: {}"
                                                   .format(self.nsx_manager))
                else:
                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
                    return edge_list
            else:
                self.logger.debug("get_edge_details: "
                                  "Failed to get NSX edge details from NSX Manager: {}"
                                  .format(resp.content))
                return None

        except Exception as exp:
            # Note: this also re-wraps the vimconnExceptions raised above.
            self.logger.debug("get_edge_details: "\
                              "Failed to get NSX edge details from NSX Manager: {}"
                              .format(exp))
            raise vimconn.vimconnException("get_edge_details: "\
                                           "Failed to get NSX edge details from NSX Manager: {}"
                                           .format(exp))
2052
2053
2054 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2055 """Get IP address details from NSX edges, using the MAC address
2056 PARAMS: nsx_edges : List of NSX edges
2057 mac_address : Find IP address corresponding to this MAC address
2058 Returns: IP address corrresponding to the provided MAC address
2059 """
2060
2061 ip_addr = None
2062 rheaders = {'Content-Type': 'application/xml'}
2063
2064 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2065
2066 try:
2067 for edge in nsx_edges:
2068 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2069
2070 resp = requests.get(self.nsx_manager + nsx_api_url,
2071 auth = (self.nsx_user, self.nsx_password),
2072 verify = False, headers = rheaders)
2073
2074 if resp.status_code == requests.codes.ok:
2075 dhcp_leases = XmlElementTree.fromstring(resp.text)
2076 for child in dhcp_leases:
2077 if child.tag == 'dhcpLeaseInfo':
2078 dhcpLeaseInfo = child
2079 for leaseInfo in dhcpLeaseInfo:
2080 for elem in leaseInfo:
2081 if (elem.tag)=='macAddress':
2082 edge_mac_addr = elem.text
2083 if (elem.tag)=='ipAddress':
2084 ip_addr = elem.text
2085 if edge_mac_addr is not None:
2086 if edge_mac_addr == mac_address:
2087 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2088 .format(ip_addr, mac_address,edge))
2089 return ip_addr
2090 else:
2091 self.logger.debug("get_ipaddr_from_NSXedge: "\
2092 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2093 .format(resp.content))
2094
2095 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2096 return None
2097
2098 except XmlElementTree.ParseError as Err:
2099 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2100
2101
    def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
        """Send and action over a VM instance from VIM
        Returns the vm_id if the action was successfully sent to the VIM

        Args:
            vm__vim_uuid: vApp UUID of the target VM.
            action_dict: dictionary whose key selects the action; keys handled
                here: start, rebuild, pause, resume, shutoff/shutdown,
                forceOff, reboot.

        Raises:
            vimconnException: on missing arguments, unknown action, or any
                failure while executing the action.
        """

        self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
        if vm__vim_uuid is None or action_dict is None:
            raise vimconn.vimconnException("Invalid request. VM id or action is None.")

        vdc = self.get_vdc_details()
        if vdc is None:
            return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)

        vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
        if vapp_name is None:
            self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
            raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
        else:
            self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))

        try:
            the_vapp = self.vca.get_vapp(vdc, vapp_name)
            # TODO fix all status
            if "start" in action_dict:
                # Power on only makes sense from Suspended/Powered off.
                vm_info = the_vapp.get_vms_details()
                vm_status = vm_info[0]['status']
                self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
                if vm_status == "Suspended" or vm_status == "Powered off":
                    power_on_task = the_vapp.poweron()
                    result = self.vca.block_until_completed(power_on_task)
                    self.instance_actions_result("start", result, vapp_name)
            elif "rebuild" in action_dict:
                self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
                rebuild_task = the_vapp.deploy(powerOn=True)
                result = self.vca.block_until_completed(rebuild_task)
                self.instance_actions_result("rebuild", result, vapp_name)
            elif "pause" in action_dict:
                self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
                pause_task = the_vapp.undeploy(action='suspend')
                result = self.vca.block_until_completed(pause_task)
                self.instance_actions_result("pause", result, vapp_name)
            elif "resume" in action_dict:
                self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
                power_task = the_vapp.poweron()
                result = self.vca.block_until_completed(power_task)
                self.instance_actions_result("resume", result, vapp_name)
            elif "shutoff" in action_dict or "shutdown" in action_dict:
                # .items()[0] is Python-2-only indexing of the key/value pairs.
                action_name , value = action_dict.items()[0]
                self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
                power_off_task = the_vapp.undeploy(action='powerOff')
                result = self.vca.block_until_completed(power_off_task)
                if action_name == "shutdown":
                    self.instance_actions_result("shutdown", result, vapp_name)
                else:
                    self.instance_actions_result("shutoff", result, vapp_name)
            elif "forceOff" in action_dict:
                result = the_vapp.undeploy(action='force')
                self.instance_actions_result("forceOff", result, vapp_name)
            elif "reboot" in action_dict:
                self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
                # NOTE(review): unlike the other actions, the reboot task is
                # neither waited on nor reported — confirm this is intended.
                reboot_task = the_vapp.reboot()
            else:
                raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
            return vm__vim_uuid
        except Exception as exp :
            self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
            raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2168
2169 def instance_actions_result(self, action, result, vapp_name):
2170 if result:
2171 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2172 else:
2173 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2174
    def get_vminstance_console(self, vm_id, console_type="vnc"):
        """
        Get a console for the virtual machine
        Params:
            vm_id: uuid of the VM
            console_type, can be:
                "novnc" (by default), "xvpvnc" for VNC types,
                "rdp-html5" for RDP types, "spice-html5" for SPICE types
        Returns dict with the console parameters:
                protocol: ssh, ftp, http, https, ...
                server:   usually ip address
                port:     the http, ssh, ... port
                suffix:   extra text, e.g. the http path and query string

        Raises:
            vimconnNotImplemented: always; not supported by this connector.
        """
        raise vimconn.vimconnNotImplemented("Should have implemented this")
2190
2191 # NOT USED METHODS in current version
2192
    def host_vim2gui(self, host, server_dict):
        """Transform host dictionary from VIM format to GUI format,
        and append to the server_dict

        Raises:
            vimconnNotImplemented: always; not supported by this connector.
        """
        raise vimconn.vimconnNotImplemented("Should have implemented this")
2198
    def get_hosts_info(self):
        """Get the information of deployed hosts
        Returns the hosts content

        Raises:
            vimconnNotImplemented: always; not supported by this connector.
        """
        raise vimconn.vimconnNotImplemented("Should have implemented this")
2203
    def get_hosts(self, vim_tenant):
        """Get the hosts and deployed instances
        Returns the hosts content

        Raises:
            vimconnNotImplemented: always; not supported by this connector.
        """
        raise vimconn.vimconnNotImplemented("Should have implemented this")
2208
    def get_processor_rankings(self):
        """Get the processor rankings in the VIM database

        Raises:
            vimconnNotImplemented: always; not supported by this connector.
        """
        raise vimconn.vimconnNotImplemented("Should have implemented this")
2212
    def new_host(self, host_data):
        """Adds a new host to VIM"""
        '''Returns status code of the VIM response'''
        # Not supported by the vCloud Director connector.
        raise vimconn.vimconnNotImplemented("Should have implemented this")
2217
    def new_external_port(self, port_data):
        """Adds a external port to VIM"""
        '''Returns the port identifier'''
        # Not supported by the vCloud Director connector.
        raise vimconn.vimconnNotImplemented("Should have implemented this")
2222
    def new_external_network(self, net_name, net_type):
        """Adds a external network to VIM (shared)"""
        '''Returns the network identifier'''
        # Not supported by the vCloud Director connector.
        raise vimconn.vimconnNotImplemented("Should have implemented this")
2227
    def connect_port_network(self, port_id, network_id, admin=False):
        """Connects a external port to a network"""
        '''Returns status code of the VIM response'''
        # Not supported by the vCloud Director connector.
        raise vimconn.vimconnNotImplemented("Should have implemented this")
2232
    def new_vminstancefromJSON(self, vm_data):
        """Adds a VM instance to VIM"""
        '''Returns the instance identifier'''
        # Not supported by the vCloud Director connector.
        raise vimconn.vimconnNotImplemented("Should have implemented this")
2237
2238 def get_network_name_by_id(self, network_uuid=None):
2239 """Method gets vcloud director network named based on supplied uuid.
2240
2241 Args:
2242 network_uuid: network_id
2243
2244 Returns:
2245 The return network name.
2246 """
2247
2248 if not network_uuid:
2249 return None
2250
2251 try:
2252 org_dict = self.get_org(self.org_uuid)
2253 if 'networks' in org_dict:
2254 org_network_dict = org_dict['networks']
2255 for net_uuid in org_network_dict:
2256 if net_uuid == network_uuid:
2257 return org_network_dict[net_uuid]
2258 except:
2259 self.logger.debug("Exception in get_network_name_by_id")
2260 self.logger.debug(traceback.format_exc())
2261
2262 return None
2263
2264 def get_network_id_by_name(self, network_name=None):
2265 """Method gets vcloud director network uuid based on supplied name.
2266
2267 Args:
2268 network_name: network_name
2269 Returns:
2270 The return network uuid.
2271 network_uuid: network_id
2272 """
2273
2274 if not network_name:
2275 self.logger.debug("get_network_id_by_name() : Network name is empty")
2276 return None
2277
2278 try:
2279 org_dict = self.get_org(self.org_uuid)
2280 if org_dict and 'networks' in org_dict:
2281 org_network_dict = org_dict['networks']
2282 for net_uuid,net_name in org_network_dict.iteritems():
2283 if net_name == network_name:
2284 return net_uuid
2285
2286 except KeyError as exp:
2287 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2288
2289 return None
2290
2291 def list_org_action(self):
2292 """
2293 Method leverages vCloud director and query for available organization for particular user
2294
2295 Args:
2296 vca - is active VCA connection.
2297 vdc_name - is a vdc name that will be used to query vms action
2298
2299 Returns:
2300 The return XML respond
2301 """
2302
2303 url_list = [self.vca.host, '/api/org']
2304 vm_list_rest_call = ''.join(url_list)
2305
2306 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2307 response = Http.get(url=vm_list_rest_call,
2308 headers=self.vca.vcloud_session.get_vcloud_headers(),
2309 verify=self.vca.verify,
2310 logger=self.vca.logger)
2311
2312 if response.status_code == 403:
2313 response = self.retry_rest('GET', vm_list_rest_call)
2314
2315 if response.status_code == requests.codes.ok:
2316 return response.content
2317
2318 return None
2319
2320 def get_org_action(self, org_uuid=None):
2321 """
2322 Method leverages vCloud director and retrieve available object fdr organization.
2323
2324 Args:
2325 vca - is active VCA connection.
2326 vdc_name - is a vdc name that will be used to query vms action
2327
2328 Returns:
2329 The return XML respond
2330 """
2331
2332 if org_uuid is None:
2333 return None
2334
2335 url_list = [self.vca.host, '/api/org/', org_uuid]
2336 vm_list_rest_call = ''.join(url_list)
2337
2338 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2339 response = Http.get(url=vm_list_rest_call,
2340 headers=self.vca.vcloud_session.get_vcloud_headers(),
2341 verify=self.vca.verify,
2342 logger=self.vca.logger)
2343
2344 #Retry login if session expired & retry sending request
2345 if response.status_code == 403:
2346 response = self.retry_rest('GET', vm_list_rest_call)
2347
2348 if response.status_code == requests.codes.ok:
2349 return response.content
2350
2351 return None
2352
2353 def get_org(self, org_uuid=None):
2354 """
2355 Method retrieves available organization in vCloud Director
2356
2357 Args:
2358 org_uuid - is a organization uuid.
2359
2360 Returns:
2361 The return dictionary with following key
2362 "network" - for network list under the org
2363 "catalogs" - for network list under the org
2364 "vdcs" - for vdc list under org
2365 """
2366
2367 org_dict = {}
2368
2369 if org_uuid is None:
2370 return org_dict
2371
2372 content = self.get_org_action(org_uuid=org_uuid)
2373 try:
2374 vdc_list = {}
2375 network_list = {}
2376 catalog_list = {}
2377 vm_list_xmlroot = XmlElementTree.fromstring(content)
2378 for child in vm_list_xmlroot:
2379 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2380 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2381 org_dict['vdcs'] = vdc_list
2382 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2383 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2384 org_dict['networks'] = network_list
2385 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2386 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2387 org_dict['catalogs'] = catalog_list
2388 except:
2389 pass
2390
2391 return org_dict
2392
2393 def get_org_list(self):
2394 """
2395 Method retrieves available organization in vCloud Director
2396
2397 Args:
2398 vca - is active VCA connection.
2399
2400 Returns:
2401 The return dictionary and key for each entry VDC UUID
2402 """
2403
2404 org_dict = {}
2405
2406 content = self.list_org_action()
2407 try:
2408 vm_list_xmlroot = XmlElementTree.fromstring(content)
2409 for vm_xml in vm_list_xmlroot:
2410 if vm_xml.tag.split("}")[1] == 'Org':
2411 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2412 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2413 except:
2414 pass
2415
2416 return org_dict
2417
2418 def vms_view_action(self, vdc_name=None):
2419 """ Method leverages vCloud director vms query call
2420
2421 Args:
2422 vca - is active VCA connection.
2423 vdc_name - is a vdc name that will be used to query vms action
2424
2425 Returns:
2426 The return XML respond
2427 """
2428 vca = self.connect()
2429 if vdc_name is None:
2430 return None
2431
2432 url_list = [vca.host, '/api/vms/query']
2433 vm_list_rest_call = ''.join(url_list)
2434
2435 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2436 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2437 vca.vcloud_session.organization.Link)
2438 if len(refs) == 1:
2439 response = Http.get(url=vm_list_rest_call,
2440 headers=vca.vcloud_session.get_vcloud_headers(),
2441 verify=vca.verify,
2442 logger=vca.logger)
2443 if response.status_code == requests.codes.ok:
2444 return response.content
2445
2446 return None
2447
2448 def get_vapp_list(self, vdc_name=None):
2449 """
2450 Method retrieves vApp list deployed vCloud director and returns a dictionary
2451 contains a list of all vapp deployed for queried VDC.
2452 The key for a dictionary is vApp UUID
2453
2454
2455 Args:
2456 vca - is active VCA connection.
2457 vdc_name - is a vdc name that will be used to query vms action
2458
2459 Returns:
2460 The return dictionary and key for each entry vapp UUID
2461 """
2462
2463 vapp_dict = {}
2464 if vdc_name is None:
2465 return vapp_dict
2466
2467 content = self.vms_view_action(vdc_name=vdc_name)
2468 try:
2469 vm_list_xmlroot = XmlElementTree.fromstring(content)
2470 for vm_xml in vm_list_xmlroot:
2471 if vm_xml.tag.split("}")[1] == 'VMRecord':
2472 if vm_xml.attrib['isVAppTemplate'] == 'true':
2473 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2474 if 'vappTemplate-' in rawuuid[0]:
2475 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2476 # vm and use raw UUID as key
2477 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2478 except:
2479 pass
2480
2481 return vapp_dict
2482
2483 def get_vm_list(self, vdc_name=None):
2484 """
2485 Method retrieves VM's list deployed vCloud director. It returns a dictionary
2486 contains a list of all VM's deployed for queried VDC.
2487 The key for a dictionary is VM UUID
2488
2489
2490 Args:
2491 vca - is active VCA connection.
2492 vdc_name - is a vdc name that will be used to query vms action
2493
2494 Returns:
2495 The return dictionary and key for each entry vapp UUID
2496 """
2497 vm_dict = {}
2498
2499 if vdc_name is None:
2500 return vm_dict
2501
2502 content = self.vms_view_action(vdc_name=vdc_name)
2503 try:
2504 vm_list_xmlroot = XmlElementTree.fromstring(content)
2505 for vm_xml in vm_list_xmlroot:
2506 if vm_xml.tag.split("}")[1] == 'VMRecord':
2507 if vm_xml.attrib['isVAppTemplate'] == 'false':
2508 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2509 if 'vm-' in rawuuid[0]:
2510 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2511 # vm and use raw UUID as key
2512 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2513 except:
2514 pass
2515
2516 return vm_dict
2517
2518 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2519 """
2520 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
2521 contains a list of all VM's deployed for queried VDC.
2522 The key for a dictionary is VM UUID
2523
2524
2525 Args:
2526 vca - is active VCA connection.
2527 vdc_name - is a vdc name that will be used to query vms action
2528
2529 Returns:
2530 The return dictionary and key for each entry vapp UUID
2531 """
2532 vm_dict = {}
2533 vca = self.connect()
2534 if not vca:
2535 raise vimconn.vimconnConnectionException("self.connect() is failed")
2536
2537 if vdc_name is None:
2538 return vm_dict
2539
2540 content = self.vms_view_action(vdc_name=vdc_name)
2541 try:
2542 vm_list_xmlroot = XmlElementTree.fromstring(content)
2543 for vm_xml in vm_list_xmlroot:
2544 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2545 # lookup done by UUID
2546 if isuuid:
2547 if vapp_name in vm_xml.attrib['container']:
2548 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2549 if 'vm-' in rawuuid[0]:
2550 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2551 break
2552 # lookup done by Name
2553 else:
2554 if vapp_name in vm_xml.attrib['name']:
2555 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2556 if 'vm-' in rawuuid[0]:
2557 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2558 break
2559 except:
2560 pass
2561
2562 return vm_dict
2563
2564 def get_network_action(self, network_uuid=None):
2565 """
2566 Method leverages vCloud director and query network based on network uuid
2567
2568 Args:
2569 vca - is active VCA connection.
2570 network_uuid - is a network uuid
2571
2572 Returns:
2573 The return XML respond
2574 """
2575
2576 if network_uuid is None:
2577 return None
2578
2579 url_list = [self.vca.host, '/api/network/', network_uuid]
2580 vm_list_rest_call = ''.join(url_list)
2581
2582 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2583 response = Http.get(url=vm_list_rest_call,
2584 headers=self.vca.vcloud_session.get_vcloud_headers(),
2585 verify=self.vca.verify,
2586 logger=self.vca.logger)
2587
2588 #Retry login if session expired & retry sending request
2589 if response.status_code == 403:
2590 response = self.retry_rest('GET', vm_list_rest_call)
2591
2592 if response.status_code == requests.codes.ok:
2593 return response.content
2594
2595 return None
2596
2597 def get_vcd_network(self, network_uuid=None):
2598 """
2599 Method retrieves available network from vCloud Director
2600
2601 Args:
2602 network_uuid - is VCD network UUID
2603
2604 Each element serialized as key : value pair
2605
2606 Following keys available for access. network_configuration['Gateway'}
2607 <Configuration>
2608 <IpScopes>
2609 <IpScope>
2610 <IsInherited>true</IsInherited>
2611 <Gateway>172.16.252.100</Gateway>
2612 <Netmask>255.255.255.0</Netmask>
2613 <Dns1>172.16.254.201</Dns1>
2614 <Dns2>172.16.254.202</Dns2>
2615 <DnsSuffix>vmwarelab.edu</DnsSuffix>
2616 <IsEnabled>true</IsEnabled>
2617 <IpRanges>
2618 <IpRange>
2619 <StartAddress>172.16.252.1</StartAddress>
2620 <EndAddress>172.16.252.99</EndAddress>
2621 </IpRange>
2622 </IpRanges>
2623 </IpScope>
2624 </IpScopes>
2625 <FenceMode>bridged</FenceMode>
2626
2627 Returns:
2628 The return dictionary and key for each entry vapp UUID
2629 """
2630
2631 network_configuration = {}
2632 if network_uuid is None:
2633 return network_uuid
2634
2635 try:
2636 content = self.get_network_action(network_uuid=network_uuid)
2637 vm_list_xmlroot = XmlElementTree.fromstring(content)
2638
2639 network_configuration['status'] = vm_list_xmlroot.get("status")
2640 network_configuration['name'] = vm_list_xmlroot.get("name")
2641 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
2642
2643 for child in vm_list_xmlroot:
2644 if child.tag.split("}")[1] == 'IsShared':
2645 network_configuration['isShared'] = child.text.strip()
2646 if child.tag.split("}")[1] == 'Configuration':
2647 for configuration in child.iter():
2648 tagKey = configuration.tag.split("}")[1].strip()
2649 if tagKey != "":
2650 network_configuration[tagKey] = configuration.text.strip()
2651 return network_configuration
2652 except Exception as exp :
2653 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
2654 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2655
2656 return network_configuration
2657
2658 def delete_network_action(self, network_uuid=None):
2659 """
2660 Method delete given network from vCloud director
2661
2662 Args:
2663 network_uuid - is a network uuid that client wish to delete
2664
2665 Returns:
2666 The return None or XML respond or false
2667 """
2668
2669 vca = self.connect_as_admin()
2670 if not vca:
2671 raise vimconn.vimconnConnectionException("self.connect() is failed")
2672 if network_uuid is None:
2673 return False
2674
2675 url_list = [vca.host, '/api/admin/network/', network_uuid]
2676 vm_list_rest_call = ''.join(url_list)
2677
2678 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2679 response = Http.delete(url=vm_list_rest_call,
2680 headers=vca.vcloud_session.get_vcloud_headers(),
2681 verify=vca.verify,
2682 logger=vca.logger)
2683
2684 if response.status_code == 202:
2685 return True
2686
2687 return False
2688
2689 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2690 ip_profile=None, isshared='true'):
2691 """
2692 Method create network in vCloud director
2693
2694 Args:
2695 network_name - is network name to be created.
2696 net_type - can be 'bridge','data','ptp','mgmt'.
2697 ip_profile is a dict containing the IP parameters of the network
2698 isshared - is a boolean
2699 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2700 It optional attribute. by default if no parent network indicate the first available will be used.
2701
2702 Returns:
2703 The return network uuid or return None
2704 """
2705
2706 new_network_name = [network_name, '-', str(uuid.uuid4())]
2707 content = self.create_network_rest(network_name=''.join(new_network_name),
2708 ip_profile=ip_profile,
2709 net_type=net_type,
2710 parent_network_uuid=parent_network_uuid,
2711 isshared=isshared)
2712 if content is None:
2713 self.logger.debug("Failed create network {}.".format(network_name))
2714 return None
2715
2716 try:
2717 vm_list_xmlroot = XmlElementTree.fromstring(content)
2718 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2719 if len(vcd_uuid) == 4:
2720 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2721 return vcd_uuid[3]
2722 except:
2723 self.logger.debug("Failed create network {}".format(network_name))
2724 return None
2725
    def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
                            ip_profile=None, isshared='true'):
        """
        Method create network in vCloud director

        Args:
            network_name - is network name to be created.
            net_type - can be 'bridge','data','ptp','mgmt'.
            ip_profile is a dict containing the IP parameters of the network
            isshared - is a boolean
            parent_network_uuid - is parent provider vdc network that will be used for mapping.
            It optional attribute. by default if no parent network indicate the first available will be used.

        Returns:
            The return XML content of the create response (after the creation
            task completes) or None on any failure.
        """

        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")
        if network_name is None:
            return None

        # query the org VDC to discover the provider vdc reference and the
        # "add orgVdcNetwork" link
        url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
        vm_list_rest_call = ''.join(url_list)
        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
            response = Http.get(url=vm_list_rest_call,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            provider_network = None
            available_networks = None
            add_vdc_rest_url = None

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None
            else:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot:
                        if child.tag.split("}")[1] == 'ProviderVdcReference':
                            provider_network = child.attrib.get('href')
                        # application/vnd.vmware.admin.providervdc+xml
                        if child.tag.split("}")[1] == 'Link':
                            if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
                                    and child.attrib.get('rel') == 'add':
                                add_vdc_rest_url = child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response.content))
                    return None

            # find pvdc provided available network
            response = Http.get(url=provider_network,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)
            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None

            # available_networks.split("/")[-1]

            # no explicit parent requested: take the first available network
            # advertised by the provider vdc
            if parent_network_uuid is None:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot.iter():
                        if child.tag.split("}")[1] == 'AvailableNetworks':
                            for networks in child.iter():
                                # application/vnd.vmware.admin.network+xml
                                if networks.attrib.get('href') is not None:
                                    available_networks = networks.attrib.get('href')
                                    break
                except:
                    return None

            try:
                #Configure IP profile of the network
                # fill any missing field with defaults; subnet defaults to a
                # random 192.168.x.0/24 when not supplied
                ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE

                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
                    subnet_rand = random.randint(0, 255)
                    ip_base = "192.168.{}.".format(subnet_rand)
                    ip_profile['subnet_address'] = ip_base + "0/24"
                else:
                    ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'

                if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
                    ip_profile['gateway_address']=ip_base + "1"
                if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
                    ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
                if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
                    ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
                if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
                    ip_profile['dhcp_start_address']=ip_base + "3"
                if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
                    ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
                if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
                    ip_profile['dns_address']=ip_base + "2"

                gateway_address=ip_profile['gateway_address']
                dhcp_count=int(ip_profile['dhcp_count'])
                subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])

                if ip_profile['dhcp_enabled']==True:
                    dhcp_enabled='true'
                else:
                    dhcp_enabled='false'
                dhcp_start_address=ip_profile['dhcp_start_address']

                #derive dhcp_end_address from dhcp_start_address & dhcp_count
                end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
                end_ip_int += dhcp_count - 1
                dhcp_end_address = str(netaddr.IPAddress(end_ip_int))

                # NOTE(review): ip_version is extracted but not used in the
                # request payload below - confirm whether this is intentional
                ip_version=ip_profile['ip_version']
                dns_address=ip_profile['dns_address']
            except KeyError as exp:
                self.logger.debug("Create Network REST: Key error {}".format(exp))
                raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))

            # either use client provided UUID or search for a first available
            #  if both are not defined we return none
            if parent_network_uuid is not None:
                url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
                add_vdc_rest_url = ''.join(url_list)

            #Creating all networks as Direct Org VDC type networks.
            #Unused in case of Underlay (data/ptp) network interface.
            fence_mode="bridged"
            is_inherited='false'
            dns_list = dns_address.split(";")
            dns1 = dns_list[0]
            dns2_text = ""
            if len(dns_list) >= 2:
                dns2_text = "\n                        <Dns2>{}</Dns2>\n".format(dns_list[1])
            # NOTE(review): when parent_network_uuid is supplied,
            # available_networks may still be None and the ParentNetwork href
            # below is rendered as the literal string "None" - verify against
            # callers that always both or neither are expected.
            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                    <Description>Openmano created</Description>
                            <Configuration>
                                <IpScopes>
                                    <IpScope>
                                        <IsInherited>{1:s}</IsInherited>
                                        <Gateway>{2:s}</Gateway>
                                        <Netmask>{3:s}</Netmask>
                                        <Dns1>{4:s}</Dns1>{5:s}
                                        <IsEnabled>{6:s}</IsEnabled>
                                        <IpRanges>
                                            <IpRange>
                                                <StartAddress>{7:s}</StartAddress>
                                                <EndAddress>{8:s}</EndAddress>
                                            </IpRange>
                                        </IpRanges>
                                    </IpScope>
                                </IpScopes>
                                <ParentNetwork href="{9:s}"/>
                                <FenceMode>{10:s}</FenceMode>
                            </Configuration>
                            <IsShared>{11:s}</IsShared>
                        </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
                                                    subnet_address, dns1, dns2_text, dhcp_enabled,
                                                    dhcp_start_address, dhcp_end_address, available_networks,
                                                    fence_mode, isshared)

            headers = vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
            try:
                response = Http.post(url=add_vdc_rest_url,
                                     headers=headers,
                                     data=data,
                                     verify=vca.verify,
                                     logger=vca.logger)

                if response.status_code != 201:
                    self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
                                      .format(response.status_code,response.content))
                else:
                    network = networkType.parseString(response.content, True)
                    create_nw_task = network.get_Tasks().get_Task()[0]

                    # if we all ok we respond with content after network creation completes
                    # otherwise by default return None
                    if create_nw_task is not None:
                        self.logger.debug("Create Network REST : Waiting for Network creation complete")
                        status = vca.block_until_completed(create_nw_task)
                        if status:
                            return response.content
                        else:
                            self.logger.debug("create_network_rest task failed. Network Create response : {}"
                                              .format(response.content))
            except Exception as exp:
                self.logger.debug("create_network_rest : Exception : {} ".format(exp))

        return None
2923
2924 def convert_cidr_to_netmask(self, cidr_ip=None):
2925 """
2926 Method sets convert CIDR netmask address to normal IP format
2927 Args:
2928 cidr_ip : CIDR IP address
2929 Returns:
2930 netmask : Converted netmask
2931 """
2932 if cidr_ip is not None:
2933 if '/' in cidr_ip:
2934 network, net_bits = cidr_ip.split('/')
2935 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2936 else:
2937 netmask = cidr_ip
2938 return netmask
2939 return None
2940
2941 def get_provider_rest(self, vca=None):
2942 """
2943 Method gets provider vdc view from vcloud director
2944
2945 Args:
2946 network_name - is network name to be created.
2947 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2948 It optional attribute. by default if no parent network indicate the first available will be used.
2949
2950 Returns:
2951 The return xml content of respond or None
2952 """
2953
2954 url_list = [vca.host, '/api/admin']
2955 response = Http.get(url=''.join(url_list),
2956 headers=vca.vcloud_session.get_vcloud_headers(),
2957 verify=vca.verify,
2958 logger=vca.logger)
2959
2960 if response.status_code == requests.codes.ok:
2961 return response.content
2962 return None
2963
2964 def create_vdc(self, vdc_name=None):
2965
2966 vdc_dict = {}
2967
2968 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
2969 if xml_content is not None:
2970 try:
2971 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
2972 for child in task_resp_xmlroot:
2973 if child.tag.split("}")[1] == 'Owner':
2974 vdc_id = child.attrib.get('href').split("/")[-1]
2975 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
2976 return vdc_dict
2977 except:
2978 self.logger.debug("Respond body {}".format(xml_content))
2979
2980 return None
2981
2982 def create_vdc_from_tmpl_rest(self, vdc_name=None):
2983 """
2984 Method create vdc in vCloud director based on VDC template.
2985 it uses pre-defined template that must be named openmano
2986
2987 Args:
2988 vdc_name - name of a new vdc.
2989
2990 Returns:
2991 The return xml content of respond or None
2992 """
2993
2994 self.logger.info("Creating new vdc {}".format(vdc_name))
2995 vca = self.connect()
2996 if not vca:
2997 raise vimconn.vimconnConnectionException("self.connect() is failed")
2998 if vdc_name is None:
2999 return None
3000
3001 url_list = [vca.host, '/api/vdcTemplates']
3002 vm_list_rest_call = ''.join(url_list)
3003 response = Http.get(url=vm_list_rest_call,
3004 headers=vca.vcloud_session.get_vcloud_headers(),
3005 verify=vca.verify,
3006 logger=vca.logger)
3007
3008 # container url to a template
3009 vdc_template_ref = None
3010 try:
3011 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3012 for child in vm_list_xmlroot:
3013 # application/vnd.vmware.admin.providervdc+xml
3014 # we need find a template from witch we instantiate VDC
3015 if child.tag.split("}")[1] == 'VdcTemplate':
3016 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
3017 vdc_template_ref = child.attrib.get('href')
3018 except:
3019 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3020 self.logger.debug("Respond body {}".format(response.content))
3021 return None
3022
3023 # if we didn't found required pre defined template we return None
3024 if vdc_template_ref is None:
3025 return None
3026
3027 try:
3028 # instantiate vdc
3029 url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
3030 vm_list_rest_call = ''.join(url_list)
3031 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3032 <Source href="{1:s}"></Source>
3033 <Description>opnemano</Description>
3034 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
3035 headers = vca.vcloud_session.get_vcloud_headers()
3036 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
3037 response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
3038 logger=vca.logger)
3039
3040 vdc_task = taskType.parseString(response.content, True)
3041 if type(vdc_task) is GenericTask:
3042 self.vca.block_until_completed(vdc_task)
3043
3044 # if we all ok we respond with content otherwise by default None
3045 if response.status_code >= 200 and response.status_code < 300:
3046 return response.content
3047 return None
3048 except:
3049 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3050 self.logger.debug("Respond body {}".format(response.content))
3051
3052 return None
3053
    def create_vdc_rest(self, vdc_name=None):
        """
        Method create vdc in vCloud director via the admin REST API.

        Args:
            vdc_name - name of the vdc to be created.

        Returns:
            The return xml content of the create response or None
        """

        self.logger.info("Creating new vdc {}".format(vdc_name))

        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vdc_name is None:
            return None

        # query the admin org view to find the "add createVdcParams" link
        url_list = [vca.host, '/api/admin/org/', self.org_uuid]
        vm_list_rest_call = ''.join(url_list)
        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
            response = Http.get(url=vm_list_rest_call,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            provider_vdc_ref = None
            add_vdc_rest_url = None
            available_networks = None

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None
            else:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot:
                        # application/vnd.vmware.admin.providervdc+xml
                        if child.tag.split("}")[1] == 'Link':
                            if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
                                    and child.attrib.get('rel') == 'add':
                                add_vdc_rest_url = child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response.content))
                    return None

            # resolve the provider vdc reference from the admin view;
            # the loop keeps the LAST ProviderVdcReference found
            response = self.get_provider_rest(vca=vca)
            try:
                vm_list_xmlroot = XmlElementTree.fromstring(response)
                for child in vm_list_xmlroot:
                    if child.tag.split("}")[1] == 'ProviderVdcReferences':
                        for sub_child in child:
                            provider_vdc_ref = sub_child.attrib.get('href')
            except:
                self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                self.logger.debug("Respond body {}".format(response))
                return None

            if add_vdc_rest_url is not None and provider_vdc_ref is not None:
                # fixed-size ReservationPool vdc; capacities are hard-coded
                data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
                        <AllocationModel>ReservationPool</AllocationModel>
                        <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
                        <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
                        </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
                        <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
                        <ProviderVdcReference
                        name="Main Provider"
                        href="{2:s}" />
                <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
                                                                                              escape(vdc_name),
                                                                                              provider_vdc_ref)

                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
                response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
                                     logger=vca.logger)

                # if we all ok we respond with content otherwise by default None
                if response.status_code == 201:
                    return response.content
        return None
3140
    def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
        """
        Method retrieve vapp detail from vCloud director

        Args:
            vapp_uuid - is vapp identifier.
            need_admin_access - when True query with an admin session
                                (self.connect_as_admin()); otherwise the
                                regular self.vca session is used.

        Returns:
            Dictionary of parsed vApp attributes (empty on REST/parse failure),
            or None when no vapp_uuid is given.
        """

        parsed_respond = {}
        vca = None

        if need_admin_access:
            vca = self.connect_as_admin()
        else:
            vca = self.vca

        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vapp_uuid is None:
            return None

        url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
        get_vapp_restcall = ''.join(url_list)

        if vca.vcloud_session and vca.vcloud_session.organization:
            response = Http.get(url=get_vapp_restcall,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            # 403: session likely expired - retry only for the non-admin session
            if response.status_code == 403:
                if need_admin_access == False:
                    response = self.retry_rest('GET', get_vapp_restcall)

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
                                                                                          response.status_code))
                return parsed_respond

            try:
                xmlroot_respond = XmlElementTree.fromstring(response.content)
                parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']

                namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                              'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                              'vmw': 'http://www.vmware.com/schema/ovf',
                              'vm': 'http://www.vmware.com/vcloud/v1.5',
                              'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                              "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
                              "xmlns":"http://www.vmware.com/vcloud/v1.5"
                              }

                created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
                if created_section is not None:
                    parsed_respond['created'] = created_section.text

                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
                if network_section is not None and 'networkName' in network_section.attrib:
                    parsed_respond['networkname'] = network_section.attrib['networkName']

                # flatten the IpScope children (Gateway, Netmask, ...) into the
                # result dict; IpRanges entries are flattened one level deeper
                ipscopes_section = \
                    xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
                                         namespaces)
                if ipscopes_section is not None:
                    for ipscope in ipscopes_section:
                        for scope in ipscope:
                            tag_key = scope.tag.split("}")[1]
                            if tag_key == 'IpRanges':
                                ip_ranges = scope.getchildren()
                                for ipblock in ip_ranges:
                                    for block in ipblock:
                                        parsed_respond[block.tag.split("}")[1]] = block.text
                            else:
                                parsed_respond[tag_key] = scope.text

                # parse children section for other attrib
                # NOTE(review): 'vm:Children/' matches the first child VM only -
                # a multi-VM vApp is presumably not expected here; confirm.
                children_section = xmlroot_respond.find('vm:Children/', namespaces)
                if children_section is not None:
                    parsed_respond['name'] = children_section.attrib['name']
                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
                        if "nestedHypervisorEnabled" in children_section.attrib else None
                    parsed_respond['deployed'] = children_section.attrib['deployed']
                    parsed_respond['status'] = children_section.attrib['status']
                    parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
                    nic_list = []
                    for adapters in network_adapter:
                        adapter_key = adapters.tag.split("}")[1]
                        if adapter_key == 'PrimaryNetworkConnectionIndex':
                            parsed_respond['primarynetwork'] = adapters.text
                        if adapter_key == 'NetworkConnection':
                            vnic = {}
                            if 'network' in adapters.attrib:
                                vnic['network'] = adapters.attrib['network']
                            for adapter in adapters:
                                setting_key = adapter.tag.split("}")[1]
                                vnic[setting_key] = adapter.text
                            nic_list.append(vnic)

                    # console ticket links for this VM
                    for link in children_section:
                        if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                            if link.attrib['rel'] == 'screen:acquireTicket':
                                parsed_respond['acquireTicket'] = link.attrib
                            if link.attrib['rel'] == 'screen:acquireMksTicket':
                                parsed_respond['acquireMksTicket'] = link.attrib

                    parsed_respond['interfaces'] = nic_list
                    # vCenter specific extension data (moref id of the VM)
                    vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                    if vCloud_extension_section is not None:
                        vm_vcenter_info = {}
                        vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                        vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                        if vmext is not None:
                            vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                        parsed_respond["vm_vcenter_info"]= vm_vcenter_info

                    # hard disk size and the edit link used by modify_vm_disk_rest
                    virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
                    vm_virtual_hardware_info = {}
                    if virtual_hardware_section is not None:
                        for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
                            if item.find("rasd:Description",namespaces).text == "Hard disk":
                                disk_size = item.find("rasd:HostResource" ,namespaces
                                                      ).attrib["{"+namespaces['vm']+"}capacity"]

                                vm_virtual_hardware_info["disk_size"]= disk_size
                                break

                        for link in virtual_hardware_section:
                            if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                                if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
                                    vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
                                    break

                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
            except Exception as exp :
                self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
        return parsed_respond
3281
3282 def acuire_console(self, vm_uuid=None):
3283
3284 if vm_uuid is None:
3285 return None
3286
3287 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
3288 vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid)
3289 console_dict = vm_dict['acquireTicket']
3290 console_rest_call = console_dict['href']
3291
3292 response = Http.post(url=console_rest_call,
3293 headers=self.vca.vcloud_session.get_vcloud_headers(),
3294 verify=self.vca.verify,
3295 logger=self.vca.logger)
3296 if response.status_code == 403:
3297 response = self.retry_rest('POST', console_rest_call)
3298
3299 if response.status_code == requests.codes.ok:
3300 return response.content
3301
3302 return None
3303
3304 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3305 """
3306 Method retrieve vm disk details
3307
3308 Args:
3309 vapp_uuid - is vapp identifier.
3310 flavor_disk - disk size as specified in VNFD (flavor)
3311
3312 Returns:
3313 The return network uuid or return None
3314 """
3315 status = None
3316 try:
3317 #Flavor disk is in GB convert it into MB
3318 flavor_disk = int(flavor_disk) * 1024
3319 vm_details = self.get_vapp_details_rest(vapp_uuid)
3320 if vm_details:
3321 vm_name = vm_details["name"]
3322 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3323
3324 if vm_details and "vm_virtual_hardware" in vm_details:
3325 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3326 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3327
3328 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3329
3330 if flavor_disk > vm_disk:
3331 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3332 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3333 vm_disk, flavor_disk ))
3334 else:
3335 status = True
3336 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3337
3338 return status
3339 except Exception as exp:
3340 self.logger.info("Error occurred while modifing disk size {}".format(exp))
3341
3342
    def modify_vm_disk_rest(self, disk_href , disk_size):
        """
        Method retrieve modify vm disk size

        Args:
            disk_href - vCD API URL to GET and PUT disk data
            disk_size - disk size (MB) as specified in VNFD (flavor)

        Returns:
            Task completion status from block_until_completed, or None on error.
        """
        if disk_href is None or disk_size is None:
            return None

        if self.vca.vcloud_session and self.vca.vcloud_session.organization:
            response = Http.get(url=disk_href,
                                headers=self.vca.vcloud_session.get_vcloud_headers(),
                                verify=self.vca.verify,
                                logger=self.vca.logger)

            # 403: session likely expired - re-login and retry once
            if response.status_code == 403:
                response = self.retry_rest('GET', disk_href)

            if response.status_code != requests.codes.ok:
                self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
                                                                                              response.status_code))
                return None
            try:
                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                # collect the document's own namespace prefixes
                # (dict.iteritems: this module targets Python 2)
                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

                # patch the capacity attribute of the "Hard disk" item in place
                for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                    if item.find("rasd:Description",namespaces).text == "Hard disk":
                        disk_item = item.find("rasd:HostResource" ,namespaces )
                        if disk_item is not None:
                            disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
                            break

                data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
                                                xml_declaration=True)

                #Send PUT request to modify disk size
                headers = self.vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

                response = Http.put(url=disk_href,
                                    data=data,
                                    headers=headers,
                                    verify=self.vca.verify, logger=self.logger)

                if response.status_code == 403:
                    add_headers = {'Content-Type': headers['Content-Type']}
                    response = self.retry_rest('PUT', disk_href, add_headers, data)

                # 202 Accepted: vCD queued an asynchronous modify task
                if response.status_code != 202:
                    self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
                                                                                                  response.status_code))
                else:
                    modify_disk_task = taskType.parseString(response.content, True)
                    if type(modify_disk_task) is GenericTask:
                        status = self.vca.block_until_completed(modify_disk_task)
                        return status

                return None

            except Exception as exp :
                self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
                return None
3412
3413 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3414 """
3415 Method to attach pci devices to VM
3416
3417 Args:
3418 vapp_uuid - uuid of vApp/VM
3419 pci_devices - pci devices infromation as specified in VNFD (flavor)
3420
3421 Returns:
3422 The status of add pci device task , vm object and
3423 vcenter_conect object
3424 """
3425 vm_obj = None
3426 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3427 vcenter_conect, content = self.get_vcenter_content()
3428 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3429
3430 if vm_moref_id:
3431 try:
3432 no_of_pci_devices = len(pci_devices)
3433 if no_of_pci_devices > 0:
3434 #Get VM and its host
3435 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3436 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3437 if host_obj and vm_obj:
3438 #get PCI devies from host on which vapp is currently installed
3439 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3440
3441 if avilable_pci_devices is None:
3442 #find other hosts with active pci devices
3443 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3444 content,
3445 no_of_pci_devices
3446 )
3447
3448 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3449 #Migrate vm to the host where PCI devices are availble
3450 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3451 task = self.relocate_vm(new_host_obj, vm_obj)
3452 if task is not None:
3453 result = self.wait_for_vcenter_task(task, vcenter_conect)
3454 self.logger.info("Migrate VM status: {}".format(result))
3455 host_obj = new_host_obj
3456 else:
3457 self.logger.info("Fail to migrate VM : {}".format(result))
3458 raise vimconn.vimconnNotFoundException(
3459 "Fail to migrate VM : {} to host {}".format(
3460 vmname_andid,
3461 new_host_obj)
3462 )
3463
3464 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3465 #Add PCI devices one by one
3466 for pci_device in avilable_pci_devices:
3467 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3468 if task:
3469 status= self.wait_for_vcenter_task(task, vcenter_conect)
3470 if status:
3471 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3472 else:
3473 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3474 return True, vm_obj, vcenter_conect
3475 else:
3476 self.logger.error("Currently there is no host with"\
3477 " {} number of avaialble PCI devices required for VM {}".format(
3478 no_of_pci_devices,
3479 vmname_andid)
3480 )
3481 raise vimconn.vimconnNotFoundException(
3482 "Currently there is no host with {} "\
3483 "number of avaialble PCI devices required for VM {}".format(
3484 no_of_pci_devices,
3485 vmname_andid))
3486 else:
3487 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
3488
3489 except vmodl.MethodFault as error:
3490 self.logger.error("Error occurred while adding PCI devices {} ",error)
3491 return None, vm_obj, vcenter_conect
3492
3493 def get_vm_obj(self, content, mob_id):
3494 """
3495 Method to get the vsphere VM object associated with a given morf ID
3496 Args:
3497 vapp_uuid - uuid of vApp/VM
3498 content - vCenter content object
3499 mob_id - mob_id of VM
3500
3501 Returns:
3502 VM and host object
3503 """
3504 vm_obj = None
3505 host_obj = None
3506 try :
3507 container = content.viewManager.CreateContainerView(content.rootFolder,
3508 [vim.VirtualMachine], True
3509 )
3510 for vm in container.view:
3511 mobID = vm._GetMoId()
3512 if mobID == mob_id:
3513 vm_obj = vm
3514 host_obj = vm_obj.runtime.host
3515 break
3516 except Exception as exp:
3517 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3518 return host_obj, vm_obj
3519
3520 def get_pci_devices(self, host, need_devices):
3521 """
3522 Method to get the details of pci devices on given host
3523 Args:
3524 host - vSphere host object
3525 need_devices - number of pci devices needed on host
3526
3527 Returns:
3528 array of pci devices
3529 """
3530 all_devices = []
3531 all_device_ids = []
3532 used_devices_ids = []
3533
3534 try:
3535 if host:
3536 pciPassthruInfo = host.config.pciPassthruInfo
3537 pciDevies = host.hardware.pciDevice
3538
3539 for pci_status in pciPassthruInfo:
3540 if pci_status.passthruActive:
3541 for device in pciDevies:
3542 if device.id == pci_status.id:
3543 all_device_ids.append(device.id)
3544 all_devices.append(device)
3545
3546 #check if devices are in use
3547 avalible_devices = all_devices
3548 for vm in host.vm:
3549 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3550 vm_devices = vm.config.hardware.device
3551 for device in vm_devices:
3552 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3553 if device.backing.id in all_device_ids:
3554 for use_device in avalible_devices:
3555 if use_device.id == device.backing.id:
3556 avalible_devices.remove(use_device)
3557 used_devices_ids.append(device.backing.id)
3558 self.logger.debug("Device {} from devices {}"\
3559 "is in use".format(device.backing.id,
3560 device)
3561 )
3562 if len(avalible_devices) < need_devices:
3563 self.logger.debug("Host {} don't have {} number of active devices".format(host,
3564 need_devices))
3565 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
3566 avalible_devices))
3567 return None
3568 else:
3569 required_devices = avalible_devices[:need_devices]
3570 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
3571 len(avalible_devices),
3572 host,
3573 need_devices))
3574 self.logger.info("Retruning {} devices as {}".format(need_devices,
3575 required_devices ))
3576 return required_devices
3577
3578 except Exception as exp:
3579 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3580
3581 return None
3582
3583 def get_host_and_PCIdevices(self, content, need_devices):
3584 """
3585 Method to get the details of pci devices infromation on all hosts
3586
3587 Args:
3588 content - vSphere host object
3589 need_devices - number of pci devices needed on host
3590
3591 Returns:
3592 array of pci devices and host object
3593 """
3594 host_obj = None
3595 pci_device_objs = None
3596 try:
3597 if content:
3598 container = content.viewManager.CreateContainerView(content.rootFolder,
3599 [vim.HostSystem], True)
3600 for host in container.view:
3601 devices = self.get_pci_devices(host, need_devices)
3602 if devices:
3603 host_obj = host
3604 pci_device_objs = devices
3605 break
3606 except Exception as exp:
3607 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3608
3609 return host_obj,pci_device_objs
3610
3611 def relocate_vm(self, dest_host, vm) :
3612 """
3613 Method to get the relocate VM to new host
3614
3615 Args:
3616 dest_host - vSphere host object
3617 vm - vSphere VM object
3618
3619 Returns:
3620 task object
3621 """
3622 task = None
3623 try:
3624 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3625 task = vm.Relocate(relocate_spec)
3626 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3627 except Exception as exp:
3628 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
3629 dest_host, vm, exp))
3630 return task
3631
3632 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3633 """
3634 Waits and provides updates on a vSphere task
3635 """
3636 while task.info.state == vim.TaskInfo.State.running:
3637 time.sleep(2)
3638
3639 if task.info.state == vim.TaskInfo.State.success:
3640 if task.info.result is not None and not hideResult:
3641 self.logger.info('{} completed successfully, result: {}'.format(
3642 actionName,
3643 task.info.result))
3644 else:
3645 self.logger.info('Task {} completed successfully.'.format(actionName))
3646 else:
3647 self.logger.error('{} did not complete successfully: {} '.format(
3648 actionName,
3649 task.info.error)
3650 )
3651
3652 return task.info.result
3653
3654 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3655 """
3656 Method to add pci device in given VM
3657
3658 Args:
3659 host_object - vSphere host object
3660 vm_object - vSphere VM object
3661 host_pci_dev - host_pci_dev must be one of the devices from the
3662 host_object.hardware.pciDevice list
3663 which is configured as a PCI passthrough device
3664
3665 Returns:
3666 task object
3667 """
3668 task = None
3669 if vm_object and host_object and host_pci_dev:
3670 try :
3671 #Add PCI device to VM
3672 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3673 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3674
3675 if host_pci_dev.id not in systemid_by_pciid:
3676 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3677 return None
3678
3679 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3680 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3681 id=host_pci_dev.id,
3682 systemId=systemid_by_pciid[host_pci_dev.id],
3683 vendorId=host_pci_dev.vendorId,
3684 deviceName=host_pci_dev.deviceName)
3685
3686 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3687
3688 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3689 new_device_config.operation = "add"
3690 vmConfigSpec = vim.vm.ConfigSpec()
3691 vmConfigSpec.deviceChange = [new_device_config]
3692
3693 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3694 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3695 host_pci_dev, vm_object, host_object)
3696 )
3697 except Exception as exp:
3698 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
3699 host_pci_dev,
3700 vm_object,
3701 exp))
3702 return task
3703
3704 def get_vm_vcenter_info(self):
3705 """
3706 Method to get details of vCenter and vm
3707
3708 Args:
3709 vapp_uuid - uuid of vApp or VM
3710
3711 Returns:
3712 Moref Id of VM and deails of vCenter
3713 """
3714 vm_vcenter_info = {}
3715
3716 if self.vcenter_ip is not None:
3717 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3718 else:
3719 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3720 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3721 if self.vcenter_port is not None:
3722 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3723 else:
3724 raise vimconn.vimconnException(message="vCenter port is not provided."\
3725 " Please provide vCenter port while attaching datacenter to tenant in --config")
3726 if self.vcenter_user is not None:
3727 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3728 else:
3729 raise vimconn.vimconnException(message="vCenter user is not provided."\
3730 " Please provide vCenter user while attaching datacenter to tenant in --config")
3731
3732 if self.vcenter_password is not None:
3733 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3734 else:
3735 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3736 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3737
3738 return vm_vcenter_info
3739
3740
3741 def get_vm_pci_details(self, vmuuid):
3742 """
3743 Method to get VM PCI device details from vCenter
3744
3745 Args:
3746 vm_obj - vSphere VM object
3747
3748 Returns:
3749 dict of PCI devives attached to VM
3750
3751 """
3752 vm_pci_devices_info = {}
3753 try:
3754 vcenter_conect, content = self.get_vcenter_content()
3755 vm_moref_id = self.get_vm_moref_id(vmuuid)
3756 if vm_moref_id:
3757 #Get VM and its host
3758 if content:
3759 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3760 if host_obj and vm_obj:
3761 vm_pci_devices_info["host_name"]= host_obj.name
3762 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3763 for device in vm_obj.config.hardware.device:
3764 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3765 device_details={'devide_id':device.backing.id,
3766 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3767 }
3768 vm_pci_devices_info[device.deviceInfo.label] = device_details
3769 else:
3770 self.logger.error("Can not connect to vCenter while getting "\
3771 "PCI devices infromationn")
3772 return vm_pci_devices_info
3773 except Exception as exp:
3774 self.logger.error("Error occurred while getting VM infromationn"\
3775 " for VM : {}".format(exp))
3776 raise vimconn.vimconnException(message=exp)
3777
    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
        """
        Method to add network adapter type to vm
        Args :
            vapp - vApp object whose VMs are connected to the network
            network_name - name of network
            primary_nic_index - int value for primary nic index
            nicIndex - int value for nic index
            net - network dict; optional keys 'floating_ip' (bool) and
                  'ip_address' (static IP) select the IP allocation mode
            nic_type - specify model name to which add to vm
        Returns:
            None
        Raises:
            vimconn.vimconnException when the GET or PUT REST call on the
            VM's networkConnectionSection fails
        """

        try:
            ip_address = None
            floating_ip = False
            if 'floating_ip' in net: floating_ip = net['floating_ip']

            # Stub for ip_address feature
            if 'ip_address' in net: ip_address = net['ip_address']

            # Map the requested addressing to a vCloud IP allocation mode.
            if floating_ip:
                allocation_mode = "POOL"
            elif ip_address:
                allocation_mode = "MANUAL"
            else:
                allocation_mode = "DHCP"

            # NOTE(review): the two branches below are near-duplicates; the
            # nic_type branch only adds a <NetworkAdapterType> element and
            # uses different log text.
            if not nic_type:
                for vms in vapp._get_vms():
                    vm_id = (vms.id).split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)

                    # Fetch the current networkConnectionSection XML of the VM.
                    response = Http.get(url=url_rest_call,
                                        headers=self.vca.vcloud_session.get_vcloud_headers(),
                                        verify=self.vca.verify,
                                        logger=self.vca.logger)

                    # 403: session expired -- retry_rest re-logs in and retries.
                    if response.status_code == 403:
                        response = self.retry_rest('GET', url_rest_call)

                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                       "network connection section")

                    data = response.content
                    # First NIC on this VM: inject the primary index plus the
                    # connection right after </ovf:Info>; otherwise append
                    # another <NetworkConnection> after the last one.
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                               allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                    else:
                        new_item = """<NetworkConnection network="{}">
                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                    <IsConnected>true</IsConnected>
                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                    </NetworkConnection>""".format(network_name, nicIndex,
                                                                   allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))

                    headers = self.vca.vcloud_session.get_vcloud_headers()
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                    response = Http.put(url=url_rest_call, headers=headers, data=data,
                                        verify=self.vca.verify,
                                        logger=self.vca.logger)

                    if response.status_code == 403:
                        add_headers = {'Content-Type': headers['Content-Type']}
                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)

                    # PUT returns 202 Accepted with an async task to wait on.
                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {} ".format(url_rest_call,
                                                                     response.content,
                                                                     response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                       "network connection section")
                    else:
                        nic_task = taskType.parseString(response.content, True)
                        if isinstance(nic_task, GenericTask):
                            self.vca.block_until_completed(nic_task)
                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
                                             "default NIC type".format(vm_id))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
                                              "connect NIC type".format(vm_id))
            else:
                # Same flow as above, but the XML carries the requested
                # <NetworkAdapterType> so vCloud creates that NIC model.
                for vms in vapp._get_vms():
                    vm_id = (vms.id).split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)

                    response = Http.get(url=url_rest_call,
                                        headers=self.vca.vcloud_session.get_vcloud_headers(),
                                        verify=self.vca.verify,
                                        logger=self.vca.logger)

                    if response.status_code == 403:
                        response = self.retry_rest('GET', url_rest_call)

                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                       "network connection section")
                    data = response.content
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                <NetworkAdapterType>{}</NetworkAdapterType>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                               allocation_mode, nic_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                    else:
                        new_item = """<NetworkConnection network="{}">
                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                    <IsConnected>true</IsConnected>
                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                    <NetworkAdapterType>{}</NetworkAdapterType>
                                    </NetworkConnection>""".format(network_name, nicIndex,
                                                                   allocation_mode, nic_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))

                    headers = self.vca.vcloud_session.get_vcloud_headers()
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                    response = Http.put(url=url_rest_call, headers=headers, data=data,
                                        verify=self.vca.verify,
                                        logger=self.vca.logger)

                    if response.status_code == 403:
                        add_headers = {'Content-Type': headers['Content-Type']}
                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)

                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                       "network connection section")
                    else:
                        nic_task = taskType.parseString(response.content, True)
                        if isinstance(nic_task, GenericTask):
                            self.vca.block_until_completed(nic_task)
                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
                                             "conneced to NIC type {}".format(vm_id, nic_type))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
                                              "failed to connect NIC type {}".format(vm_id, nic_type))
        except Exception as exp:
            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
                              "while adding Network adapter")
            raise vimconn.vimconnException(message=exp)
3964
3965
3966 def set_numa_affinity(self, vmuuid, paired_threads_id):
3967 """
3968 Method to assign numa affinity in vm configuration parammeters
3969 Args :
3970 vmuuid - vm uuid
3971 paired_threads_id - one or more virtual processor
3972 numbers
3973 Returns:
3974 return if True
3975 """
3976 try:
3977 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
3978 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
3979 context = None
3980 if hasattr(ssl, '_create_unverified_context'):
3981 context = ssl._create_unverified_context()
3982 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
3983 pwd=self.passwd, port=int(vm_vcenter_port),
3984 sslContext=context)
3985 atexit.register(Disconnect, vcenter_conect)
3986 content = vcenter_conect.RetrieveContent()
3987
3988 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
3989 if vm_obj:
3990 config_spec = vim.vm.ConfigSpec()
3991 config_spec.extraConfig = []
3992 opt = vim.option.OptionValue()
3993 opt.key = 'numa.nodeAffinity'
3994 opt.value = str(paired_threads_id)
3995 config_spec.extraConfig.append(opt)
3996 task = vm_obj.ReconfigVM_Task(config_spec)
3997 if task:
3998 result = self.wait_for_vcenter_task(task, vcenter_conect)
3999 extra_config = vm_obj.config.extraConfig
4000 flag = False
4001 for opts in extra_config:
4002 if 'numa.nodeAffinity' in opts.key:
4003 flag = True
4004 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
4005 "value {} for vm {}".format(opt.value, vm_obj))
4006 if flag:
4007 return
4008 else:
4009 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
4010 except Exception as exp:
4011 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
4012 "for VM {} : {}".format(vm_obj, vm_moref_id))
4013 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
4014 "affinity".format(exp))
4015
4016
4017
    def cloud_init(self, vapp, cloud_config):
        """
        Method to inject ssh-key
        vapp - vapp object
        cloud_config a dictionary with:
            'key-pairs': (optional) list of strings with the public key to be inserted to the default user
            'users': (optional) list of users to be inserted, each item is a dict with:
                'name': (mandatory) user name,
                'key-pairs': (optional) list of strings with the public key to be inserted to the user
            'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
                or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
            'config-files': (optional). List of files to be transferred. Each item is a dict with:
                'dest': (mandatory) string with the destination absolute path
                'encoding': (optional, by default text). Can be one of:
                    'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                'content' (mandatory): string with the content of the file
                'permissions': (optional) string with file permissions, typically octal notation '0644'
                'owner': (optional) file owner, string with the format 'owner:group'
            'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
        """

        try:
            if isinstance(cloud_config, dict):
                key_pairs = []
                userdata = []
                if "key-pairs" in cloud_config:
                    key_pairs = cloud_config["key-pairs"]

                if "users" in cloud_config:
                    userdata = cloud_config["users"]

                # NOTE(review): a guest-customization script is rebuilt and
                # applied once per (key, user_key) pair, so the last pair's
                # script overwrites earlier ones, and nothing is injected when
                # either list is empty -- confirm whether all keys should be
                # merged into a single script instead.
                for key in key_pairs:
                    for user in userdata:
                        if 'name' in user: user_name = user['name']
                        if 'key-pairs' in user and len(user['key-pairs']) > 0:
                            for user_key in user['key-pairs']:
                                # Shell script executed by vCloud guest
                                # customization; the "precustomization" branch
                                # appends the keys to root's and the user's
                                # authorized_keys files.
                                customize_script = """
                                #!/bin/bash
                                echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
                                if [ "$1" = "precustomization" ];then
                                    echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
                                    if [ ! -d /root/.ssh ];then
                                        mkdir /root/.ssh
                                        chown root:root /root/.ssh
                                        chmod 700 /root/.ssh
                                        touch /root/.ssh/authorized_keys
                                        chown root:root /root/.ssh/authorized_keys
                                        chmod 600 /root/.ssh/authorized_keys
                                        # make centos with selinux happy
                                        which restorecon && restorecon -Rv /root/.ssh
                                        echo '{key}' >> /root/.ssh/authorized_keys
                                    else
                                        touch /root/.ssh/authorized_keys
                                        chown root:root /root/.ssh/authorized_keys
                                        chmod 600 /root/.ssh/authorized_keys
                                        echo '{key}' >> /root/.ssh/authorized_keys
                                    fi
                                    if [ -d /home/{user_name} ];then
                                        if [ ! -d /home/{user_name}/.ssh ];then
                                            mkdir /home/{user_name}/.ssh
                                            chown {user_name}:{user_name} /home/{user_name}/.ssh
                                            chmod 700 /home/{user_name}/.ssh
                                            touch /home/{user_name}/.ssh/authorized_keys
                                            chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
                                            chmod 600 /home/{user_name}/.ssh/authorized_keys
                                            # make centos with selinux happy
                                            which restorecon && restorecon -Rv /home/{user_name}/.ssh
                                            echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
                                        else
                                            touch /home/{user_name}/.ssh/authorized_keys
                                            chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
                                            chmod 600 /home/{user_name}/.ssh/authorized_keys
                                            echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
                                        fi
                                    fi
                                fi""".format(key=key, user_name=user_name, user_key=user_key)

                                # Apply the script to every VM in the vApp and
                                # wait for each customization task.
                                for vm in vapp._get_vms():
                                    vm_name = vm.name
                                    task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
                                    if isinstance(task, GenericTask):
                                        self.vca.block_until_completed(task)
                                        self.logger.info("cloud_init : customized guest os task "\
                                                         "completed for VM {}".format(vm_name))
                                    else:
                                        self.logger.error("cloud_init : task for customized guest os"\
                                                          "failed for VM {}".format(vm_name))
        except Exception as exp:
            self.logger.error("cloud_init : exception occurred while injecting "\
                              "ssh-key")
            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
                                           "ssh-key".format(exp))
4110
4111
4112 def add_new_disk(self, vapp_uuid, disk_size):
4113 """
4114 Method to create an empty vm disk
4115
4116 Args:
4117 vapp_uuid - is vapp identifier.
4118 disk_size - size of disk to be created in GB
4119
4120 Returns:
4121 None
4122 """
4123 status = False
4124 vm_details = None
4125 try:
4126 #Disk size in GB, convert it into MB
4127 if disk_size is not None:
4128 disk_size_mb = int(disk_size) * 1024
4129 vm_details = self.get_vapp_details_rest(vapp_uuid)
4130
4131 if vm_details and "vm_virtual_hardware" in vm_details:
4132 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4133 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4134 status = self.add_new_disk_rest(disk_href, disk_size_mb)
4135
4136 except Exception as exp:
4137 msg = "Error occurred while creating new disk {}.".format(exp)
4138 self.rollback_newvm(vapp_uuid, msg)
4139
4140 if status:
4141 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4142 else:
4143 #If failed to add disk, delete VM
4144 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4145 self.rollback_newvm(vapp_uuid, msg)
4146
4147
    def add_new_disk_rest(self, disk_href, disk_size_mb):
        """
        Retrives vApp Disks section & add new empty disk

        Args:
            disk_href: Disk section href to addd disk
            disk_size_mb: Disk size in MB

        Returns: Status of add new disk task (True when the vCloud task
                 completed successfully, False otherwise)
        """
        status = False
        if self.vca.vcloud_session and self.vca.vcloud_session.organization:
            # Fetch the VM's current RASD disk items list.
            response = Http.get(url=disk_href,
                                headers=self.vca.vcloud_session.get_vcloud_headers(),
                                verify=self.vca.verify,
                                logger=self.vca.logger)

        # NOTE(review): if the session check above is false, 'response' is
        # unbound here and this line raises NameError -- confirm intended.
        if response.status_code == 403:
            response = self.retry_rest('GET', disk_href)

        if response.status_code != requests.codes.ok:
            self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
                              .format(disk_href, response.status_code))
            return status
        try:
            #Find but type & max of instance IDs assigned to disks
            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
            # Build a prefix->uri map; default (no-prefix) namespace is mapped
            # explicitly under the "xmlns" key for the lookups below.
            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
            instance_id = 0
            # Track the highest disk InstanceID (and its bus type/subtype) so
            # the new disk reuses the bus and gets a unique instance id.
            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                if item.find("rasd:Description",namespaces).text == "Hard disk":
                    inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
                    if inst_id > instance_id:
                        instance_id = inst_id
                        disk_item = item.find("rasd:HostResource" ,namespaces)
                        bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
                        bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]

            instance_id = instance_id + 1
            new_item = """<Item>
                                <rasd:Description>Hard disk</rasd:Description>
                                <rasd:ElementName>New disk</rasd:ElementName>
                                <rasd:HostResource
                                            xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
                                            vcloud:capacity="{}"
                                            vcloud:busSubType="{}"
                                            vcloud:busType="{}"></rasd:HostResource>
                                <rasd:InstanceID>{}</rasd:InstanceID>
                                <rasd:ResourceType>17</rasd:ResourceType>
                        </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)

            new_data = response.content
            #Add new item at the bottom
            new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))

            # Send PUT request to modify virtual hardware section with new disk
            headers = self.vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

            response = Http.put(url=disk_href,
                                data=new_data,
                                headers=headers,
                                verify=self.vca.verify, logger=self.logger)

            if response.status_code == 403:
                add_headers = {'Content-Type': headers['Content-Type']}
                response = self.retry_rest('PUT', disk_href, add_headers, new_data)

            # PUT returns 202 Accepted plus an async task to block on.
            if response.status_code != 202:
                self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
                                  .format(disk_href, response.status_code, response.content))
            else:
                add_disk_task = taskType.parseString(response.content, True)
                if type(add_disk_task) is GenericTask:
                    status = self.vca.block_until_completed(add_disk_task)
                    if not status:
                        self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))

        except Exception as exp:
            self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))

        return status
4231
4232
4233 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
4234 """
4235 Method to add existing disk to vm
4236 Args :
4237 catalogs - List of VDC catalogs
4238 image_id - Catalog ID
4239 template_name - Name of template in catalog
4240 vapp_uuid - UUID of vApp
4241 Returns:
4242 None
4243 """
4244 disk_info = None
4245 vcenter_conect, content = self.get_vcenter_content()
4246 #find moref-id of vm in image
4247 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
4248 image_id=image_id,
4249 )
4250
4251 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
4252 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
4253 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
4254 if catalog_vm_moref_id:
4255 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
4256 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
4257 if catalog_vm_obj:
4258 #find existing disk
4259 disk_info = self.find_disk(catalog_vm_obj)
4260 else:
4261 exp_msg = "No VM with image id {} found".format(image_id)
4262 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4263 else:
4264 exp_msg = "No Image found with image ID {} ".format(image_id)
4265 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4266
4267 if disk_info:
4268 self.logger.info("Existing disk_info : {}".format(disk_info))
4269 #get VM
4270 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4271 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
4272 if vm_obj:
4273 status = self.add_disk(vcenter_conect=vcenter_conect,
4274 vm=vm_obj,
4275 disk_info=disk_info,
4276 size=size,
4277 vapp_uuid=vapp_uuid
4278 )
4279 if status:
4280 self.logger.info("Disk from image id {} added to {}".format(image_id,
4281 vm_obj.config.name)
4282 )
4283 else:
4284 msg = "No disk found with image id {} to add in VM {}".format(
4285 image_id,
4286 vm_obj.config.name)
4287 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4288
4289
4290 def find_disk(self, vm_obj):
4291 """
4292 Method to find details of existing disk in VM
4293 Args :
4294 vm_obj - vCenter object of VM
4295 image_id - Catalog ID
4296 Returns:
4297 disk_info : dict of disk details
4298 """
4299 disk_info = {}
4300 if vm_obj:
4301 try:
4302 devices = vm_obj.config.hardware.device
4303 for device in devices:
4304 if type(device) is vim.vm.device.VirtualDisk:
4305 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4306 disk_info["full_path"] = device.backing.fileName
4307 disk_info["datastore"] = device.backing.datastore
4308 disk_info["capacityKB"] = device.capacityInKB
4309 break
4310 except Exception as exp:
4311 self.logger.error("find_disk() : exception occurred while "\
4312 "getting existing disk details :{}".format(exp))
4313 return disk_info
4314
4315
4316 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4317 """
4318 Method to add existing disk in VM
4319 Args :
4320 vcenter_conect - vCenter content object
4321 vm - vCenter vm object
4322 disk_info : dict of disk details
4323 Returns:
4324 status : status of add disk task
4325 """
4326 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4327 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4328 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4329 if size is not None:
4330 #Convert size from GB to KB
4331 sizeKB = int(size) * 1024 * 1024
4332 #compare size of existing disk and user given size.Assign whicherver is greater
4333 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4334 sizeKB, capacityKB))
4335 if sizeKB > capacityKB:
4336 capacityKB = sizeKB
4337
4338 if datastore and fullpath and capacityKB:
4339 try:
4340 spec = vim.vm.ConfigSpec()
4341 # get all disks on a VM, set unit_number to the next available
4342 unit_number = 0
4343 for dev in vm.config.hardware.device:
4344 if hasattr(dev.backing, 'fileName'):
4345 unit_number = int(dev.unitNumber) + 1
4346 # unit_number 7 reserved for scsi controller
4347 if unit_number == 7:
4348 unit_number += 1
4349 if isinstance(dev, vim.vm.device.VirtualDisk):
4350 #vim.vm.device.VirtualSCSIController
4351 controller_key = dev.controllerKey
4352
4353 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4354 unit_number, controller_key))
4355 # add disk here
4356 dev_changes = []
4357 disk_spec = vim.vm.device.VirtualDeviceSpec()
4358 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4359 disk_spec.device = vim.vm.device.VirtualDisk()
4360 disk_spec.device.backing = \
4361 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4362 disk_spec.device.backing.thinProvisioned = True
4363 disk_spec.device.backing.diskMode = 'persistent'
4364 disk_spec.device.backing.datastore = datastore
4365 disk_spec.device.backing.fileName = fullpath
4366
4367 disk_spec.device.unitNumber = unit_number
4368 disk_spec.device.capacityInKB = capacityKB
4369 disk_spec.device.controllerKey = controller_key
4370 dev_changes.append(disk_spec)
4371 spec.deviceChange = dev_changes
4372 task = vm.ReconfigVM_Task(spec=spec)
4373 status = self.wait_for_vcenter_task(task, vcenter_conect)
4374 return status
4375 except Exception as exp:
4376 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4377 "{} to vm {}".format(exp,
4378 fullpath,
4379 vm.config.name)
4380 self.rollback_newvm(vapp_uuid, exp_msg)
4381 else:
4382 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4383 self.rollback_newvm(vapp_uuid, msg)
4384
4385
4386 def get_vcenter_content(self):
4387 """
4388 Get the vsphere content object
4389 """
4390 try:
4391 vm_vcenter_info = self.get_vm_vcenter_info()
4392 except Exception as exp:
4393 self.logger.error("Error occurred while getting vCenter infromationn"\
4394 " for VM : {}".format(exp))
4395 raise vimconn.vimconnException(message=exp)
4396
4397 context = None
4398 if hasattr(ssl, '_create_unverified_context'):
4399 context = ssl._create_unverified_context()
4400
4401 vcenter_conect = SmartConnect(
4402 host=vm_vcenter_info["vm_vcenter_ip"],
4403 user=vm_vcenter_info["vm_vcenter_user"],
4404 pwd=vm_vcenter_info["vm_vcenter_password"],
4405 port=int(vm_vcenter_info["vm_vcenter_port"]),
4406 sslContext=context
4407 )
4408 atexit.register(Disconnect, vcenter_conect)
4409 content = vcenter_conect.RetrieveContent()
4410 return vcenter_conect, content
4411
4412
4413 def get_vm_moref_id(self, vapp_uuid):
4414 """
4415 Get the moref_id of given VM
4416 """
4417 try:
4418 if vapp_uuid:
4419 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4420 if vm_details and "vm_vcenter_info" in vm_details:
4421 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4422
4423 return vm_moref_id
4424
4425 except Exception as exp:
4426 self.logger.error("Error occurred while getting VM moref ID "\
4427 " for VM : {}".format(exp))
4428 return None
4429
4430
    def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
        """
        Method to get vApp template details
            Args :
                catalogs - list of VDC catalogs
                image_id - Catalog ID to find
                template_name : template name in catalog (recomputed from
                                image_id below; the passed value is overwritten)
            Returns:
                parsed_response : dict holding 'vm_vcenter_info' with the
                                  template VM's MoRef id, empty on failure
        """
        parsed_response = {}

        # admin session is required to read the VCloudExtension (vmext) section
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")

        try:
            catalog = self.get_catalog_obj(image_id, catalogs)
            if catalog:
                template_name = self.get_catalogbyid(image_id, catalogs)
                # NOTE: relies on Python 2 filter() returning a list (len() below)
                catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
                if len(catalog_items) == 1:
                    # fetch the catalog item XML to locate the vApp template entity
                    response = Http.get(catalog_items[0].get_href(),
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                    catalogItem = XmlElementTree.fromstring(response.content)
                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                    vapp_tempalte_href = entity.get("href")
                    #get vapp details and parse moref id

                    # XML namespaces used to navigate the vApp template document
                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                                  'vmw': 'http://www.vmware.com/schema/ovf',
                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
                                  }

                    if vca.vcloud_session and vca.vcloud_session.organization:
                        response = Http.get(url=vapp_tempalte_href,
                                            headers=vca.vcloud_session.get_vcloud_headers(),
                                            verify=vca.verify,
                                            logger=vca.logger
                                            )

                        if response.status_code != requests.codes.ok:
                            self.logger.debug("REST API call {} failed. Return status code {}".format(
                                                vapp_tempalte_href, response.status_code))

                        else:
                            # drill into Children/Vm/VCloudExtension to read the
                            # vCenter MoRef of the template's VM
                            xmlroot_respond = XmlElementTree.fromstring(response.content)
                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
                            if children_section is not None:
                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                                if vCloud_extension_section is not None:
                                    vm_vcenter_info = {}
                                    vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                                    vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                                    if vmext is not None:
                                        vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                                    parsed_response["vm_vcenter_info"]= vm_vcenter_info

        except Exception as exp :
            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))

        return parsed_response
4499
4500
4501 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
4502 """
4503 Method to delete vApp
4504 Args :
4505 vapp_uuid - vApp UUID
4506 msg - Error message to be logged
4507 exp_type : Exception type
4508 Returns:
4509 None
4510 """
4511 if vapp_uuid:
4512 status = self.delete_vminstance(vapp_uuid)
4513 else:
4514 msg = "No vApp ID"
4515 self.logger.error(msg)
4516 if exp_type == "Genric":
4517 raise vimconn.vimconnException(msg)
4518 elif exp_type == "NotFound":
4519 raise vimconn.vimconnNotFoundException(message=msg)
4520
4521 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4522 """
4523 Method to attach SRIOV adapters to VM
4524
4525 Args:
4526 vapp_uuid - uuid of vApp/VM
4527 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
4528 vmname_andid - vmname
4529
4530 Returns:
4531 The status of add SRIOV adapter task , vm object and
4532 vcenter_conect object
4533 """
4534 vm_obj = None
4535 vcenter_conect, content = self.get_vcenter_content()
4536 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4537
4538 if vm_moref_id:
4539 try:
4540 no_of_sriov_devices = len(sriov_nets)
4541 if no_of_sriov_devices > 0:
4542 #Get VM and its host
4543 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4544 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4545 if host_obj and vm_obj:
4546 #get SRIOV devies from host on which vapp is currently installed
4547 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4548 no_of_sriov_devices,
4549 )
4550
4551 if len(avilable_sriov_devices) == 0:
4552 #find other hosts with active pci devices
4553 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4554 content,
4555 no_of_sriov_devices,
4556 )
4557
4558 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4559 #Migrate vm to the host where SRIOV devices are available
4560 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4561 new_host_obj))
4562 task = self.relocate_vm(new_host_obj, vm_obj)
4563 if task is not None:
4564 result = self.wait_for_vcenter_task(task, vcenter_conect)
4565 self.logger.info("Migrate VM status: {}".format(result))
4566 host_obj = new_host_obj
4567 else:
4568 self.logger.info("Fail to migrate VM : {}".format(result))
4569 raise vimconn.vimconnNotFoundException(
4570 "Fail to migrate VM : {} to host {}".format(
4571 vmname_andid,
4572 new_host_obj)
4573 )
4574
4575 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4576 #Add SRIOV devices one by one
4577 for sriov_net in sriov_nets:
4578 network_name = sriov_net.get('net_id')
4579 dvs_portgr_name = self.create_dvPort_group(network_name)
4580 if sriov_net.get('type') == "VF":
4581 #add vlan ID ,Modify portgroup for vlan ID
4582 self.configure_vlanID(content, vcenter_conect, network_name)
4583
4584 task = self.add_sriov_to_vm(content,
4585 vm_obj,
4586 host_obj,
4587 network_name,
4588 avilable_sriov_devices[0]
4589 )
4590 if task:
4591 status= self.wait_for_vcenter_task(task, vcenter_conect)
4592 if status:
4593 self.logger.info("Added SRIOV {} to VM {}".format(
4594 no_of_sriov_devices,
4595 str(vm_obj)))
4596 else:
4597 self.logger.error("Fail to add SRIOV {} to VM {}".format(
4598 no_of_sriov_devices,
4599 str(vm_obj)))
4600 raise vimconn.vimconnUnexpectedResponse(
4601 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
4602 )
4603 return True, vm_obj, vcenter_conect
4604 else:
4605 self.logger.error("Currently there is no host with"\
4606 " {} number of avaialble SRIOV "\
4607 "VFs required for VM {}".format(
4608 no_of_sriov_devices,
4609 vmname_andid)
4610 )
4611 raise vimconn.vimconnNotFoundException(
4612 "Currently there is no host with {} "\
4613 "number of avaialble SRIOV devices required for VM {}".format(
4614 no_of_sriov_devices,
4615 vmname_andid))
4616 else:
4617 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
4618
4619 except vmodl.MethodFault as error:
4620 self.logger.error("Error occurred while adding SRIOV {} ",error)
4621 return None, vm_obj, vcenter_conect
4622
4623
4624 def get_sriov_devices(self,host, no_of_vfs):
4625 """
4626 Method to get the details of SRIOV devices on given host
4627 Args:
4628 host - vSphere host object
4629 no_of_vfs - number of VFs needed on host
4630
4631 Returns:
4632 array of SRIOV devices
4633 """
4634 sriovInfo=[]
4635 if host:
4636 for device in host.config.pciPassthruInfo:
4637 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4638 if device.numVirtualFunction >= no_of_vfs:
4639 sriovInfo.append(device)
4640 break
4641 return sriovInfo
4642
4643
4644 def get_host_and_sriov_devices(self, content, no_of_vfs):
4645 """
4646 Method to get the details of SRIOV devices infromation on all hosts
4647
4648 Args:
4649 content - vSphere host object
4650 no_of_vfs - number of pci VFs needed on host
4651
4652 Returns:
4653 array of SRIOV devices and host object
4654 """
4655 host_obj = None
4656 sriov_device_objs = None
4657 try:
4658 if content:
4659 container = content.viewManager.CreateContainerView(content.rootFolder,
4660 [vim.HostSystem], True)
4661 for host in container.view:
4662 devices = self.get_sriov_devices(host, no_of_vfs)
4663 if devices:
4664 host_obj = host
4665 sriov_device_objs = devices
4666 break
4667 except Exception as exp:
4668 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4669
4670 return host_obj,sriov_device_objs
4671
4672
4673 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
4674 """
4675 Method to add SRIOV adapter to vm
4676
4677 Args:
4678 host_obj - vSphere host object
4679 vm_obj - vSphere vm object
4680 content - vCenter content object
4681 network_name - name of distributed virtaul portgroup
4682 sriov_device - SRIOV device info
4683
4684 Returns:
4685 task object
4686 """
4687 devices = []
4688 vnic_label = "sriov nic"
4689 try:
4690 dvs_portgr = self.get_dvport_group(network_name)
4691 network_name = dvs_portgr.name
4692 nic = vim.vm.device.VirtualDeviceSpec()
4693 # VM device
4694 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4695 nic.device = vim.vm.device.VirtualSriovEthernetCard()
4696 nic.device.addressType = 'assigned'
4697 #nic.device.key = 13016
4698 nic.device.deviceInfo = vim.Description()
4699 nic.device.deviceInfo.label = vnic_label
4700 nic.device.deviceInfo.summary = network_name
4701 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
4702
4703 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
4704 nic.device.backing.deviceName = network_name
4705 nic.device.backing.useAutoDetect = False
4706 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
4707 nic.device.connectable.startConnected = True
4708 nic.device.connectable.allowGuestControl = True
4709
4710 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
4711 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
4712 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
4713
4714 devices.append(nic)
4715 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
4716 task = vm_obj.ReconfigVM_Task(vmconf)
4717 return task
4718 except Exception as exp:
4719 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
4720 return None
4721
4722
4723 def create_dvPort_group(self, network_name):
4724 """
4725 Method to create disributed virtual portgroup
4726
4727 Args:
4728 network_name - name of network/portgroup
4729
4730 Returns:
4731 portgroup key
4732 """
4733 try:
4734 new_network_name = [network_name, '-', str(uuid.uuid4())]
4735 network_name=''.join(new_network_name)
4736 vcenter_conect, content = self.get_vcenter_content()
4737
4738 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4739 if dv_switch:
4740 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4741 dv_pg_spec.name = network_name
4742
4743 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4744 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4745 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4746 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4747 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4748 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4749
4750 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4751 self.wait_for_vcenter_task(task, vcenter_conect)
4752
4753 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4754 if dvPort_group:
4755 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
4756 return dvPort_group.key
4757 else:
4758 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
4759
4760 except Exception as exp:
4761 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
4762 " : {}".format(network_name, exp))
4763 return None
4764
4765 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4766 """
4767 Method to reconfigure disributed virtual portgroup
4768
4769 Args:
4770 dvPort_group_name - name of disributed virtual portgroup
4771 content - vCenter content object
4772 config_info - disributed virtual portgroup configuration
4773
4774 Returns:
4775 task object
4776 """
4777 try:
4778 dvPort_group = self.get_dvport_group(dvPort_group_name)
4779 if dvPort_group:
4780 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4781 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4782 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4783 if "vlanID" in config_info:
4784 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4785 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4786
4787 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4788 return task
4789 else:
4790 return None
4791 except Exception as exp:
4792 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
4793 " : {}".format(dvPort_group_name, exp))
4794 return None
4795
4796
4797 def destroy_dvport_group(self , dvPort_group_name):
4798 """
4799 Method to destroy disributed virtual portgroup
4800
4801 Args:
4802 network_name - name of network/portgroup
4803
4804 Returns:
4805 True if portgroup successfully got deleted else false
4806 """
4807 vcenter_conect, content = self.get_vcenter_content()
4808 try:
4809 status = None
4810 dvPort_group = self.get_dvport_group(dvPort_group_name)
4811 if dvPort_group:
4812 task = dvPort_group.Destroy_Task()
4813 status = self.wait_for_vcenter_task(task, vcenter_conect)
4814 return status
4815 except vmodl.MethodFault as exp:
4816 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
4817 exp, dvPort_group_name))
4818 return None
4819
4820
4821 def get_dvport_group(self, dvPort_group_name):
4822 """
4823 Method to get disributed virtual portgroup
4824
4825 Args:
4826 network_name - name of network/portgroup
4827
4828 Returns:
4829 portgroup object
4830 """
4831 vcenter_conect, content = self.get_vcenter_content()
4832 dvPort_group = None
4833 try:
4834 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4835 for item in container.view:
4836 if item.key == dvPort_group_name:
4837 dvPort_group = item
4838 break
4839 return dvPort_group
4840 except vmodl.MethodFault as exp:
4841 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4842 exp, dvPort_group_name))
4843 return None
4844
4845 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4846 """
4847 Method to get disributed virtual portgroup vlanID
4848
4849 Args:
4850 network_name - name of network/portgroup
4851
4852 Returns:
4853 vlan ID
4854 """
4855 vlanId = None
4856 try:
4857 dvPort_group = self.get_dvport_group(dvPort_group_name)
4858 if dvPort_group:
4859 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4860 except vmodl.MethodFault as exp:
4861 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4862 exp, dvPort_group_name))
4863 return vlanId
4864
4865
4866 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4867 """
4868 Method to configure vlanID in disributed virtual portgroup vlanID
4869
4870 Args:
4871 network_name - name of network/portgroup
4872
4873 Returns:
4874 None
4875 """
4876 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4877 if vlanID == 0:
4878 #configure vlanID
4879 vlanID = self.genrate_vlanID(dvPort_group_name)
4880 config = {"vlanID":vlanID}
4881 task = self.reconfig_portgroup(content, dvPort_group_name,
4882 config_info=config)
4883 if task:
4884 status= self.wait_for_vcenter_task(task, vcenter_conect)
4885 if status:
4886 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4887 dvPort_group_name,vlanID))
4888 else:
4889 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
4890 dvPort_group_name, vlanID))
4891
4892
4893 def genrate_vlanID(self, network_name):
4894 """
4895 Method to get unused vlanID
4896 Args:
4897 network_name - name of network/portgroup
4898 Returns:
4899 vlanID
4900 """
4901 vlan_id = None
4902 used_ids = []
4903 if self.config.get('vlanID_range') == None:
4904 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4905 "at config value before creating sriov network with vlan tag")
4906 if "used_vlanIDs" not in self.persistent_info:
4907 self.persistent_info["used_vlanIDs"] = {}
4908 else:
4909 used_ids = self.persistent_info["used_vlanIDs"].values()
4910
4911 for vlanID_range in self.config.get('vlanID_range'):
4912 start_vlanid , end_vlanid = vlanID_range.split("-")
4913 if start_vlanid > end_vlanid:
4914 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4915 vlanID_range))
4916
4917 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4918 if id not in used_ids:
4919 vlan_id = id
4920 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4921 return vlan_id
4922 if vlan_id is None:
4923 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4924
4925
4926 def get_obj(self, content, vimtype, name):
4927 """
4928 Get the vsphere object associated with a given text name
4929 """
4930 obj = None
4931 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4932 for item in container.view:
4933 if item.name == name:
4934 obj = item
4935 break
4936 return obj
4937
4938
4939 def insert_media_to_vm(self, vapp, image_id):
4940 """
4941 Method to insert media CD-ROM (ISO image) from catalog to vm.
4942 vapp - vapp object to get vm id
4943 Image_id - image id for cdrom to be inerted to vm
4944 """
4945 # create connection object
4946 vca = self.connect()
4947 try:
4948 # fetching catalog details
4949 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
4950 response = Http.get(url=rest_url,
4951 headers=vca.vcloud_session.get_vcloud_headers(),
4952 verify=vca.verify,
4953 logger=vca.logger)
4954
4955 if response.status_code != 200:
4956 self.logger.error("REST call {} failed reason : {}"\
4957 "status code : {}".format(url_rest_call,
4958 response.content,
4959 response.status_code))
4960 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
4961 "catalog details")
4962 # searching iso name and id
4963 iso_name,media_id = self.get_media_details(vca, response.content)
4964
4965 if iso_name and media_id:
4966 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
4967 <ns6:MediaInsertOrEjectParams
4968 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
4969 <ns6:Media
4970 type="application/vnd.vmware.vcloud.media+xml"
4971 name="{}.iso"
4972 id="urn:vcloud:media:{}"
4973 href="https://{}/api/media/{}"/>
4974 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
4975 vca.host,media_id)
4976
4977 for vms in vapp._get_vms():
4978 vm_id = (vms.id).split(':')[-1]
4979
4980 headers = vca.vcloud_session.get_vcloud_headers()
4981 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
4982 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
4983
4984 response = Http.post(url=rest_url,
4985 headers=headers,
4986 data=data,
4987 verify=vca.verify,
4988 logger=vca.logger)
4989
4990 if response.status_code != 202:
4991 self.logger.error("Failed to insert CD-ROM to vm")
4992 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
4993 "ISO image to vm")
4994 else:
4995 task = taskType.parseString(response.content, True)
4996 if isinstance(task, GenericTask):
4997 vca.block_until_completed(task)
4998 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
4999 " image to vm {}".format(vm_id))
5000 except Exception as exp:
5001 self.logger.error("insert_media_to_vm() : exception occurred "\
5002 "while inserting media CD-ROM")
5003 raise vimconn.vimconnException(message=exp)
5004
5005
5006 def get_media_details(self, vca, content):
5007 """
5008 Method to get catalog item details
5009 vca - connection object
5010 content - Catalog details
5011 Return - Media name, media id
5012 """
5013 cataloghref_list = []
5014 try:
5015 if content:
5016 vm_list_xmlroot = XmlElementTree.fromstring(content)
5017 for child in vm_list_xmlroot.iter():
5018 if 'CatalogItem' in child.tag:
5019 cataloghref_list.append(child.attrib.get('href'))
5020 if cataloghref_list is not None:
5021 for href in cataloghref_list:
5022 if href:
5023 response = Http.get(url=href,
5024 headers=vca.vcloud_session.get_vcloud_headers(),
5025 verify=vca.verify,
5026 logger=vca.logger)
5027 if response.status_code != 200:
5028 self.logger.error("REST call {} failed reason : {}"\
5029 "status code : {}".format(href,
5030 response.content,
5031 response.status_code))
5032 raise vimconn.vimconnException("get_media_details : Failed to get "\
5033 "catalogitem details")
5034 list_xmlroot = XmlElementTree.fromstring(response.content)
5035 for child in list_xmlroot.iter():
5036 if 'Entity' in child.tag:
5037 if 'media' in child.attrib.get('href'):
5038 name = child.attrib.get('name')
5039 media_id = child.attrib.get('href').split('/').pop()
5040 return name,media_id
5041 else:
5042 self.logger.debug("Media name and id not found")
5043 return False,False
5044 except Exception as exp:
5045 self.logger.error("get_media_details : exception occurred "\
5046 "getting media details")
5047 raise vimconn.vimconnException(message=exp)
5048
5049
5050 def retry_rest(self, method, url, add_headers=None, data=None):
5051 """ Method to get Token & retry respective REST request
5052 Args:
5053 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
5054 url - request url to be used
5055 add_headers - Additional headers (optional)
5056 data - Request payload data to be passed in request
5057 Returns:
5058 response - Response of request
5059 """
5060 response = None
5061
5062 #Get token
5063 self.get_token()
5064
5065 headers=self.vca.vcloud_session.get_vcloud_headers()
5066
5067 if add_headers:
5068 headers.update(add_headers)
5069
5070 if method == 'GET':
5071 response = Http.get(url=url,
5072 headers=headers,
5073 verify=self.vca.verify,
5074 logger=self.vca.logger)
5075 elif method == 'PUT':
5076 response = Http.put(url=url,
5077 data=data,
5078 headers=headers,
5079 verify=self.vca.verify,
5080 logger=self.logger)
5081 elif method == 'POST':
5082 response = Http.post(url=url,
5083 headers=headers,
5084 data=data,
5085 verify=self.vca.verify,
5086 logger=self.vca.logger)
5087 elif method == 'DELETE':
5088 response = Http.delete(url=url,
5089 headers=headers,
5090 verify=self.vca.verify,
5091 logger=self.vca.logger)
5092 return response
5093
5094
5095 def get_token(self):
5096 """ Generate a new token if expired
5097
5098 Returns:
5099 The return vca object that letter can be used to connect to vCloud director as admin for VDC
5100 """
5101 vca = None
5102
5103 try:
5104 self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
5105 self.user,
5106 self.org_name))
5107 vca = VCA(host=self.url,
5108 username=self.user,
5109 service_type=STANDALONE,
5110 version=VCAVERSION,
5111 verify=False,
5112 log=False)
5113
5114 result = vca.login(password=self.passwd, org=self.org_name)
5115 if result is True:
5116 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
5117 if result is True:
5118 self.logger.info(
5119 "Successfully generated token for vcloud direct org: {} as user: {}".format(self.org_name, self.user))
5120 #Update vca
5121 self.vca = vca
5122 return
5123
5124 except:
5125 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
5126 "{} as user: {}".format(self.org_name, self.user))
5127
5128 if not vca or not result:
5129 raise vimconn.vimconnConnectionException("self.connect() is failed while reconnecting")
5130
5131
5132 def get_vdc_details(self):
5133 """ Get VDC details using pyVcloud Lib
5134
5135 Returns vdc object
5136 """
5137 vdc = self.vca.get_vdc(self.tenant_name)
5138
5139 #Retry once, if failed by refreshing token
5140 if vdc is None:
5141 self.get_token()
5142 vdc = self.vca.get_vdc(self.tenant_name)
5143
5144 return vdc
5145
5146