osm/RO.git: osm_ro/vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implements the vimconn abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
68 # global variable for vcd connector type
69 STANDALONE = 'standalone'
70
71 # key for flavor dicts
72 FLAVOR_RAM_KEY = 'ram'
73 FLAVOR_VCPUS_KEY = 'vcpus'
74 FLAVOR_DISK_KEY = 'disk'
75 DEFAULT_IP_PROFILE = {'dhcp_count':50,
76 'dhcp_enabled':True,
77 'ip_version':"IPv4"
78 }
79 # global variable for wait time
80 INTERVAL_TIME = 5
81 MAX_WAIT_TIME = 1800
82
83 VCAVERSION = '5.9'
84
85 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
86 __date__ = "$12-Jan-2017 11:09:29$"
87 __version__ = '0.1'
88
89 # -1: "Could not be created",
90 # 0: "Unresolved",
91 # 1: "Resolved",
92 # 2: "Deployed",
93 # 3: "Suspended",
94 # 4: "Powered on",
95 # 5: "Waiting for user input",
96 # 6: "Unknown state",
97 # 7: "Unrecognized state",
98 # 8: "Powered off",
99 # 9: "Inconsistent state",
100 # 10: "Children do not all have the same status",
101 # 11: "Upload initiated, OVF descriptor pending",
102 # 12: "Upload initiated, copying contents",
103 # 13: "Upload initiated , disk contents pending",
104 # 14: "Upload has been quarantined",
105 # 15: "Upload quarantine period has expired"
106
107 # mapping vCD status to MANO
108 vcdStatusCode2manoFormat = {4: 'ACTIVE',
109 7: 'PAUSED',
110 3: 'SUSPENDED',
111 8: 'INACTIVE',
112 12: 'BUILD',
113 -1: 'ERROR',
114 14: 'DELETED'}
115
116 # mapping network status to MANO format
117 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
118 'ERROR': 'ERROR', 'DELETED': 'DELETED'
119 }
120
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
128 Constructor creates a VMware connector to vCloud director.
129 
130 By default the constructor doesn't validate the connection state, so a client can create the object with None arguments.
131 If the client specifies username, password, host and VDC name, the connector initializes the remaining attributes:
132 
133 a) It initializes the organization UUID.
134 b) It initializes the tenant_id/VDC ID (derived from the tenant name).
135 
136 Args:
137 uuid - organization UUID.
138 name - organization name; it must be present in vCloud director.
139 tenant_id - VDC UUID; it must be present in vCloud director.
140 tenant_name - VDC name.
141 url - hostname or IP address of vCloud director.
142 url_admin - same as above.
143 user - user that administers the organization. The caller must make sure that the
144 username has the right privileges.
145 
146 passwd - password for the user.
147 
148 The VMware connector also requires PVDC administrative privileges and a separate account.
149 These credentials must be passed via the config argument, a dict that contains the keys
150 
151 dict['admin_username']
152 dict['admin_password']
153 config - provides NSX and vCenter information.
154 
155 Returns:
156 Nothing.
157 """
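# Illustrative sketch only (every value below is a placeholder, not taken from any deployment):
# a config dict carrying the keys read in this constructor, and a typical instantiation.
#
#     config = {'admin_username': 'pvdc-admin', 'admin_password': '<secret>',
#               'nsx_manager': 'https://nsx.example.com', 'nsx_user': 'nsx-admin',
#               'nsx_password': '<secret>', 'orgname': 'my-org',
#               'vcenter_ip': '10.0.0.10', 'vcenter_port': 443,
#               'vcenter_user': 'administrator@vsphere.local', 'vcenter_password': '<secret>'}
#     vim = vimconnector(name='my-org', tenant_name='my-org:my-vdc',
#                        url='vcd.example.com', user='org-admin',
#                        passwd='<secret>', config=config)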
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
217 # raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
233 logging.debug("vcd admin username {} vcd admin password {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 elif index == 'tenant_id':
243 return self.tenant_id
244 elif index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 elif index == 'tenant_id':
269 self.tenant_id = value
270 elif index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
289 def connect_as_admin(self):
290 """ Method connects as the pvdc admin user to vCloud director.
291 There are certain actions that can be done only by the provider vdc admin user,
292 e.g. organization creation or provider network creation.
293 
294 Returns:
295 The returned vca object can later be used to connect to vCloud director as admin for the provider vdc
296 """
297
298 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
299
300 vca_admin = VCA(host=self.url,
301 username=self.admin_user,
302 service_type=STANDALONE,
303 version=VCAVERSION,
304 verify=False,
305 log=False)
306 result = vca_admin.login(password=self.admin_password, org='System')
307 if not result:
308 raise vimconn.vimconnConnectionException(
309 "Can't connect to a vCloud director as: {}".format(self.admin_user))
310 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
311 if result is True:
312 self.logger.info(
313 "Successfully logged in to vCloud director org: {} as user: {}".format('System', self.admin_user))
314
315 return vca_admin
316
317 def connect(self):
318 """ Method connects as a normal user to vCloud director.
319 
320 Returns:
321 The returned vca object can later be used to connect to vCloud director as admin for the VDC
322 """
323
324 try:
325 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
326 self.user,
327 self.org_name))
328 vca = VCA(host=self.url,
329 username=self.user,
330 service_type=STANDALONE,
331 version=VCAVERSION,
332 verify=False,
333 log=False)
334
335 result = vca.login(password=self.passwd, org=self.org_name)
336 if not result:
337 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
338 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
339 if result is True:
340 self.logger.info(
341 "Successfully logged in to vCloud director org: {} as user: {}".format(self.org_name, self.user))
342
343 except:
344 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
345 "{} as user: {}".format(self.org_name, self.user))
346
347 return vca
348
349 def init_organization(self):
350 """ Method initializes the organization UUID and the VDC parameters.
351 
352 At a bare minimum the client must provide the organization name and the VDC name present in vCloud director.
353 
354 The VDC UUID (tenant_id) will be initialized at run time if the client didn't pass it to the constructor.
355 The Org UUID will be initialized at run time if the data center is present in vCloud director.
356 
357 Returns:
358 Nothing. The method sets self.vca, self.org_uuid and the tenant id/name attributes.
359 """
360 vca = self.connect()
361 if not vca:
362 raise vimconn.vimconnConnectionException("self.connect() failed.")
363
364 self.vca = vca
365 try:
366 if self.org_uuid is None:
367 org_dict = self.get_org_list()
368 for org in org_dict:
369 # we set org UUID at the init phase but we can do it only when we have valid credential.
370 if org_dict[org] == self.org_name:
371 self.org_uuid = org
372 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
373 break
374 else:
375 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
376
377 # if all is well, request the org details
378 org_details_dict = self.get_org(org_uuid=self.org_uuid)
379
380 # there are two cases when initializing the VDC ID or VDC name at run time
381 # tenant_name provided but no tenant id
382 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
383 vdcs_dict = org_details_dict['vdcs']
384 for vdc in vdcs_dict:
385 if vdcs_dict[vdc] == self.tenant_name:
386 self.tenant_id = vdc
387 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
388 self.org_name))
389 break
390 else:
391 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
392 # case two: we have tenant_id but not tenant_name, so we find and set it.
393 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
394 vdcs_dict = org_details_dict['vdcs']
395 for vdc in vdcs_dict:
396 if vdc == self.tenant_id:
397 self.tenant_name = vdcs_dict[vdc]
398 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
399 self.org_name))
400 break
401 else:
402 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
403 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
404 except:
405 self.logger.debug("Failed to initialize organization UUID for org {}".format(self.org_name))
406 self.logger.debug(traceback.format_exc())
407 self.org_uuid = None
408
409 def new_tenant(self, tenant_name=None, tenant_description=None):
410 """ Method adds a new tenant to VIM with this name.
411 This action requires permission to create a VDC in vCloud director.
412 
413 Args:
414 tenant_name is the tenant name to be created.
415 tenant_description is not used for this call.
416 
417 Return:
418 returns the tenant identifier in UUID format.
419 If the action fails the method raises vimconn.vimconnException.
420 """
421 vdc_task = self.create_vdc(vdc_name=tenant_name)
422 if vdc_task is not None:
423 vdc_uuid, value = vdc_task.popitem()
424 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
425 return vdc_uuid
426 else:
427 raise vimconn.vimconnException("Failed to create tenant {}".format(tenant_name))
428
429 def delete_tenant(self, tenant_id=None):
430 """ Delete a tenant from VIM
431 Args:
432 tenant_id is the tenant_id to be deleted.
433 
434 Return:
435 returns the tenant identifier in UUID format.
436 If the action fails the method raises an exception.
437 """
438 vca = self.connect_as_admin()
439 if not vca:
440 raise vimconn.vimconnConnectionException("self.connect_as_admin() failed")
441
442 if tenant_id is not None:
443 if vca.vcloud_session and vca.vcloud_session.organization:
444 #Get OrgVDC
445 url_list = [self.vca.host, '/api/vdc/', tenant_id]
446 orgvdc_herf = ''.join(url_list)
447 response = Http.get(url=orgvdc_herf,
448 headers=vca.vcloud_session.get_vcloud_headers(),
449 verify=vca.verify,
450 logger=vca.logger)
451
452 if response.status_code != requests.codes.ok:
453 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
454 "Return status code {}".format(orgvdc_herf,
455 response.status_code))
456 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
457
458 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
459 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
460 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
461 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
462 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
463
464 #Delete OrgVDC
465 response = Http.delete(url=vdc_remove_href,
466 headers=vca.vcloud_session.get_vcloud_headers(),
467 verify=vca.verify,
468 logger=vca.logger)
469
470 if response.status_code == 202:
471 delete_vdc_task = taskType.parseString(response.content, True)
472 if type(delete_vdc_task) is GenericTask:
473 self.vca.block_until_completed(delete_vdc_task)
474 self.logger.info("Deleted tenant with ID {}".format(tenant_id))
475 return tenant_id
476 else:
477 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
478 "Return status code {}".format(vdc_remove_href,
479 response.status_code))
480 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
481 else:
482 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
483 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
484
485
486 def get_tenant_list(self, filter_dict={}):
487 """Obtain tenants of VIM
488 filter_dict can contain the following keys:
489 name: filter by tenant name
490 id: filter by tenant uuid/id
491 <other VIM specific>
492 Returns the tenant list of dictionaries:
493 [{'name':'<name>', 'id':'<id>', ...}, ...]
494
495 """
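# Illustrative only (placeholder values): the filter is an exact match on the returned keys,
# so e.g. filtering by VDC name yields the matching 'name'/'id' entries.
#
#     tenants = vim.get_tenant_list(filter_dict={'name': 'my-vdc'})
#     # -> [{'name': 'my-vdc', 'id': '<vdc-uuid>'}]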
496 org_dict = self.get_org(self.org_uuid)
497 vdcs_dict = org_dict['vdcs']
498
499 vdclist = []
500 try:
501 for k in vdcs_dict:
502 entry = {'name': vdcs_dict[k], 'id': k}
503 # if caller didn't specify dictionary we return all tenants.
504 if filter_dict is not None and filter_dict:
505 filtered_entry = entry.copy()
506 filtered_dict = set(entry.keys()) - set(filter_dict)
507 for unwanted_key in filtered_dict: del entry[unwanted_key]
508 if filter_dict == entry:
509 vdclist.append(filtered_entry)
510 else:
511 vdclist.append(entry)
512 except:
513 self.logger.debug("Error in get_tenant_list()")
514 self.logger.debug(traceback.format_exc())
515 raise vimconn.vimconnException("Incorrect state in get_tenant_list()")
516
517 return vdclist
518
519 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
520 """Adds a tenant network to VIM
521 net_name is the name
522 net_type can be 'bridge', 'data' or 'ptp'.
523 ip_profile is a dict containing the IP parameters of the network
524 shared is a boolean
525 Returns the network identifier"""
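# Illustrative sketch only: an ip_profile dict. 'ip_version', 'dhcp_enabled' and 'dhcp_count'
# mirror DEFAULT_IP_PROFILE above; the remaining keys ('subnet_address', 'gateway_address',
# 'dhcp_start_address') are assumed from the generic vimconn ip_profile and may differ.
#
#     ip_profile = {'ip_version': 'IPv4', 'subnet_address': '192.168.10.0/24',
#                   'gateway_address': '192.168.10.1', 'dhcp_enabled': True,
#                   'dhcp_start_address': '192.168.10.20', 'dhcp_count': 50}
#     net_id = vim.new_network('mgmt-net', 'bridge', ip_profile=ip_profile, shared=False)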
526
527 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
528 .format(net_name, net_type, ip_profile, shared))
529
530 isshared = 'false'
531 if shared:
532 isshared = 'true'
533
534 # ############# Stub code for SRIOV #################
535 # if net_type == "data" or net_type == "ptp":
536 # if self.config.get('dv_switch_name') == None:
537 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
538 # network_uuid = self.create_dvPort_group(net_name)
539
540 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
541 ip_profile=ip_profile, isshared=isshared)
542 if network_uuid is not None:
543 return network_uuid
544 else:
545 raise vimconn.vimconnUnexpectedResponse("Failed to create a new network {}".format(net_name))
546
547 def get_vcd_network_list(self):
548 """ Method returns the available networks for the logged-in tenant's VDC.
549 
550 Returns:
551 A list of dicts, one per organization VDC network.
552 """
553
554 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
555
556 if not self.tenant_name:
557 raise vimconn.vimconnConnectionException("Tenant name is empty.")
558
559 vdc = self.get_vdc_details()
560 if vdc is None:
561 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
562
563 vdc_uuid = vdc.get_id().split(":")[3]
564 networks = self.vca.get_networks(vdc.get_name())
565 network_list = []
566 try:
567 for network in networks:
568 filter_dict = {}
569 netid = network.get_id().split(":")
570 if len(netid) != 4:
571 continue
572
573 filter_dict["name"] = network.get_name()
574 filter_dict["id"] = netid[3]
575 filter_dict["shared"] = network.get_IsShared()
576 filter_dict["tenant_id"] = vdc_uuid
577 if network.get_status() == 1:
578 filter_dict["admin_state_up"] = True
579 else:
580 filter_dict["admin_state_up"] = False
581 filter_dict["status"] = "ACTIVE"
582 filter_dict["type"] = "bridge"
583 network_list.append(filter_dict)
584 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
585 except:
586 self.logger.debug("Error in get_vcd_network_list")
587 self.logger.debug(traceback.format_exc())
588 pass
589
590 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
591 return network_list
592
593 def get_network_list(self, filter_dict={}):
594 """Obtain tenant networks of VIM
595 Filter_dict can be:
596 name: network name OR/AND
597 id: network uuid OR/AND
598 shared: boolean OR/AND
599 tenant_id: tenant OR/AND
600 admin_state_up: boolean
601 status: 'ACTIVE'
602
603 [{key : value , key : value}]
604
605 Returns the network list of dictionaries:
606 [{<the fields at Filter_dict plus some VIM specific>}, ...]
607 List can be empty
608 """
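# Illustrative only (placeholder value): each returned entry carries name, id, shared,
# tenant_id, admin_state_up, status and type; filter_dict is matched exactly against those keys.
#
#     nets = vim.get_network_list(filter_dict={'name': 'mgmt-net'})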
609
610 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
611
612 if not self.tenant_name:
613 raise vimconn.vimconnConnectionException("Tenant name is empty.")
614
615 vdc = self.get_vdc_details()
616 if vdc is None:
617 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
618
619 try:
620 vdcid = vdc.get_id().split(":")[3]
621 networks = self.vca.get_networks(vdc.get_name())
622 network_list = []
623
624 for network in networks:
625 filter_entry = {}
626 net_uuid = network.get_id().split(":")
627 if len(net_uuid) != 4:
628 continue
629 else:
630 net_uuid = net_uuid[3]
631 # create dict entry
632 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
633 vdcid,
634 network.get_name()))
635 filter_entry["name"] = network.get_name()
636 filter_entry["id"] = net_uuid
637 filter_entry["shared"] = network.get_IsShared()
638 filter_entry["tenant_id"] = vdcid
639 if network.get_status() == 1:
640 filter_entry["admin_state_up"] = True
641 else:
642 filter_entry["admin_state_up"] = False
643 filter_entry["status"] = "ACTIVE"
644 filter_entry["type"] = "bridge"
645 filtered_entry = filter_entry.copy()
646
647 if filter_dict is not None and filter_dict:
648 # we remove all the key:value pairs we don't care about and match only
649 # the requested fields
650 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
651 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
652 if filter_dict == filter_entry:
653 network_list.append(filtered_entry)
654 else:
655 network_list.append(filtered_entry)
656 except:
657 self.logger.debug("Error in get_network_list")
658 self.logger.debug(traceback.format_exc())
659
660 self.logger.debug("Returning {}".format(network_list))
661 return network_list
662
663 def get_network(self, net_id):
664 """Method obtains network details of net_id VIM network
665 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields."""
666
667 try:
668 vdc = self.get_vdc_details()
669 vdc_id = vdc.get_id().split(":")[3]
670
671 networks = self.vca.get_networks(vdc.get_name())
672 filter_dict = {}
673
674 for network in networks:
675 vdc_network_id = network.get_id().split(":")
676 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
677 filter_dict["name"] = network.get_name()
678 filter_dict["id"] = vdc_network_id[3]
679 filter_dict["shared"] = network.get_IsShared()
680 filter_dict["tenant_id"] = vdc_id
681 if network.get_status() == 1:
682 filter_dict["admin_state_up"] = True
683 else:
684 filter_dict["admin_state_up"] = False
685 filter_dict["status"] = "ACTIVE"
686 filter_dict["type"] = "bridge"
687 self.logger.debug("Returning {}".format(filter_dict))
688 return filter_dict
689 except:
690 self.logger.debug("Error in get_network")
691 self.logger.debug(traceback.format_exc())
692
693 return filter_dict
694
695 def delete_network(self, net_id):
696 """
697 Method Deletes a tenant network from VIM, provide the network id.
698
699 Returns the network identifier or raise an exception
700 """
701
702 # ############# Stub code for SRIOV #################
703 # dvport_group = self.get_dvport_group(net_id)
704 # if dvport_group:
705 # #delete portgroup
706 # status = self.destroy_dvport_group(net_id)
707 # if status:
708 # # Remove vlanID from persistent info
709 # if net_id in self.persistent_info["used_vlanIDs"]:
710 # del self.persistent_info["used_vlanIDs"][net_id]
711 #
712 # return net_id
713
714 vcd_network = self.get_vcd_network(network_uuid=net_id)
715 if vcd_network is not None and vcd_network:
716 if self.delete_network_action(network_uuid=net_id):
717 return net_id
718 else:
719 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
720
721 def refresh_nets_status(self, net_list):
722 """Get the status of the networks
723 Params: the list of network identifiers
724 Returns a dictionary with:
725 net_id: #VIM id of this network
726 status: #Mandatory. Text with one of:
727 # DELETED (not found at vim)
728 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
729 # OTHER (Vim reported other status not understood)
730 # ERROR (VIM indicates an ERROR status)
731 # ACTIVE, INACTIVE, DOWN (admin down),
732 # BUILD (on building process)
733 #
734 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
735 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
736
737 """
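# Illustrative only (placeholder uuid): shape of the returned dictionary for one network.
#
#     vim.refresh_nets_status(['<net-uuid>'])
#     # -> {'<net-uuid>': {'status': 'ACTIVE',      # or 'DOWN' / 'DELETED'
#     #                    'error_msg': '',
#     #                    'vim_info': '<yaml dump of the vCD network>'}}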
738
739 dict_entry = {}
740 try:
741 for net in net_list:
742 errormsg = ''
743 vcd_network = self.get_vcd_network(network_uuid=net)
744 if vcd_network is not None and vcd_network:
745 if vcd_network['status'] == '1':
746 status = 'ACTIVE'
747 else:
748 status = 'DOWN'
749 else:
750 status = 'DELETED'
751 errormsg = 'Network not found.'
752
753 dict_entry[net] = {'status': status, 'error_msg': errormsg,
754 'vim_info': yaml.safe_dump(vcd_network)}
755 except:
756 self.logger.debug("Error in refresh_nets_status")
757 self.logger.debug(traceback.format_exc())
758
759 return dict_entry
760
761 def get_flavor(self, flavor_id):
762 """Obtain flavor details from the VIM
763 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
764 """
765 if flavor_id not in vimconnector.flavorlist:
766 raise vimconn.vimconnNotFoundException("Flavor not found.")
767 return vimconnector.flavorlist[flavor_id]
768
769 def new_flavor(self, flavor_data):
770 """Adds a tenant flavor to VIM
771 flavor_data contains a dictionary with information, keys:
772 name: flavor name
773 ram: memory (cloud type) in MBytes
774 vcpus: cpus (cloud type)
775 extended: EPA parameters
776 - numas: #items requested in same NUMA
777 memory: number of 1G huge pages memory
778 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
779 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
780 - name: interface name
781 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
782 bandwidth: X Gbps; requested guarantee bandwidth
783 vpci: requested virtual PCI address
784 disk: disk size
785 is_public:
786 #TODO to concrete
787 Returns the flavor identifier"""
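# Illustrative sketch only (placeholder values): a flavor with one NUMA node. With the numa
# below the stored flavor ends up with ram = 4 * 1024 MB and vcpus = 2 * 2, as computed in
# the code that follows.
#
#     flavor_data = {'name': 'epa.flavor', 'ram': 2048, 'vcpus': 2, 'disk': 10,
#                    'extended': {'numas': [{'memory': 4, 'paired-threads': 2,
#                                            'interfaces': [{'name': 'xe0',
#                                                            'dedicated': 'yes',
#                                                            'bandwidth': '10 Gbps'}]}]}}
#     flavor_id = vim.new_flavor(flavor_data)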
788
789 # validate the input and build the internal flavor dict.
790 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
791 new_flavor=flavor_data
792 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
793 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
794 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
795
796 if not isinstance(ram, int):
797 raise vimconn.vimconnException("Non-integer value for ram")
798 elif not isinstance(cpu, int):
799 raise vimconn.vimconnException("Non-integer value for cpu")
800 elif not isinstance(disk, int):
801 raise vimconn.vimconnException("Non-integer value for disk")
802
803 extended_flv = flavor_data.get("extended")
804 if extended_flv:
805 numas=extended_flv.get("numas")
806 if numas:
807 for numa in numas:
808 #overwrite ram and vcpus
809 ram = numa['memory']*1024
810 if 'paired-threads' in numa:
811 cpu = numa['paired-threads']*2
812 elif 'cores' in numa:
813 cpu = numa['cores']
814 elif 'threads' in numa:
815 cpu = numa['threads']
816
817 new_flavor[FLAVOR_RAM_KEY] = ram
818 new_flavor[FLAVOR_VCPUS_KEY] = cpu
819 new_flavor[FLAVOR_DISK_KEY] = disk
820 # generate a new uuid put to internal dict and return it.
821 flavor_id = uuid.uuid4()
822 vimconnector.flavorlist[str(flavor_id)] = new_flavor
823 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
824
825 return str(flavor_id)
826
827 def delete_flavor(self, flavor_id):
828 """Deletes a tenant flavor from VIM identified by its id
829
830 Returns the used id or raise an exception
831 """
832 if flavor_id not in vimconnector.flavorlist:
833 raise vimconn.vimconnNotFoundException("Flavor not found.")
834
835 vimconnector.flavorlist.pop(flavor_id, None)
836 return flavor_id
837
838 def new_image(self, image_dict):
839 """
840 Adds a tenant image to VIM
841 Returns:
842 the image (catalog) identifier if the image is created,
843 otherwise an exception is raised
844 """
845
846 return self.get_image_id_from_path(image_dict['location'])
847
848 def delete_image(self, image_id):
849 """
850 Deletes a tenant image from VIM
851 Args:
852 image_id is ID of Image to be deleted
853 Return:
854 returns the image identifier in UUID format or raises an exception on error
855 """
856 vca = self.connect_as_admin()
857 if not vca:
858 raise vimconn.vimconnConnectionException("self.connect_as_admin() failed")
859 # Get Catalog details
860 url_list = [self.vca.host, '/api/catalog/', image_id]
861 catalog_herf = ''.join(url_list)
862 response = Http.get(url=catalog_herf,
863 headers=vca.vcloud_session.get_vcloud_headers(),
864 verify=vca.verify,
865 logger=vca.logger)
866
867 if response.status_code != requests.codes.ok:
868 self.logger.debug("delete_image():GET REST API call {} failed. "\
869 "Return status code {}".format(catalog_herf,
870 response.status_code))
871 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
872
873 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
874 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
875 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
876
877 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
878 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
879 for catalogItem in catalogItems:
880 catalogItem_href = catalogItem.attrib['href']
881
882 #GET details of catalogItem
883 response = Http.get(url=catalogItem_href,
884 headers=vca.vcloud_session.get_vcloud_headers(),
885 verify=vca.verify,
886 logger=vca.logger)
887
888 if response.status_code != requests.codes.ok:
889 self.logger.debug("delete_image():GET REST API call {} failed. "\
890 "Return status code {}".format(catalog_herf,
891 response.status_code))
892 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
893 catalogItem,
894 image_id))
895
896 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
897 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
898 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
899 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
900
901 #Remove catalogItem
902 response = Http.delete(url= catalogitem_remove_href,
903 headers=vca.vcloud_session.get_vcloud_headers(),
904 verify=vca.verify,
905 logger=vca.logger)
906 if response.status_code == requests.codes.no_content:
907 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
908 else:
909 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
910
911 #Remove catalog
912 url_list = [self.vca.host, '/api/admin/catalog/', image_id]
913 catalog_remove_herf = ''.join(url_list)
914 response = Http.delete(url= catalog_remove_herf,
915 headers=vca.vcloud_session.get_vcloud_headers(),
916 verify=vca.verify,
917 logger=vca.logger)
918
919 if response.status_code == requests.codes.no_content:
920 self.logger.debug("Deleted Catalog {}".format(image_id))
921 return image_id
922 else:
923 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
924
925
926 def catalog_exists(self, catalog_name, catalogs):
927 """ Check whether a catalog with the given name exists.
928 
929 :param catalog_name: catalog name to look for
930 :param catalogs: list of catalog objects
931 :return: True if the catalog exists, False otherwise
932 """
933 for catalog in catalogs:
934 if catalog.name == catalog_name:
935 return True
936 return False
937
938 def create_vimcatalog(self, vca=None, catalog_name=None):
939 """ Create new catalog entry in vCloud director.
940
941 Args
942 vca: vCloud director.
943 catalog_name: catalog that the client wishes to create. Note that no validation is done on the name;
944 the client must make sure to provide a valid string representation.
945
946 Return (bool) True if catalog created.
947
948 """
949 try:
950 task = vca.create_catalog(catalog_name, catalog_name)
951 result = vca.block_until_completed(task)
952 if not result:
953 return False
954 catalogs = vca.get_catalogs()
955 except:
956 return False
957 return self.catalog_exists(catalog_name, catalogs)
958
959 # noinspection PyIncorrectDocstring
960 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
961 description='', progress=False, chunk_bytes=128 * 1024):
962 """
963 Uploads an OVF file to a vCloud catalog
964
965 :param chunk_bytes:
966 :param progress:
967 :param description:
968 :param image_name:
969 :param vca:
970 :param catalog_name: (str): The name of the catalog to upload the media.
971 :param media_file_name: (str): The name of the local media file to upload.
972 :return: (bool) True if the media file was successfully uploaded, false otherwise.
973 """
974 if not os.path.isfile(media_file_name): return False
975 statinfo = os.stat(media_file_name)
976
977 # find a catalog entry where we upload OVF.
978 # create vApp Template and check the status: if vCD is able to read the OVF it will respond with an
979 # appropriate status change.
980 # if VCD can parse OVF we upload VMDK file
981 try:
982 for catalog in vca.get_catalogs():
983 if catalog_name != catalog.name:
984 continue
985 link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
986 link.get_rel() == 'add', catalog.get_Link())
987 assert len(link) == 1
988 data = """
989 <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
990 """ % (escape(catalog_name), escape(description))
991 headers = vca.vcloud_session.get_vcloud_headers()
992 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
993 response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
994 if response.status_code == requests.codes.created:
995 catalogItem = XmlElementTree.fromstring(response.content)
996 entity = [child for child in catalogItem if
997 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
998 href = entity.get('href')
999 template = href
1000 response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
1001 verify=vca.verify, logger=self.logger)
1002
1003 if response.status_code == requests.codes.ok:
1004 media = mediaType.parseString(response.content, True)
1005 link = filter(lambda link: link.get_rel() == 'upload:default',
1006 media.get_Files().get_File()[0].get_Link())[0]
1007 headers = vca.vcloud_session.get_vcloud_headers()
1008 headers['Content-Type'] = 'text/xml'
1009 response = Http.put(link.get_href(),
1010 data=open(media_file_name, 'rb'),
1011 headers=headers,
1012 verify=vca.verify, logger=self.logger)
1013 if response.status_code != requests.codes.ok:
1014 self.logger.debug(
1015 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1016 media_file_name))
1017 return False
1018
1019 # TODO fix this with an async block
1020 time.sleep(5)
1021
1022 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1023
1024 # uploading VMDK file
1025 # check status of OVF upload and upload remaining files.
1026 response = Http.get(template,
1027 headers=vca.vcloud_session.get_vcloud_headers(),
1028 verify=vca.verify,
1029 logger=self.logger)
1030
1031 if response.status_code == requests.codes.ok:
1032 media = mediaType.parseString(response.content, True)
1033 number_of_files = len(media.get_Files().get_File())
1034 for index in xrange(0, number_of_files):
1035 links_list = filter(lambda link: link.get_rel() == 'upload:default',
1036 media.get_Files().get_File()[index].get_Link())
1037 for link in links_list:
1038 # we skip ovf since it already uploaded.
1039 if 'ovf' in link.get_href():
1040 continue
1041 # The OVF file and VMDK must be in a same directory
1042 head, tail = os.path.split(media_file_name)
1043 file_vmdk = head + '/' + link.get_href().split("/")[-1]
1044 if not os.path.isfile(file_vmdk):
1045 return False
1046 statinfo = os.stat(file_vmdk)
1047 if statinfo.st_size == 0:
1048 return False
1049 hrefvmdk = link.get_href()
1050
1051 if progress:
1052 print("Uploading file: {}".format(file_vmdk))
1053 if progress:
1054 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1055 FileTransferSpeed()]
1056 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1057
1058 bytes_transferred = 0
1059 f = open(file_vmdk, 'rb')
1060 while bytes_transferred < statinfo.st_size:
1061 my_bytes = f.read(chunk_bytes)
1062 if len(my_bytes) <= chunk_bytes:
1063 headers = vca.vcloud_session.get_vcloud_headers()
1064 headers['Content-Range'] = 'bytes %s-%s/%s' % (
1065 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1066 headers['Content-Length'] = str(len(my_bytes))
1067 response = Http.put(hrefvmdk,
1068 headers=headers,
1069 data=my_bytes,
1070 verify=vca.verify,
1071 logger=None)
1072
1073 if response.status_code == requests.codes.ok:
1074 bytes_transferred += len(my_bytes)
1075 if progress:
1076 progress_bar.update(bytes_transferred)
1077 else:
1078 self.logger.debug(
1079 'file upload failed with error: [%s] %s' % (response.status_code,
1080 response.content))
1081
1082 f.close()
1083 return False
1084 f.close()
1085 if progress:
1086 progress_bar.finish()
1087 time.sleep(10)
1088 return True
1089 else:
1090 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1091 format(catalog_name, media_file_name))
1092 return False
1093 except Exception as exp:
1094 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1095 .format(catalog_name,media_file_name, exp))
1096 raise vimconn.vimconnException(
1097 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1098 .format(catalog_name,media_file_name, exp))
1099
1100 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1101 return False
1102
1103 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1104 """Upload media file"""
1105 # TODO add named parameters for readability
1106
1107 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1108 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1109
1110 def validate_uuid4(self, uuid_string=None):
1111 """ Method validate correct format of UUID.
1112
1113 Return: True if the string represents a valid uuid
1114 """
1115 try:
1116 val = uuid.UUID(uuid_string, version=4)
1117 except ValueError:
1118 return False
1119 return True
1120
1121 def get_catalogid(self, catalog_name=None, catalogs=None):
1122 """ Method check catalog and return catalog ID in UUID format.
1123
1124 Args
1125 catalog_name: catalog name as string
1126 catalogs: list of catalogs.
1127
1128 Return: catalog UUID as a string, or None if not found
1129 """
1130
1131 for catalog in catalogs:
1132 if catalog.name == catalog_name:
1133 catalog_id = catalog.get_id().split(":")
1134 return catalog_id[3]
1135 return None
1136
1137 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1138 """ Method checks the catalogs and returns the catalog name; lookup is done by catalog UUID.
1139 
1140 Args
1141 catalog_uuid: catalog UUID as string
1142 catalogs: list of catalogs.
1143 
1144 Return: catalog name or None
1145 """
1146
1147 if not self.validate_uuid4(uuid_string=catalog_uuid):
1148 return None
1149
1150 for catalog in catalogs:
1151 catalog_id = catalog.get_id().split(":")[3]
1152 if catalog_id == catalog_uuid:
1153 return catalog.name
1154 return None
1155
1156 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1157 """ Method checks the catalogs and returns the catalog object; lookup is done by catalog UUID.
1158 
1159 Args
1160 catalog_uuid: catalog UUID as string
1161 catalogs: list of catalogs.
1162 
1163 Return: catalog object or None
1164 """
1165
1166 if not self.validate_uuid4(uuid_string=catalog_uuid):
1167 return None
1168
1169 for catalog in catalogs:
1170 catalog_id = catalog.get_id().split(":")[3]
1171 if catalog_id == catalog_uuid:
1172 return catalog
1173 return None
1174
1175 def get_image_id_from_path(self, path=None, progress=False):
1176 """ Method uploads an OVF image to vCloud director.
1177 
1178 Each OVF image is represented as a single catalog entry in vCloud director.
1179 The method checks for an existing catalog entry; the check is done by file name without the file extension.
1180 
1181 If the given catalog name is already present the method responds with the existing catalog UUID, otherwise
1182 it creates a new catalog entry and uploads the OVF file to the newly created catalog.
1183 
1184 If the method can't create a catalog entry or upload a file it raises an exception.
1185 
1186 The method accepts a boolean flag 'progress' that outputs a progress bar. It is useful
1187 for the standalone upload use case, e.g. to test a large file upload.
1188 
1189 Args
1190 path: - valid path to the OVF file.
1191 progress - boolean; if True a progress bar is shown.
1192 
1193 Return: if the image is uploaded correctly the method returns the image catalog UUID.
1194 """
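# Illustrative only (placeholder path): the catalog name is the md5 of the OVF path, so a
# second call with the same path returns the existing catalog UUID instead of re-uploading.
#
#     image_id = vim.get_image_id_from_path('/opt/images/cirros.ovf', progress=True)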
1195
1196 if not path:
1197 raise vimconn.vimconnException("Image path can't be None.")
1198
1199 if not os.path.isfile(path):
1200 raise vimconn.vimconnException("Can't read file. File not found.")
1201
1202 if not os.access(path, os.R_OK):
1203 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1204
1205 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1206
1207 dirpath, filename = os.path.split(path)
1208 flname, file_extension = os.path.splitext(path)
1209 if file_extension != '.ovf':
1210 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1211 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1212
1213 catalog_name = os.path.splitext(filename)[0]
1214 catalog_md5_name = hashlib.md5(path).hexdigest()
1215 self.logger.debug("File name {} Catalog Name {} file path {} "
1216 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1217
1218 try:
1219 catalogs = self.vca.get_catalogs()
1220 except Exception as exp:
1221 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1222 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1223
1224 if len(catalogs) == 0:
1225 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1226 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1227 if not result:
1228 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1229 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1230 media_name=filename, medial_file_name=path, progress=progress)
1231 if not result:
1232 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1233 return self.get_catalogid(catalog_name, self.vca.get_catalogs())
1234 else:
1235 for catalog in catalogs:
1236 # search for existing catalog if we find same name we return ID
1237 # TODO optimize this
1238 if catalog.name == catalog_md5_name:
1239 self.logger.debug("Found existing catalog entry for {} "
1240 "catalog id {}".format(catalog_name,
1241 self.get_catalogid(catalog_md5_name, catalogs)))
1242 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1243
1244 # if we didn't find existing catalog we create a new one and upload image.
1245 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1246 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1247 if not result:
1248 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1249
1250 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1251 media_name=filename, medial_file_name=path, progress=progress)
1252 if not result:
1253 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1254
1255 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1256
1257 def get_image_list(self, filter_dict={}):
1258 '''Obtain tenant images from VIM
1259 Filter_dict can be:
1260 name: image name
1261 id: image uuid
1262 checksum: image checksum
1263 location: image path
1264 Returns the image list of dictionaries:
1265 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1266 List can be empty
1267 '''
1268
1269 try:
1270 image_list = []
1271 catalogs = self.vca.get_catalogs()
1272 if len(catalogs) == 0:
1273 return image_list
1274 else:
1275 for catalog in catalogs:
1276 catalog_uuid = catalog.get_id().split(":")[3]
1277 name = catalog.name
1278 filtered_dict = {}
1279 if filter_dict.get("name") and filter_dict["name"] != name:
1280 continue
1281 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1282 continue
1283 filtered_dict ["name"] = name
1284 filtered_dict ["id"] = catalog_uuid
1285 image_list.append(filtered_dict)
1286
1287 self.logger.debug("List of already created catalog items: {}".format(image_list))
1288 return image_list
1289 except Exception as exp:
1290 raise vimconn.vimconnException("Exception occurred while retrieving catalog items {}".format(exp))
1291
1292 def get_vappid(self, vdc=None, vapp_name=None):
1293 """ Method takes vdc object and vApp name and returns vapp uuid or None
1294
1295 Args:
1296 vdc: The VDC object.
1297 vapp_name: the vApp name identifier
1298
1299 Returns:
1300 The vApp UUID, otherwise None
1301 """
1302 if vdc is None or vapp_name is None:
1303 return None
1304 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1305 try:
1306 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1307 vdc.ResourceEntities.ResourceEntity)
1308 if len(refs) == 1:
1309 return refs[0].href.split("vapp")[1][1:]
1310 except Exception as e:
1311 self.logger.exception(e)
1312 return False
1313 return None
1314
1315 def check_vapp(self, vdc=None, vapp_uuid=None):
1316 """ Method returns True or False depending on whether the vApp is deployed in vCloud director
1317
1318 Args:
1319 vca: Connector to VCA
1320 vdc: The VDC object.
1321 vappid: vappid is application identifier
1322
1323 Returns:
1324 The return True if vApp deployed
1325 :param vdc:
1326 :param vapp_uuid:
1327 """
1328 try:
1329 refs = filter(lambda ref:
1330 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1331 vdc.ResourceEntities.ResourceEntity)
1332 for ref in refs:
1333 vappid = ref.href.split("vapp")[1][1:]
1334 # find vapp with respected vapp uuid
1335 if vappid == vapp_uuid:
1336 return True
1337 except Exception as e:
1338 self.logger.exception(e)
1339 return False
1340 return False
1341
1342 def get_namebyvappid(self, vdc=None, vapp_uuid=None):
1343 """Method returns vApp name from vCD and lookup done by vapp_id.
1344
1345 Args:
1346 vca: Connector to VCA
1347 vdc: The VDC object.
1348 vapp_uuid: vappid is application identifier
1349
1350 Returns:
1351 The return vApp name otherwise None
1352 """
1353
1354 try:
1355 refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1356 vdc.ResourceEntities.ResourceEntity)
1357 for ref in refs:
1358 # we care only about UUID the rest doesn't matter
1359 vappid = ref.href.split("vapp")[1][1:]
1360 if vappid == vapp_uuid:
1361 response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
1362 logger=self.logger)
1363
1364 #Retry login if session expired & retry sending request
1365 if response.status_code == 403:
1366 response = self.retry_rest('GET', ref.href)
1367
1368 tree = XmlElementTree.fromstring(response.content)
1369 return tree.attrib['name']
1370 except Exception as e:
1371 self.logger.exception(e)
1372 return None
1373 return None
1374
1375 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1376 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1377 """Adds a VM instance to VIM
1378 Params:
1379 'start': (boolean) indicates if VM must start or created in pause mode.
1380 'image_id','flavor_id': image and flavor VIM id to use for the VM
1381 'net_list': list of interfaces, each one is a dictionary with:
1382 'name': (optional) name for the interface.
1383 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1384 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1385 'model': (optional, only makes sense for type==virtual) interface model: virtio, e1000, ...
1386 'mac_address': (optional) mac address to assign to this interface
1387 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1388 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1389 'type': (mandatory) can be one of:
1390 'virtual', in this case always connected to a network of type 'net_type=bridge'
1391 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
1392 can be created unconnected
1393 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1394 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1395 are allocated on the same physical NIC
1396 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1397 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1398 or True, it must apply the default VIM behaviour
1399 After execution the method will add the key:
1400 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1401 interface. 'net_list' is modified
1402 'cloud_config': (optional) dictionary with:
1403 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1404 'users': (optional) list of users to be inserted, each item is a dict with:
1405 'name': (mandatory) user name,
1406 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1407 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1408 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1409 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1410 'dest': (mandatory) string with the destination absolute path
1411 'encoding': (optional, by default text). Can be one of:
1412 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1413 'content' (mandatory): string with the content of the file
1414 'permissions': (optional) string with file permissions, typically octal notation '0644'
1415 'owner': (optional) file owner, string with the format 'owner:group'
1416 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1417 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1418 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1419 'size': (mandatory) string with the size of the disk in GB
1420 availability_zone_index: Index of availability_zone_list to use for this VM. None if no availability zone is required
1421 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1422 availability_zone_index is None
1423 Returns the instance identifier or raises an exception on error
1424 """
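# Illustrative sketch only (all ids and values below are placeholders): minimal arguments for
# one VM with a single management interface, following the parameter layout described above.
#
#     net_list = [{'name': 'eth0', 'net_id': '<vim-net-uuid>', 'type': 'virtual', 'use': 'mgmt'}]
#     cloud_config = {'key-pairs': ['ssh-rsa AAAA... user@host']}
#     vm_id = vim.new_vminstance(name='vnf-vm1', image_id='<catalog-uuid>',
#                                flavor_id='<flavor-uuid>', net_list=net_list,
#                                cloud_config=cloud_config, start=True)
#     # after the call each entry in net_list also carries the 'vim_id' assigned by the VIM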
1425 self.logger.info("Creating new instance for entry {}".format(name))
1426 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
1427 description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
1428
1429 # new vm name = vmname + '-' + uuid
1430 new_vm_name = [name, '-', str(uuid.uuid4())]
1431 vmname_andid = ''.join(new_vm_name)
1432
1433 # if vm already deployed we return existing uuid
1434 # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
1435 # if vapp_uuid is not None:
1436 # return vapp_uuid
1437
1438 # we check for presence of VDC, Catalog entry and Flavor.
1439 vdc = self.get_vdc_details()
1440 if vdc is None:
1441 raise vimconn.vimconnNotFoundException(
1442 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1443 catalogs = self.vca.get_catalogs()
1444 if catalogs is None:
1445 #Retry once, if failed by refreshing token
1446 self.get_token()
1447 catalogs = self.vca.get_catalogs()
1448 if catalogs is None:
1449 raise vimconn.vimconnNotFoundException(
1450 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1451
1452 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1453 if catalog_hash_name:
1454 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1455 else:
1456 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1457 "(Failed retrieve catalog information {})".format(name, image_id))
1458
1459
1460 # Set vCPU and Memory based on flavor.
1461 vm_cpus = None
1462 vm_memory = None
1463 vm_disk = None
1464 numas = None
1465
1466 if flavor_id is not None:
1467 if flavor_id not in vimconnector.flavorlist:
1468 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1469 "Failed retrieve flavor information "
1470 "flavor id {}".format(name, flavor_id))
1471 else:
1472 try:
1473 flavor = vimconnector.flavorlist[flavor_id]
1474 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1475 vm_memory = flavor[FLAVOR_RAM_KEY]
1476 vm_disk = flavor[FLAVOR_DISK_KEY]
1477 extended = flavor.get("extended", None)
1478 if extended:
1479 numas=extended.get("numas", None)
1480
1481 except Exception as exp:
1482 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1483
1484 # image upload creates template name as catalog name space Template.
1485 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1486 power_on = 'false'
1487 if start:
1488 power_on = 'true'
1489
1490 # client must provide at least one entry in net_list if not we report error
1491 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1492 # If no mgmt net is present, the 1st net in net_list is considered the primary net.
1493 primary_net = None
1494 primary_netname = None
1495 network_mode = 'bridged'
1496 if net_list is not None and len(net_list) > 0:
1497 for net in net_list:
1498 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1499 primary_net = net
1500 if primary_net is None:
1501 primary_net = net_list[0]
1502
1503 try:
1504 primary_net_id = primary_net['net_id']
1505 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1506 if 'name' in network_dict:
1507 primary_netname = network_dict['name']
1508
1509 except KeyError:
1510                 raise vimconn.vimconnException("Corrupted primary net entry (missing 'net_id'): {}".format(primary_net))
1511 else:
1512             raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create vApp {}: network list is empty.".format(name))
1513
1514 # use: 'data', 'bridge', 'mgmt'
1515 # create vApp. Set vcpu and ram based on flavor id.
1516 try:
1517 for retry in (1,2):
1518 vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1519 self.get_catalogbyid(image_id, catalogs),
1520 network_name=None, # None while creating vapp
1521 network_mode=network_mode,
1522 vm_name=vmname_andid,
1523 vm_cpus=vm_cpus, # can be None if flavor is None
1524 vm_memory=vm_memory) # can be None if flavor is None
1525
1526 if not vapptask and retry==1:
1527 self.get_token() # Retry getting token
1528 continue
1529 else:
1530 break
1531
1532 if vapptask is None or vapptask is False:
1533 raise vimconn.vimconnUnexpectedResponse(
1534 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1535 if type(vapptask) is VappTask:
1536 self.vca.block_until_completed(vapptask)
1537
1538 except Exception as exp:
1539 raise vimconn.vimconnUnexpectedResponse(
1540 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1541
1542         # we should now have the vApp in an undeployed state.
1543 try:
1544 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1545
1546 except Exception as exp:
1547 raise vimconn.vimconnUnexpectedResponse(
1548 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1549 .format(vmname_andid, exp))
1550
1551 if vapp_uuid is None:
1552 raise vimconn.vimconnUnexpectedResponse(
1553 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1554 vmname_andid))
1555
1556         # Add PCI passthrough/SRIOV configurations
1557 vm_obj = None
1558 pci_devices_info = []
1559 sriov_net_info = []
1560 reserve_memory = False
1561
1562 for net in net_list:
1563 if net["type"]=="PF":
1564 pci_devices_info.append(net)
1565 elif (net["type"]=="VF" or net["type"]=="VFnotShared") and 'net_id'in net:
1566 sriov_net_info.append(net)
1567
1568 #Add PCI
1569 if len(pci_devices_info) > 0:
1570 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1571 vmname_andid ))
1572 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1573 pci_devices_info,
1574 vmname_andid)
1575 if PCI_devices_status:
1576                 self.logger.info("Added PCI devices {} to VM {}".format(
1577 pci_devices_info,
1578 vmname_andid)
1579 )
1580 reserve_memory = True
1581 else:
1582                 self.logger.info("Failed to add PCI devices {} to VM {}".format(
1583 pci_devices_info,
1584 vmname_andid)
1585 )
1586
1587 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1588 # Modify vm disk
1589 if vm_disk:
1590 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1591 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1592 if result :
1593 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1594
1595 #Add new or existing disks to vApp
1596 if disk_list:
1597 added_existing_disk = False
1598 for disk in disk_list:
1599 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1600 image_id = disk['image_id']
1601 # Adding CD-ROM to VM
1602                     # will revisit the code once the specification is ready to support this feature
1603 self.insert_media_to_vm(vapp, image_id)
1604 elif "image_id" in disk and disk["image_id"] is not None:
1605 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1606 disk["image_id"] , vapp_uuid))
1607 self.add_existing_disk(catalogs=catalogs,
1608 image_id=disk["image_id"],
1609 size = disk["size"],
1610 template_name=templateName,
1611 vapp_uuid=vapp_uuid
1612 )
1613 added_existing_disk = True
1614 else:
1615 #Wait till added existing disk gets reflected into vCD database/API
1616 if added_existing_disk:
1617 time.sleep(5)
1618 added_existing_disk = False
1619 self.add_new_disk(vapp_uuid, disk['size'])
1620
1621 if numas:
1622 # Assigning numa affinity setting
1623 for numa in numas:
1624 if 'paired-threads-id' in numa:
1625 paired_threads_id = numa['paired-threads-id']
1626 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1627
1628 # add NICs & connect to networks in netlist
1629 try:
1630 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1631 nicIndex = 0
1632 primary_nic_index = 0
1633 for net in net_list:
1634 # openmano uses network id in UUID format.
1635 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
1636 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1637 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1638
1639 if 'net_id' not in net:
1640 continue
1641
1642                 net['vim_id'] = net['net_id']  # Provide the same VIM identifier as the VIM network
1643
1644 interface_net_id = net['net_id']
1645 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1646 interface_network_mode = net['use']
1647
1648 if interface_network_mode == 'mgmt':
1649 primary_nic_index = nicIndex
1650
1651 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1652 - DHCP (The IP address is obtained from a DHCP service.)
1653 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1654 - NONE (No IP addressing mode specified.)"""
1655
1656 if primary_netname is not None:
1657 nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
1658 if len(nets) == 1:
1659 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
1660
1661 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1662 task = vapp.connect_to_network(nets[0].name, nets[0].href)
1663 if type(task) is GenericTask:
1664 self.vca.block_until_completed(task)
1665 # connect network to VM - with all DHCP by default
1666
1667 type_list = ['PF','VF','VFnotShared']
1668 if 'type' in net and net['type'] not in type_list:
1669 # fetching nic type from vnf
1670 if 'model' in net:
1671 nic_type = net['model']
1672 self.logger.info("new_vminstance(): adding network adapter "\
1673 "to a network {}".format(nets[0].name))
1674 self.add_network_adapter_to_vms(vapp, nets[0].name,
1675 primary_nic_index,
1676 nicIndex,
1677 net,
1678 nic_type=nic_type)
1679 else:
1680 self.logger.info("new_vminstance(): adding network adapter "\
1681 "to a network {}".format(nets[0].name))
1682 self.add_network_adapter_to_vms(vapp, nets[0].name,
1683 primary_nic_index,
1684 nicIndex,
1685 net)
1686 nicIndex += 1
1687
1688 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1689 # cloud-init for ssh-key injection
1690 if cloud_config:
1691 self.cloud_init(vapp,cloud_config)
1692
1693 # deploy and power on vm
1694 self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
1695 deploytask = vapp.deploy(powerOn=False)
1696 if type(deploytask) is GenericTask:
1697 self.vca.block_until_completed(deploytask)
1698
1699 # ############# Stub code for SRIOV #################
1700 #Add SRIOV
1701 # if len(sriov_net_info) > 0:
1702 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1703 # vmname_andid ))
1704 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1705 # sriov_net_info,
1706 # vmname_andid)
1707 # if sriov_status:
1708 # self.logger.info("Added SRIOV {} to VM {}".format(
1709 # sriov_net_info,
1710 # vmname_andid)
1711 # )
1712 # reserve_memory = True
1713 # else:
1714 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1715 # sriov_net_info,
1716 # vmname_andid)
1717 # )
1718
1719 # If VM has PCI devices or SRIOV reserve memory for VM
1720 if reserve_memory:
1721 memReserve = vm_obj.config.hardware.memoryMB
1722 spec = vim.vm.ConfigSpec()
1723 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1724 task = vm_obj.ReconfigVM_Task(spec=spec)
1725 if task:
1726 result = self.wait_for_vcenter_task(task, vcenter_conect)
1727                     self.logger.info("Reserved memory {} MB for "
1728                                      "VM; status: {}".format(str(memReserve), result))
1729 else:
1730                     self.logger.info("Failed to reserve memory {} MB for VM {}".format(
1731 str(memReserve), str(vm_obj)))
1732
1733 self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
1734
1735 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1736 poweron_task = vapp.poweron()
1737 if type(poweron_task) is GenericTask:
1738 self.vca.block_until_completed(poweron_task)
1739
1740 except Exception as exp :
1741 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
1742 self.logger.debug("new_vminstance(): Failed create new vm instance {} with exception {}"
1743 .format(name, exp))
1744 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
1745 .format(name, exp))
1746
1747         # check if the vApp is deployed; if so, return the vApp UUID, otherwise raise an exception
1748 wait_time = 0
1749 vapp_uuid = None
1750 while wait_time <= MAX_WAIT_TIME:
1751 try:
1752 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1753 except Exception as exp:
1754 raise vimconn.vimconnUnexpectedResponse(
1755 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1756 .format(vmname_andid, exp))
1757
1758 if vapp and vapp.me.deployed:
1759 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1760 break
1761 else:
1762 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1763 time.sleep(INTERVAL_TIME)
1764
1765 wait_time +=INTERVAL_TIME
1766
1767 if vapp_uuid is not None:
1768 return vapp_uuid
1769 else:
1770 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
1771
1772 ##
1773 ##
1774 ## based on current discussion
1775 ##
1776 ##
1777 ## server:
1778 # created: '2016-09-08T11:51:58'
1779 # description: simple-instance.linux1.1
1780 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1781 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1782 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1783 # status: ACTIVE
1784 # error_msg:
1785 # interfaces: …
1786 #
1787 def get_vminstance(self, vim_vm_uuid=None):
1788 """Returns the VM instance information from VIM"""
1789
1790 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1791
1792 vdc = self.get_vdc_details()
1793 if vdc is None:
1794 raise vimconn.vimconnConnectionException(
1795 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1796
1797 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1798 if not vm_info_dict:
1799 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1800 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1801
1802 status_key = vm_info_dict['status']
1803 error = ''
1804 try:
1805 vm_dict = {'created': vm_info_dict['created'],
1806 'description': vm_info_dict['name'],
1807 'status': vcdStatusCode2manoFormat[int(status_key)],
1808 'hostId': vm_info_dict['vmuuid'],
1809 'error_msg': error,
1810 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1811
1812 if 'interfaces' in vm_info_dict:
1813 vm_dict['interfaces'] = vm_info_dict['interfaces']
1814 else:
1815 vm_dict['interfaces'] = []
1816 except KeyError:
1817 vm_dict = {'created': '',
1818 'description': '',
1819 'status': vcdStatusCode2manoFormat[int(-1)],
1820 'hostId': vm_info_dict['vmuuid'],
1821                        'error_msg': "Inconsistent state",
1822 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1823
1824 return vm_dict
1825
1826 def delete_vminstance(self, vm__vim_uuid):
1827         """Method powers off and removes a VM instance from vcloud director.
1828
1829 Args:
1830 vm__vim_uuid: VM UUID
1831
1832 Returns:
1833 Returns the instance identifier
1834 """
1835
1836 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1837
1838 vdc = self.get_vdc_details()
1839 if vdc is None:
1840 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1841 self.tenant_name))
1842 raise vimconn.vimconnException(
1843 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1844
1845 try:
1846 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1847 if vapp_name is None:
1848 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1849 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1850 else:
1851 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1852
1853 # Delete vApp and wait for status change if task executed and vApp is None.
1854 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1855
1856 if vapp:
1857 if vapp.me.deployed:
1858 self.logger.info("Powering off vApp {}".format(vapp_name))
1859 #Power off vApp
1860 powered_off = False
1861 wait_time = 0
1862 while wait_time <= MAX_WAIT_TIME:
1863 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1864 if not vapp:
1865 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1866 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1867
1868 power_off_task = vapp.poweroff()
1869 if type(power_off_task) is GenericTask:
1870 result = self.vca.block_until_completed(power_off_task)
1871 if result:
1872 powered_off = True
1873 break
1874 else:
1875 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1876 time.sleep(INTERVAL_TIME)
1877
1878 wait_time +=INTERVAL_TIME
1879 if not powered_off:
1880 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1881 else:
1882 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1883
1884 #Undeploy vApp
1885 self.logger.info("Undeploy vApp {}".format(vapp_name))
1886 wait_time = 0
1887 undeployed = False
1888 while wait_time <= MAX_WAIT_TIME:
1889 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1890 if not vapp:
1891 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1892 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1893 undeploy_task = vapp.undeploy(action='powerOff')
1894
1895 if type(undeploy_task) is GenericTask:
1896 result = self.vca.block_until_completed(undeploy_task)
1897 if result:
1898 undeployed = True
1899 break
1900 else:
1901 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1902 time.sleep(INTERVAL_TIME)
1903
1904 wait_time +=INTERVAL_TIME
1905
1906 if not undeployed:
1907 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1908
1909 # delete vapp
1910 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1911 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1912
1913 if vapp is not None:
1914 wait_time = 0
1915 result = False
1916
1917 while wait_time <= MAX_WAIT_TIME:
1918 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1919 if not vapp:
1920 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1921 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1922
1923 delete_task = vapp.delete()
1924
1925 if type(delete_task) is GenericTask:
1926                                 result = self.vca.block_until_completed(delete_task)
1928 if result:
1929 break
1930 else:
1931 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1932 time.sleep(INTERVAL_TIME)
1933
1934 wait_time +=INTERVAL_TIME
1935
1936 if not result:
1937 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1938
1939 except:
1940 self.logger.debug(traceback.format_exc())
1941 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1942
1943 if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
1944             self.logger.info("Deleted vm instance {} successfully".format(vm__vim_uuid))
1945 return vm__vim_uuid
1946 else:
1947 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1948
1949 def refresh_vms_status(self, vm_list):
1950 """Get the status of the virtual machines and their interfaces/ports
1951 Params: the list of VM identifiers
1952 Returns a dictionary with:
1953 vm_id: #VIM id of this Virtual Machine
1954 status: #Mandatory. Text with one of:
1955 # DELETED (not found at vim)
1956 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1957 # OTHER (Vim reported other status not understood)
1958 # ERROR (VIM indicates an ERROR status)
1959 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
1960 # CREATING (on building process), ERROR
1961                     # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
1962 #
1963 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1964 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1965 interfaces:
1966 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1967 mac_address: #Text format XX:XX:XX:XX:XX:XX
1968 vim_net_id: #network id where this interface is connected
1969 vim_interface_id: #interface/port VIM id
1970 ip_address: #null, or text with IPv4, IPv6 address
1971 """
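        # Illustrative shape of the returned dictionary for a single VM, assuming one NIC
        # attached to an org VDC network (all values are placeholders):
        #   {'<vm uuid>': {'status': 'ACTIVE',
        #                  'error_msg': '...',
        #                  'vim_info': '<yaml dump of vCD vApp details>',
        #                  'interfaces': [{'mac_address': '00:50:56:xx:xx:xx',
        #                                  'vim_net_id': '<org network uuid>',
        #                                  'vim_interface_id': '<org network uuid>',
        #                                  'ip_address': '10.10.10.5'}]}}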
1972
1973 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
1974
1975 vdc = self.get_vdc_details()
1976 if vdc is None:
1977 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1978
1979 vms_dict = {}
1980 nsx_edge_list = []
1981 for vmuuid in vm_list:
1982 vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
1983 if vmname is not None:
1984
1985 try:
1986 vm_pci_details = self.get_vm_pci_details(vmuuid)
1987 the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
1988 vm_info = the_vapp.get_vms_details()
1989 vm_status = vm_info[0]['status']
1990 vm_info[0].update(vm_pci_details)
1991
1992 vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
1993 'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
1994 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
1995
1996 # get networks
1997 vm_app_networks = the_vapp.get_vms_network_info()
1998 for vapp_network in vm_app_networks:
1999 for vm_network in vapp_network:
2000 if vm_network['name'] == vmname:
2001 #Assign IP Address based on MAC Address in NSX DHCP lease info
2002 if vm_network['ip'] is None:
2003 if not nsx_edge_list:
2004 nsx_edge_list = self.get_edge_details()
2005 if nsx_edge_list is None:
2006 raise vimconn.vimconnException("refresh_vms_status:"\
2007 "Failed to get edge details from NSX Manager")
2008 if vm_network['mac'] is not None:
2009 vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
2010
2011 vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
2012 interface = {"mac_address": vm_network['mac'],
2013 "vim_net_id": vm_net_id,
2014 "vim_interface_id": vm_net_id,
2015 'ip_address': vm_network['ip']}
2016 # interface['vim_info'] = yaml.safe_dump(vm_network)
2017 vm_dict["interfaces"].append(interface)
2018 # add a vm to vm dict
2019 vms_dict.setdefault(vmuuid, vm_dict)
2020 except Exception as exp:
2021 self.logger.debug("Error in response {}".format(exp))
2022 self.logger.debug(traceback.format_exc())
2023
2024 return vms_dict
2025
2026
2027 def get_edge_details(self):
2028 """Get the NSX edge list from NSX Manager
2029 Returns list of NSX edges
2030 """
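        # Minimal sketch of the '/api/4.0/edges' XML handled below (tag names taken from the
        # parsing code; real NSX responses carry many more fields, and the root tag name here
        # is illustrative):
        #   <pagedEdgeList>
        #     <edgePage>
        #       <pagingInfo><totalCount>1</totalCount></pagingInfo>
        #       <edgeSummary><id>edge-1</id></edgeSummary>
        #     </edgePage>
        #   </pagedEdgeList>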
2031 edge_list = []
2032 rheaders = {'Content-Type': 'application/xml'}
2033 nsx_api_url = '/api/4.0/edges'
2034
2035 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2036
2037 try:
2038 resp = requests.get(self.nsx_manager + nsx_api_url,
2039 auth = (self.nsx_user, self.nsx_password),
2040 verify = False, headers = rheaders)
2041 if resp.status_code == requests.codes.ok:
2042 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2043 for edge_pages in paged_Edge_List:
2044 if edge_pages.tag == 'edgePage':
2045 for edge_summary in edge_pages:
2046 if edge_summary.tag == 'pagingInfo':
2047 for element in edge_summary:
2048 if element.tag == 'totalCount' and element.text == '0':
2049 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2050 .format(self.nsx_manager))
2051
2052 if edge_summary.tag == 'edgeSummary':
2053 for element in edge_summary:
2054 if element.tag == 'id':
2055 edge_list.append(element.text)
2056 else:
2057 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2058 .format(self.nsx_manager))
2059
2060 if not edge_list:
2061 raise vimconn.vimconnException("get_edge_details: "\
2062 "No NSX edge details found: {}"
2063 .format(self.nsx_manager))
2064 else:
2065 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2066 return edge_list
2067 else:
2068 self.logger.debug("get_edge_details: "
2069 "Failed to get NSX edge details from NSX Manager: {}"
2070 .format(resp.content))
2071 return None
2072
2073 except Exception as exp:
2074 self.logger.debug("get_edge_details: "\
2075 "Failed to get NSX edge details from NSX Manager: {}"
2076 .format(exp))
2077 raise vimconn.vimconnException("get_edge_details: "\
2078 "Failed to get NSX edge details from NSX Manager: {}"
2079 .format(exp))
2080
2081
2082 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2083 """Get IP address details from NSX edges, using the MAC address
2084 PARAMS: nsx_edges : List of NSX edges
2085 mac_address : Find IP address corresponding to this MAC address
2086         Returns: IP address corresponding to the provided MAC address
2087 """
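        # Minimal sketch of the per-edge '/dhcp/leaseInfo' XML handled below (tag names taken
        # from the parsing code; real NSX responses carry more fields, and the root tag name
        # here is illustrative):
        #   <dhcpLeases>
        #     <dhcpLeaseInfo>
        #       <leaseInfo>
        #         <macAddress>00:50:56:xx:xx:xx</macAddress>
        #         <ipAddress>10.10.10.5</ipAddress>
        #       </leaseInfo>
        #     </dhcpLeaseInfo>
        #   </dhcpLeases>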
2088
2089 ip_addr = None
2090 rheaders = {'Content-Type': 'application/xml'}
2091
2092 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2093
2094 try:
2095 for edge in nsx_edges:
2096 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2097
2098 resp = requests.get(self.nsx_manager + nsx_api_url,
2099 auth = (self.nsx_user, self.nsx_password),
2100 verify = False, headers = rheaders)
2101
2102 if resp.status_code == requests.codes.ok:
2103 dhcp_leases = XmlElementTree.fromstring(resp.text)
2104 for child in dhcp_leases:
2105 if child.tag == 'dhcpLeaseInfo':
2106 dhcpLeaseInfo = child
2107 for leaseInfo in dhcpLeaseInfo:
2108 for elem in leaseInfo:
2109                                 if elem.tag == 'macAddress':
2110                                     edge_mac_addr = elem.text
2111                                 if elem.tag == 'ipAddress':
2112                                     ip_addr = elem.text
2113 if edge_mac_addr is not None:
2114 if edge_mac_addr == mac_address:
2115 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2116 .format(ip_addr, mac_address,edge))
2117 return ip_addr
2118 else:
2119 self.logger.debug("get_ipaddr_from_NSXedge: "\
2120 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2121 .format(resp.content))
2122
2123 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2124 return None
2125
2126 except XmlElementTree.ParseError as Err:
2127 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2128
2129
2130 def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
2131         """Send an action over a VM instance from VIM
2132 Returns the vm_id if the action was successfully sent to the VIM"""
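        # Illustrative action_dict values handled below (the dict value is ignored for most
        # actions): {"start": None}, {"rebuild": None}, {"pause": None}, {"resume": None},
        # {"shutoff": None}, {"shutdown": None}, {"forceOff": None}, {"reboot": None}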
2133
2134 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2135 if vm__vim_uuid is None or action_dict is None:
2136 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2137
2138 vdc = self.get_vdc_details()
2139 if vdc is None:
2140 return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)
2141
2142 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
2143 if vapp_name is None:
2144 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2145 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2146 else:
2147 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2148
2149 try:
2150 the_vapp = self.vca.get_vapp(vdc, vapp_name)
2151 # TODO fix all status
2152 if "start" in action_dict:
2153 vm_info = the_vapp.get_vms_details()
2154 vm_status = vm_info[0]['status']
2155 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2156 if vm_status == "Suspended" or vm_status == "Powered off":
2157 power_on_task = the_vapp.poweron()
2158 result = self.vca.block_until_completed(power_on_task)
2159 self.instance_actions_result("start", result, vapp_name)
2160 elif "rebuild" in action_dict:
2161 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2162 rebuild_task = the_vapp.deploy(powerOn=True)
2163 result = self.vca.block_until_completed(rebuild_task)
2164 self.instance_actions_result("rebuild", result, vapp_name)
2165 elif "pause" in action_dict:
2166 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2167 pause_task = the_vapp.undeploy(action='suspend')
2168 result = self.vca.block_until_completed(pause_task)
2169 self.instance_actions_result("pause", result, vapp_name)
2170 elif "resume" in action_dict:
2171 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2172 power_task = the_vapp.poweron()
2173 result = self.vca.block_until_completed(power_task)
2174 self.instance_actions_result("resume", result, vapp_name)
2175 elif "shutoff" in action_dict or "shutdown" in action_dict:
2176 action_name , value = action_dict.items()[0]
2177 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2178 power_off_task = the_vapp.undeploy(action='powerOff')
2179 result = self.vca.block_until_completed(power_off_task)
2180 if action_name == "shutdown":
2181 self.instance_actions_result("shutdown", result, vapp_name)
2182 else:
2183 self.instance_actions_result("shutoff", result, vapp_name)
2184 elif "forceOff" in action_dict:
2185 result = the_vapp.undeploy(action='force')
2186 self.instance_actions_result("forceOff", result, vapp_name)
2187 elif "reboot" in action_dict:
2188 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2189 reboot_task = the_vapp.reboot()
2190 else:
2191 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2192 return vm__vim_uuid
2193 except Exception as exp :
2194 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2195 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2196
2197 def instance_actions_result(self, action, result, vapp_name):
2198 if result:
2199             self.logger.info("action_vminstance: Successfully {} the vApp: {}".format(action, vapp_name))
2200 else:
2201 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2202
2203 def get_vminstance_console(self, vm_id, console_type="vnc"):
2204 """
2205 Get a console for the virtual machine
2206 Params:
2207 vm_id: uuid of the VM
2208 console_type, can be:
2209 "novnc" (by default), "xvpvnc" for VNC types,
2210 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2211 Returns dict with the console parameters:
2212 protocol: ssh, ftp, http, https, ...
2213 server: usually ip address
2214 port: the http, ssh, ... port
2215 suffix: extra text, e.g. the http path and query string
2216 """
2217 raise vimconn.vimconnNotImplemented("Should have implemented this")
2218
2219 # NOT USED METHODS in current version
2220
2221 def host_vim2gui(self, host, server_dict):
2222 """Transform host dictionary from VIM format to GUI format,
2223 and append to the server_dict
2224 """
2225 raise vimconn.vimconnNotImplemented("Should have implemented this")
2226
2227 def get_hosts_info(self):
2228 """Get the information of deployed hosts
2229 Returns the hosts content"""
2230 raise vimconn.vimconnNotImplemented("Should have implemented this")
2231
2232 def get_hosts(self, vim_tenant):
2233 """Get the hosts and deployed instances
2234 Returns the hosts content"""
2235 raise vimconn.vimconnNotImplemented("Should have implemented this")
2236
2237 def get_processor_rankings(self):
2238 """Get the processor rankings in the VIM database"""
2239 raise vimconn.vimconnNotImplemented("Should have implemented this")
2240
2241 def new_host(self, host_data):
2242 """Adds a new host to VIM"""
2243 '''Returns status code of the VIM response'''
2244 raise vimconn.vimconnNotImplemented("Should have implemented this")
2245
2246 def new_external_port(self, port_data):
2247         """Adds an external port to VIM"""
2248 '''Returns the port identifier'''
2249 raise vimconn.vimconnNotImplemented("Should have implemented this")
2250
2251 def new_external_network(self, net_name, net_type):
2252         """Adds an external network to VIM (shared)"""
2253 '''Returns the network identifier'''
2254 raise vimconn.vimconnNotImplemented("Should have implemented this")
2255
2256 def connect_port_network(self, port_id, network_id, admin=False):
2257         """Connects an external port to a network"""
2258 '''Returns status code of the VIM response'''
2259 raise vimconn.vimconnNotImplemented("Should have implemented this")
2260
2261 def new_vminstancefromJSON(self, vm_data):
2262 """Adds a VM instance to VIM"""
2263 '''Returns the instance identifier'''
2264 raise vimconn.vimconnNotImplemented("Should have implemented this")
2265
2266 def get_network_name_by_id(self, network_uuid=None):
2267         """Method gets the vcloud director network name based on the supplied uuid.
2268
2269 Args:
2270 network_uuid: network_id
2271
2272 Returns:
2273             The network name, or None if not found.
2274 """
2275
2276 if not network_uuid:
2277 return None
2278
2279 try:
2280 org_dict = self.get_org(self.org_uuid)
2281 if 'networks' in org_dict:
2282 org_network_dict = org_dict['networks']
2283 for net_uuid in org_network_dict:
2284 if net_uuid == network_uuid:
2285 return org_network_dict[net_uuid]
2286 except:
2287 self.logger.debug("Exception in get_network_name_by_id")
2288 self.logger.debug(traceback.format_exc())
2289
2290 return None
2291
2292 def get_network_id_by_name(self, network_name=None):
2293 """Method gets vcloud director network uuid based on supplied name.
2294
2295 Args:
2296 network_name: network_name
2297 Returns:
2298             The network uuid, or None if not found.
2300 """
2301
2302 if not network_name:
2303 self.logger.debug("get_network_id_by_name() : Network name is empty")
2304 return None
2305
2306 try:
2307 org_dict = self.get_org(self.org_uuid)
2308 if org_dict and 'networks' in org_dict:
2309 org_network_dict = org_dict['networks']
2310 for net_uuid,net_name in org_network_dict.iteritems():
2311 if net_name == network_name:
2312 return net_uuid
2313
2314 except KeyError as exp:
2315 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2316
2317 return None
2318
2319 def list_org_action(self):
2320 """
2321         Method leverages vCloud director and queries for the organizations available to a particular user
2322
2323 Args:
2324 vca - is active VCA connection.
2325 vdc_name - is a vdc name that will be used to query vms action
2326
2327 Returns:
2328             The XML response content, or None
2329 """
2330
2331 url_list = [self.vca.host, '/api/org']
2332 vm_list_rest_call = ''.join(url_list)
2333
2334 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2335 response = Http.get(url=vm_list_rest_call,
2336 headers=self.vca.vcloud_session.get_vcloud_headers(),
2337 verify=self.vca.verify,
2338 logger=self.vca.logger)
2339
2340 if response.status_code == 403:
2341 response = self.retry_rest('GET', vm_list_rest_call)
2342
2343 if response.status_code == requests.codes.ok:
2344 return response.content
2345
2346 return None
2347
2348 def get_org_action(self, org_uuid=None):
2349 """
2350         Method leverages vCloud director and retrieves the organization object.
2351
2352 Args:
2353 vca - is active VCA connection.
2354             org_uuid - is the organization uuid to query.
2355
2356 Returns:
2357             The XML response content, or None
2358 """
2359
2360 if org_uuid is None:
2361 return None
2362
2363 url_list = [self.vca.host, '/api/org/', org_uuid]
2364 vm_list_rest_call = ''.join(url_list)
2365
2366 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2367 response = Http.get(url=vm_list_rest_call,
2368 headers=self.vca.vcloud_session.get_vcloud_headers(),
2369 verify=self.vca.verify,
2370 logger=self.vca.logger)
2371
2372 #Retry login if session expired & retry sending request
2373 if response.status_code == 403:
2374 response = self.retry_rest('GET', vm_list_rest_call)
2375
2376 if response.status_code == requests.codes.ok:
2377 return response.content
2378
2379 return None
2380
2381 def get_org(self, org_uuid=None):
2382 """
2383 Method retrieves available organization in vCloud Director
2384
2385 Args:
2386 org_uuid - is a organization uuid.
2387
2388 Returns:
2389 The return dictionary with following key
2390             "networks" - network list under the org
2391             "catalogs" - catalog list under the org
2392             "vdcs" - vdc list under the org
2393 """
2394
2395 org_dict = {}
2396
2397 if org_uuid is None:
2398 return org_dict
2399
2400 content = self.get_org_action(org_uuid=org_uuid)
2401 try:
2402 vdc_list = {}
2403 network_list = {}
2404 catalog_list = {}
2405 vm_list_xmlroot = XmlElementTree.fromstring(content)
2406 for child in vm_list_xmlroot:
2407 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2408 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2409 org_dict['vdcs'] = vdc_list
2410 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2411 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2412 org_dict['networks'] = network_list
2413 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2414 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2415 org_dict['catalogs'] = catalog_list
2416 except:
2417 pass
2418
2419 return org_dict
2420
2421 def get_org_list(self):
2422 """
2423         Method retrieves the organizations available in vCloud Director
2424
2425 Args:
2426 vca - is active VCA connection.
2427
2428 Returns:
2429 The return dictionary and key for each entry VDC UUID
2430 """
2431
2432 org_dict = {}
2433
2434 content = self.list_org_action()
2435 try:
2436 vm_list_xmlroot = XmlElementTree.fromstring(content)
2437 for vm_xml in vm_list_xmlroot:
2438 if vm_xml.tag.split("}")[1] == 'Org':
2439 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2440 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2441 except:
2442 pass
2443
2444 return org_dict
2445
2446 def vms_view_action(self, vdc_name=None):
2447 """ Method leverages vCloud director vms query call
2448
2449 Args:
2450 vca - is active VCA connection.
2451 vdc_name - is a vdc name that will be used to query vms action
2452
2453 Returns:
2454             The XML response content, or None
2455 """
2456 vca = self.connect()
2457 if vdc_name is None:
2458 return None
2459
2460 url_list = [vca.host, '/api/vms/query']
2461 vm_list_rest_call = ''.join(url_list)
2462
2463 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2464 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2465 vca.vcloud_session.organization.Link)
2466 if len(refs) == 1:
2467 response = Http.get(url=vm_list_rest_call,
2468 headers=vca.vcloud_session.get_vcloud_headers(),
2469 verify=vca.verify,
2470 logger=vca.logger)
2471 if response.status_code == requests.codes.ok:
2472 return response.content
2473
2474 return None
2475
2476 def get_vapp_list(self, vdc_name=None):
2477 """
2478         Method retrieves the vApp list deployed in vCloud director and returns a dictionary
2479         containing all vApps deployed for the queried VDC.
2480         The dictionary key is the vApp UUID
2481
2482
2483 Args:
2484 vca - is active VCA connection.
2485 vdc_name - is a vdc name that will be used to query vms action
2486
2487 Returns:
2488 The return dictionary and key for each entry vapp UUID
2489 """
2490
2491 vapp_dict = {}
2492 if vdc_name is None:
2493 return vapp_dict
2494
2495 content = self.vms_view_action(vdc_name=vdc_name)
2496 try:
2497 vm_list_xmlroot = XmlElementTree.fromstring(content)
2498 for vm_xml in vm_list_xmlroot:
2499 if vm_xml.tag.split("}")[1] == 'VMRecord':
2500 if vm_xml.attrib['isVAppTemplate'] == 'true':
2501 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2502 if 'vappTemplate-' in rawuuid[0]:
2503                         # container is in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5;
2504                         # we strip the prefix and use the raw UUID as the key
2505 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2506 except:
2507 pass
2508
2509 return vapp_dict
2510
2511 def get_vm_list(self, vdc_name=None):
2512 """
2513         Method retrieves the list of VMs deployed in vCloud director. It returns a dictionary
2514         containing all VMs deployed for the queried VDC.
2515         The dictionary key is the VM UUID
2516
2517
2518 Args:
2519 vca - is active VCA connection.
2520 vdc_name - is a vdc name that will be used to query vms action
2521
2522 Returns:
2523 The return dictionary and key for each entry vapp UUID
2524 """
2525 vm_dict = {}
2526
2527 if vdc_name is None:
2528 return vm_dict
2529
2530 content = self.vms_view_action(vdc_name=vdc_name)
2531 try:
2532 vm_list_xmlroot = XmlElementTree.fromstring(content)
2533 for vm_xml in vm_list_xmlroot:
2534 if vm_xml.tag.split("}")[1] == 'VMRecord':
2535 if vm_xml.attrib['isVAppTemplate'] == 'false':
2536 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2537 if 'vm-' in rawuuid[0]:
2538                         # vm is in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5;
2539                         # we strip the prefix and use the raw UUID as the key
2540 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2541 except:
2542 pass
2543
2544 return vm_dict
2545
2546 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2547 """
2548         Method retrieves a VM deployed in vCloud director. It returns the VM attributes as a
2549         dictionary for the queried VDC.
2550         The dictionary key is the VM UUID
2551
2552
2553 Args:
2554 vca - is active VCA connection.
2555 vdc_name - is a vdc name that will be used to query vms action
2556
2557 Returns:
2558 The return dictionary and key for each entry vapp UUID
2559 """
2560 vm_dict = {}
2561 vca = self.connect()
2562 if not vca:
2563 raise vimconn.vimconnConnectionException("self.connect() is failed")
2564
2565 if vdc_name is None:
2566 return vm_dict
2567
2568 content = self.vms_view_action(vdc_name=vdc_name)
2569 try:
2570 vm_list_xmlroot = XmlElementTree.fromstring(content)
2571 for vm_xml in vm_list_xmlroot:
2572 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2573 # lookup done by UUID
2574 if isuuid:
2575 if vapp_name in vm_xml.attrib['container']:
2576 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2577 if 'vm-' in rawuuid[0]:
2578 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2579 break
2580 # lookup done by Name
2581 else:
2582 if vapp_name in vm_xml.attrib['name']:
2583 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2584 if 'vm-' in rawuuid[0]:
2585 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2586 break
2587 except:
2588 pass
2589
2590 return vm_dict
2591
2592 def get_network_action(self, network_uuid=None):
2593 """
2594         Method leverages vCloud director and queries a network based on the network uuid
2595
2596 Args:
2597 vca - is active VCA connection.
2598 network_uuid - is a network uuid
2599
2600 Returns:
2601             The XML response content, or None
2602 """
2603
2604 if network_uuid is None:
2605 return None
2606
2607 url_list = [self.vca.host, '/api/network/', network_uuid]
2608 vm_list_rest_call = ''.join(url_list)
2609
2610 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2611 response = Http.get(url=vm_list_rest_call,
2612 headers=self.vca.vcloud_session.get_vcloud_headers(),
2613 verify=self.vca.verify,
2614 logger=self.vca.logger)
2615
2616 #Retry login if session expired & retry sending request
2617 if response.status_code == 403:
2618 response = self.retry_rest('GET', vm_list_rest_call)
2619
2620 if response.status_code == requests.codes.ok:
2621 return response.content
2622
2623 return None
2624
2625 def get_vcd_network(self, network_uuid=None):
2626 """
2627 Method retrieves available network from vCloud Director
2628
2629 Args:
2630 network_uuid - is VCD network UUID
2631
2632 Each element serialized as key : value pair
2633
2634 Following keys available for access. network_configuration['Gateway'}
2635 <Configuration>
2636 <IpScopes>
2637 <IpScope>
2638 <IsInherited>true</IsInherited>
2639 <Gateway>172.16.252.100</Gateway>
2640 <Netmask>255.255.255.0</Netmask>
2641 <Dns1>172.16.254.201</Dns1>
2642 <Dns2>172.16.254.202</Dns2>
2643 <DnsSuffix>vmwarelab.edu</DnsSuffix>
2644 <IsEnabled>true</IsEnabled>
2645 <IpRanges>
2646 <IpRange>
2647 <StartAddress>172.16.252.1</StartAddress>
2648 <EndAddress>172.16.252.99</EndAddress>
2649 </IpRange>
2650 </IpRanges>
2651 </IpScope>
2652 </IpScopes>
2653 <FenceMode>bridged</FenceMode>
2654
2655 Returns:
2656             The network configuration flattened into a dictionary (see keys above)
2657 """
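        # Illustrative flattened result for the XML above (element tag names become dict keys;
        # values are placeholders):
        #   {'status': '1', 'name': '<network name>', 'uuid': '<vcd network uuid>',
        #    'isShared': 'true', 'Gateway': '172.16.252.100', 'Netmask': '255.255.255.0',
        #    'Dns1': '172.16.254.201', 'FenceMode': 'bridged', ...}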
2658
2659 network_configuration = {}
2660 if network_uuid is None:
2661 return network_uuid
2662
2663 try:
2664 content = self.get_network_action(network_uuid=network_uuid)
2665 vm_list_xmlroot = XmlElementTree.fromstring(content)
2666
2667 network_configuration['status'] = vm_list_xmlroot.get("status")
2668 network_configuration['name'] = vm_list_xmlroot.get("name")
2669 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
2670
2671 for child in vm_list_xmlroot:
2672 if child.tag.split("}")[1] == 'IsShared':
2673 network_configuration['isShared'] = child.text.strip()
2674 if child.tag.split("}")[1] == 'Configuration':
2675 for configuration in child.iter():
2676 tagKey = configuration.tag.split("}")[1].strip()
2677 if tagKey != "":
2678 network_configuration[tagKey] = configuration.text.strip()
2679 return network_configuration
2680 except Exception as exp :
2681 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
2682 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2683
2684 return network_configuration
2685
2686 def delete_network_action(self, network_uuid=None):
2687 """
2688         Method deletes the given network from vCloud director
2689
2690 Args:
2691 network_uuid - is a network uuid that client wish to delete
2692
2693 Returns:
2694             True if the delete request was accepted (HTTP 202), otherwise False
2695 """
2696
2697 vca = self.connect_as_admin()
2698 if not vca:
2699 raise vimconn.vimconnConnectionException("self.connect() is failed")
2700 if network_uuid is None:
2701 return False
2702
2703 url_list = [vca.host, '/api/admin/network/', network_uuid]
2704 vm_list_rest_call = ''.join(url_list)
2705
2706 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2707 response = Http.delete(url=vm_list_rest_call,
2708 headers=vca.vcloud_session.get_vcloud_headers(),
2709 verify=vca.verify,
2710 logger=vca.logger)
2711
2712 if response.status_code == 202:
2713 return True
2714
2715 return False
2716
2717 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2718 ip_profile=None, isshared='true'):
2719 """
2720         Method creates a network in vCloud director
2721
2722 Args:
2723 network_name - is network name to be created.
2724 net_type - can be 'bridge','data','ptp','mgmt'.
2725 ip_profile is a dict containing the IP parameters of the network
2726 isshared - is a boolean
2727             parent_network_uuid - is the parent provider vdc network that will be used for mapping.
2728                                   It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
2729
2730 Returns:
2731             The new network uuid, or None on failure
2732 """
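        # Illustrative ip_profile, with the keys consumed by create_network_rest (all optional;
        # missing values are filled from DEFAULT_IP_PROFILE or derived from subnet_address):
        #   {'subnet_address': '192.168.10.0/24', 'gateway_address': '192.168.10.1',
        #    'dns_address': '192.168.10.2', 'dhcp_enabled': True,
        #    'dhcp_start_address': '192.168.10.3', 'dhcp_count': 50, 'ip_version': 'IPv4'}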
2733
2734 new_network_name = [network_name, '-', str(uuid.uuid4())]
2735 content = self.create_network_rest(network_name=''.join(new_network_name),
2736 ip_profile=ip_profile,
2737 net_type=net_type,
2738 parent_network_uuid=parent_network_uuid,
2739 isshared=isshared)
2740 if content is None:
2741 self.logger.debug("Failed create network {}.".format(network_name))
2742 return None
2743
2744 try:
2745 vm_list_xmlroot = XmlElementTree.fromstring(content)
2746 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2747 if len(vcd_uuid) == 4:
2748 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2749 return vcd_uuid[3]
2750 except:
2751 self.logger.debug("Failed create network {}".format(network_name))
2752 return None
2753
2754 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2755 ip_profile=None, isshared='true'):
2756 """
2757         Method creates a network in vCloud director
2758
2759 Args:
2760 network_name - is network name to be created.
2761 net_type - can be 'bridge','data','ptp','mgmt'.
2762 ip_profile is a dict containing the IP parameters of the network
2763 isshared - is a boolean
2764             parent_network_uuid - is the parent provider vdc network that will be used for mapping.
2765                                   It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
2766
2767 Returns:
2768             The XML content of the network creation response, or None on failure
2769 """
2770
2771 vca = self.connect_as_admin()
2772 if not vca:
2773 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2774 if network_name is None:
2775 return None
2776
2777 url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
2778 vm_list_rest_call = ''.join(url_list)
2779 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2780 response = Http.get(url=vm_list_rest_call,
2781 headers=vca.vcloud_session.get_vcloud_headers(),
2782 verify=vca.verify,
2783 logger=vca.logger)
2784
2785 provider_network = None
2786 available_networks = None
2787 add_vdc_rest_url = None
2788
2789 if response.status_code != requests.codes.ok:
2790 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2791 response.status_code))
2792 return None
2793 else:
2794 try:
2795 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2796 for child in vm_list_xmlroot:
2797 if child.tag.split("}")[1] == 'ProviderVdcReference':
2798 provider_network = child.attrib.get('href')
2799 # application/vnd.vmware.admin.providervdc+xml
2800 if child.tag.split("}")[1] == 'Link':
2801 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
2802 and child.attrib.get('rel') == 'add':
2803 add_vdc_rest_url = child.attrib.get('href')
2804 except:
2805 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2806 self.logger.debug("Respond body {}".format(response.content))
2807 return None
2808
2809 # find pvdc provided available network
2810 response = Http.get(url=provider_network,
2811 headers=vca.vcloud_session.get_vcloud_headers(),
2812 verify=vca.verify,
2813 logger=vca.logger)
2814 if response.status_code != requests.codes.ok:
2815 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2816 response.status_code))
2817 return None
2818
2819 # available_networks.split("/")[-1]
2820
2821 if parent_network_uuid is None:
2822 try:
2823 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2824 for child in vm_list_xmlroot.iter():
2825 if child.tag.split("}")[1] == 'AvailableNetworks':
2826 for networks in child.iter():
2827 # application/vnd.vmware.admin.network+xml
2828 if networks.attrib.get('href') is not None:
2829 available_networks = networks.attrib.get('href')
2830 break
2831 except:
2832 return None
2833
2834 try:
2835 #Configure IP profile of the network
2836 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
2837
2838 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
2839 subnet_rand = random.randint(0, 255)
2840 ip_base = "192.168.{}.".format(subnet_rand)
2841 ip_profile['subnet_address'] = ip_base + "0/24"
2842 else:
2843 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
2844
2845 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
2846 ip_profile['gateway_address']=ip_base + "1"
2847 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
2848 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
2849 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
2850 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
2851 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
2852 ip_profile['dhcp_start_address']=ip_base + "3"
2853 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
2854 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
2855 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
2856 ip_profile['dns_address']=ip_base + "2"
2857
2858 gateway_address=ip_profile['gateway_address']
2859 dhcp_count=int(ip_profile['dhcp_count'])
2860 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
2861
2862 if ip_profile['dhcp_enabled']==True:
2863 dhcp_enabled='true'
2864 else:
2865 dhcp_enabled='false'
2866 dhcp_start_address=ip_profile['dhcp_start_address']
2867
2868 #derive dhcp_end_address from dhcp_start_address & dhcp_count
2869 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
2870 end_ip_int += dhcp_count - 1
2871 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
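                # e.g. dhcp_start_address='192.168.10.3' with dhcp_count=50 yields
                # dhcp_end_address='192.168.10.52' (start + count - 1)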
2872
2873 ip_version=ip_profile['ip_version']
2874 dns_address=ip_profile['dns_address']
2875 except KeyError as exp:
2876 self.logger.debug("Create Network REST: Key error {}".format(exp))
2877             raise vimconn.vimconnException("Create Network REST: Key error {}".format(exp))
2878
2879 # either use client provided UUID or search for a first available
2880 # if both are not defined we return none
2881 if parent_network_uuid is not None:
2882 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
2883 add_vdc_rest_url = ''.join(url_list)
2884
2885 #Creating all networks as Direct Org VDC type networks.
2886 #Unused in case of Underlay (data/ptp) network interface.
2887 fence_mode="bridged"
2888 is_inherited='false'
2889 dns_list = dns_address.split(";")
2890 dns1 = dns_list[0]
2891 dns2_text = ""
2892 if len(dns_list) >= 2:
2893 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
2894 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2895 <Description>Openmano created</Description>
2896 <Configuration>
2897 <IpScopes>
2898 <IpScope>
2899 <IsInherited>{1:s}</IsInherited>
2900 <Gateway>{2:s}</Gateway>
2901 <Netmask>{3:s}</Netmask>
2902 <Dns1>{4:s}</Dns1>{5:s}
2903 <IsEnabled>{6:s}</IsEnabled>
2904 <IpRanges>
2905 <IpRange>
2906 <StartAddress>{7:s}</StartAddress>
2907 <EndAddress>{8:s}</EndAddress>
2908 </IpRange>
2909 </IpRanges>
2910 </IpScope>
2911 </IpScopes>
2912 <ParentNetwork href="{9:s}"/>
2913 <FenceMode>{10:s}</FenceMode>
2914 </Configuration>
2915 <IsShared>{11:s}</IsShared>
2916 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
2917 subnet_address, dns1, dns2_text, dhcp_enabled,
2918 dhcp_start_address, dhcp_end_address, available_networks,
2919 fence_mode, isshared)
2920
2921 headers = vca.vcloud_session.get_vcloud_headers()
2922 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
2923 try:
2924 response = Http.post(url=add_vdc_rest_url,
2925 headers=headers,
2926 data=data,
2927 verify=vca.verify,
2928 logger=vca.logger)
2929
2930 if response.status_code != 201:
2931 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
2932 .format(response.status_code,response.content))
2933 else:
2934 network = networkType.parseString(response.content, True)
2935 create_nw_task = network.get_Tasks().get_Task()[0]
2936
2937                 # if all is ok we respond with the content after network creation completes
2938 # otherwise by default return None
2939 if create_nw_task is not None:
2940 self.logger.debug("Create Network REST : Waiting for Network creation complete")
2941 status = vca.block_until_completed(create_nw_task)
2942 if status:
2943 return response.content
2944 else:
2945 self.logger.debug("create_network_rest task failed. Network Create response : {}"
2946 .format(response.content))
2947 except Exception as exp:
2948 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
2949
2950 return None
2951
2952 def convert_cidr_to_netmask(self, cidr_ip=None):
2953 """
2954         Method converts a CIDR prefix length to a dotted-decimal netmask
2955 Args:
2956 cidr_ip : CIDR IP address
2957 Returns:
2958 netmask : Converted netmask
2959 """
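        # Example (illustrative): convert_cidr_to_netmask('192.168.10.0/24') -> '255.255.255.0';
        # an address without a '/' prefix, e.g. '255.255.0.0', is returned unchanged.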
2960 if cidr_ip is not None:
2961 if '/' in cidr_ip:
2962 network, net_bits = cidr_ip.split('/')
2963 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2964 else:
2965 netmask = cidr_ip
2966 return netmask
2967 return None
2968
2969 def get_provider_rest(self, vca=None):
2970 """
2971 Method gets provider vdc view from vcloud director
2972
2973 Args:
2974             vca - is the active VCA connection used for the admin query.
2977 
2978         Returns:
2979             The xml content of the response, or None
2980 """
2981
2982 url_list = [vca.host, '/api/admin']
2983 response = Http.get(url=''.join(url_list),
2984 headers=vca.vcloud_session.get_vcloud_headers(),
2985 verify=vca.verify,
2986 logger=vca.logger)
2987
2988 if response.status_code == requests.codes.ok:
2989 return response.content
2990 return None
2991
2992 def create_vdc(self, vdc_name=None):
2993
2994 vdc_dict = {}
2995
2996 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
2997 if xml_content is not None:
2998 try:
2999 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
3000 for child in task_resp_xmlroot:
3001 if child.tag.split("}")[1] == 'Owner':
3002 vdc_id = child.attrib.get('href').split("/")[-1]
3003 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
3004 return vdc_dict
3005 except:
3006 self.logger.debug("Respond body {}".format(xml_content))
3007
3008 return None
3009
3010 def create_vdc_from_tmpl_rest(self, vdc_name=None):
3011 """
3012         Method creates a vdc in vCloud director based on a VDC template.
3013         It uses a pre-defined template that must be named 'openmano'
3014
3015 Args:
3016 vdc_name - name of a new vdc.
3017
3018 Returns:
3019             The xml content of the response, or None
3020 """
3021
3022 self.logger.info("Creating new vdc {}".format(vdc_name))
3023 vca = self.connect()
3024 if not vca:
3025 raise vimconn.vimconnConnectionException("self.connect() is failed")
3026 if vdc_name is None:
3027 return None
3028
3029 url_list = [vca.host, '/api/vdcTemplates']
3030 vm_list_rest_call = ''.join(url_list)
3031 response = Http.get(url=vm_list_rest_call,
3032 headers=vca.vcloud_session.get_vcloud_headers(),
3033 verify=vca.verify,
3034 logger=vca.logger)
3035
3036 # container url to a template
3037 vdc_template_ref = None
3038 try:
3039 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3040 for child in vm_list_xmlroot:
3041 # application/vnd.vmware.admin.providervdc+xml
3042                 # we need to find a template from which we instantiate the VDC
3043 if child.tag.split("}")[1] == 'VdcTemplate':
3044 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
3045 vdc_template_ref = child.attrib.get('href')
3046 except:
3047 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3048 self.logger.debug("Respond body {}".format(response.content))
3049 return None
3050
3051         # if we didn't find the required pre-defined template we return None
3052 if vdc_template_ref is None:
3053 return None
3054
3055 try:
3056 # instantiate vdc
3057 url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
3058 vm_list_rest_call = ''.join(url_list)
3059 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3060 <Source href="{1:s}"></Source>
3061                       <Description>openmano</Description>
3062 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
3063 headers = vca.vcloud_session.get_vcloud_headers()
3064 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
3065 response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
3066 logger=vca.logger)
3067
3068 vdc_task = taskType.parseString(response.content, True)
3069 if type(vdc_task) is GenericTask:
3070 self.vca.block_until_completed(vdc_task)
3071
3072             # on success return the response content, otherwise None
3073 if response.status_code >= 200 and response.status_code < 300:
3074 return response.content
3075 return None
3076 except:
3077             self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3078             self.logger.debug("Response body {}".format(response.content))
3079
3080 return None
3081
3082 def create_vdc_rest(self, vdc_name=None):
3083 """
3084         Method creates a vdc in vCloud Director using the admin API
3085 
3086         Args:
3087             vdc_name - name of the new vdc.
3088 
3089         Returns:
3090             The XML content of the response or None
3093 """
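             # Flow: GET the admin org record to find the 'add' link of type createVdcParams,
             # fetch a provider vdc reference via get_provider_rest(), then POST a CreateVdcParams
             # payload (ReservationPool model with fixed CPU/memory/storage limits) to that link.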
3094
3095 self.logger.info("Creating new vdc {}".format(vdc_name))
3096
3097 vca = self.connect_as_admin()
3098 if not vca:
3099 raise vimconn.vimconnConnectionException("self.connect() is failed")
3100 if vdc_name is None:
3101 return None
3102
3103 url_list = [vca.host, '/api/admin/org/', self.org_uuid]
3104 vm_list_rest_call = ''.join(url_list)
3105         if vca.vcloud_session and vca.vcloud_session.organization:
3106 response = Http.get(url=vm_list_rest_call,
3107 headers=vca.vcloud_session.get_vcloud_headers(),
3108 verify=vca.verify,
3109 logger=vca.logger)
3110
3111 provider_vdc_ref = None
3112 add_vdc_rest_url = None
3113 available_networks = None
3114
3115 if response.status_code != requests.codes.ok:
3116 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3117 response.status_code))
3118 return None
3119 else:
3120 try:
3121 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3122 for child in vm_list_xmlroot:
3123 # application/vnd.vmware.admin.providervdc+xml
3124 if child.tag.split("}")[1] == 'Link':
3125 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
3126 and child.attrib.get('rel') == 'add':
3127 add_vdc_rest_url = child.attrib.get('href')
3128 except:
3129                     self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3130                     self.logger.debug("Response body {}".format(response.content))
3131 return None
3132
3133 response = self.get_provider_rest(vca=vca)
3134 try:
3135 vm_list_xmlroot = XmlElementTree.fromstring(response)
3136 for child in vm_list_xmlroot:
3137 if child.tag.split("}")[1] == 'ProviderVdcReferences':
3138 for sub_child in child:
3139 provider_vdc_ref = sub_child.attrib.get('href')
3140 except:
3141                 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3142                 self.logger.debug("Response body {}".format(response))
3143 return None
3144
3145 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
3146 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
3147 <AllocationModel>ReservationPool</AllocationModel>
3148 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
3149 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
3150 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
3151 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
3152 <ProviderVdcReference
3153 name="Main Provider"
3154 href="{2:s}" />
3155 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
3156 escape(vdc_name),
3157 provider_vdc_ref)
3158
3159 headers = vca.vcloud_session.get_vcloud_headers()
3160 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
3161 response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
3162 logger=vca.logger)
3163
3164             # on success (201 Created) return the response content, otherwise None
3165 if response.status_code == 201:
3166 return response.content
3167 return None
3168
3169 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
3170 """
3171         Method retrieves vApp details from vCloud Director
3172 
3173         Args:
3174             vapp_uuid - is the vApp identifier.
3175             need_admin_access - use an admin session for the REST call.
3176         Returns:
3177             Dictionary with the parsed vApp details (empty dict on failure)
3178 """
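             # The vApp XML is parsed into a flat dict: creation date, network name and IP
             # scope values, per-VM attributes (status, vmuuid, NIC list, console ticket links),
             # plus the vCenter moref (vm_vcenter_info) and disk data (vm_virtual_hardware).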
3179
3180 parsed_respond = {}
3181 vca = None
3182
3183 if need_admin_access:
3184 vca = self.connect_as_admin()
3185 else:
3186 vca = self.vca
3187
3188 if not vca:
3189 raise vimconn.vimconnConnectionException("self.connect() is failed")
3190 if vapp_uuid is None:
3191 return None
3192
3193 url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
3194 get_vapp_restcall = ''.join(url_list)
3195
3196 if vca.vcloud_session and vca.vcloud_session.organization:
3197 response = Http.get(url=get_vapp_restcall,
3198 headers=vca.vcloud_session.get_vcloud_headers(),
3199 verify=vca.verify,
3200 logger=vca.logger)
3201
3202 if response.status_code == 403:
3203                 if not need_admin_access:
3204 response = self.retry_rest('GET', get_vapp_restcall)
3205
3206 if response.status_code != requests.codes.ok:
3207 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
3208 response.status_code))
3209 return parsed_respond
3210
3211 try:
3212 xmlroot_respond = XmlElementTree.fromstring(response.content)
3213 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
3214
3215 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
3216 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
3217 'vmw': 'http://www.vmware.com/schema/ovf',
3218 'vm': 'http://www.vmware.com/vcloud/v1.5',
3219 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
3220 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
3221 "xmlns":"http://www.vmware.com/vcloud/v1.5"
3222 }
3223
3224 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
3225 if created_section is not None:
3226 parsed_respond['created'] = created_section.text
3227
3228 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
3229 if network_section is not None and 'networkName' in network_section.attrib:
3230 parsed_respond['networkname'] = network_section.attrib['networkName']
3231
3232 ipscopes_section = \
3233 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
3234 namespaces)
3235 if ipscopes_section is not None:
3236 for ipscope in ipscopes_section:
3237 for scope in ipscope:
3238 tag_key = scope.tag.split("}")[1]
3239 if tag_key == 'IpRanges':
3240 ip_ranges = scope.getchildren()
3241 for ipblock in ip_ranges:
3242 for block in ipblock:
3243 parsed_respond[block.tag.split("}")[1]] = block.text
3244 else:
3245 parsed_respond[tag_key] = scope.text
3246
3247 # parse children section for other attrib
3248 children_section = xmlroot_respond.find('vm:Children/', namespaces)
3249 if children_section is not None:
3250 parsed_respond['name'] = children_section.attrib['name']
3251 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
3252 if "nestedHypervisorEnabled" in children_section.attrib else None
3253 parsed_respond['deployed'] = children_section.attrib['deployed']
3254 parsed_respond['status'] = children_section.attrib['status']
3255 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
3256 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
3257 nic_list = []
3258 for adapters in network_adapter:
3259 adapter_key = adapters.tag.split("}")[1]
3260 if adapter_key == 'PrimaryNetworkConnectionIndex':
3261 parsed_respond['primarynetwork'] = adapters.text
3262 if adapter_key == 'NetworkConnection':
3263 vnic = {}
3264 if 'network' in adapters.attrib:
3265 vnic['network'] = adapters.attrib['network']
3266 for adapter in adapters:
3267 setting_key = adapter.tag.split("}")[1]
3268 vnic[setting_key] = adapter.text
3269 nic_list.append(vnic)
3270
3271 for link in children_section:
3272 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3273 if link.attrib['rel'] == 'screen:acquireTicket':
3274 parsed_respond['acquireTicket'] = link.attrib
3275 if link.attrib['rel'] == 'screen:acquireMksTicket':
3276 parsed_respond['acquireMksTicket'] = link.attrib
3277
3278 parsed_respond['interfaces'] = nic_list
3279 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
3280 if vCloud_extension_section is not None:
3281 vm_vcenter_info = {}
3282 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
3283 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
3284 if vmext is not None:
3285 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
3286 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
3287
3288 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
3289 vm_virtual_hardware_info = {}
3290 if virtual_hardware_section is not None:
3291 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
3292 if item.find("rasd:Description",namespaces).text == "Hard disk":
3293 disk_size = item.find("rasd:HostResource" ,namespaces
3294 ).attrib["{"+namespaces['vm']+"}capacity"]
3295
3296 vm_virtual_hardware_info["disk_size"]= disk_size
3297 break
3298
3299 for link in virtual_hardware_section:
3300 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3301 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
3302 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
3303 break
3304
3305 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
3306             except Exception as exp:
3307 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
3308 return parsed_respond
3309
3310 def acuire_console(self, vm_uuid=None):
3311
3312 if vm_uuid is None:
3313 return None
3314
3315         if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3316             vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
3317 console_dict = vm_dict['acquireTicket']
3318 console_rest_call = console_dict['href']
3319
3320 response = Http.post(url=console_rest_call,
3321 headers=self.vca.vcloud_session.get_vcloud_headers(),
3322 verify=self.vca.verify,
3323 logger=self.vca.logger)
3324 if response.status_code == 403:
3325 response = self.retry_rest('POST', console_rest_call)
3326
3327 if response.status_code == requests.codes.ok:
3328 return response.content
3329
3330 return None
3331
3332 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3333 """
3334         Method to resize the VM disk when the flavor requests a larger size
3335 
3336         Args:
3337             vapp_uuid - is the vApp identifier.
3338             flavor_disk - disk size in GB as specified in VNFD (flavor)
3339 
3340         Returns:
3341             Status of the modify disk operation (True/None)
3342 """
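             # The flavor disk size (GB) is converted to MB and compared with the current
             # disk size reported in vm_virtual_hardware; the disk is only ever grown,
             # never shrunk, via modify_vm_disk_rest().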
3343 status = None
3344 try:
3345 #Flavor disk is in GB convert it into MB
3346 flavor_disk = int(flavor_disk) * 1024
3347 vm_details = self.get_vapp_details_rest(vapp_uuid)
3348 if vm_details:
3349 vm_name = vm_details["name"]
3350 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3351
3352 if vm_details and "vm_virtual_hardware" in vm_details:
3353 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3354 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3355
3356 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3357
3358 if flavor_disk > vm_disk:
3359 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3360 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3361 vm_disk, flavor_disk ))
3362 else:
3363 status = True
3364 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3365
3366 return status
3367 except Exception as exp:
3368             self.logger.info("Error occurred while modifying disk size {}".format(exp))
3369
3370
3371 def modify_vm_disk_rest(self, disk_href , disk_size):
3372 """
3373         Method to modify the VM disk size through the vCD REST API
3374 
3375         Args:
3376             disk_href - vCD API URL to GET and PUT disk data
3377             disk_size - disk size in MB as specified in VNFD (flavor)
3378 
3379         Returns:
3380             Status of the modify disk task or None
3381 """
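             # The disk RasdItemsList is fetched with GET, the vcloud capacity attribute of
             # the "Hard disk" item is patched in place, and the whole section is PUT back;
             # the returned vCD task is then waited on.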
3382 if disk_href is None or disk_size is None:
3383 return None
3384
3385 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3386 response = Http.get(url=disk_href,
3387 headers=self.vca.vcloud_session.get_vcloud_headers(),
3388 verify=self.vca.verify,
3389 logger=self.vca.logger)
3390
3391 if response.status_code == 403:
3392 response = self.retry_rest('GET', disk_href)
3393
3394 if response.status_code != requests.codes.ok:
3395 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
3396 response.status_code))
3397 return None
3398 try:
3399 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
3400 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
3401 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
3402
3403 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
3404 if item.find("rasd:Description",namespaces).text == "Hard disk":
3405 disk_item = item.find("rasd:HostResource" ,namespaces )
3406 if disk_item is not None:
3407 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
3408 break
3409
3410 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
3411 xml_declaration=True)
3412
3413 #Send PUT request to modify disk size
3414 headers = self.vca.vcloud_session.get_vcloud_headers()
3415 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
3416
3417 response = Http.put(url=disk_href,
3418 data=data,
3419 headers=headers,
3420 verify=self.vca.verify, logger=self.logger)
3421
3422 if response.status_code == 403:
3423 add_headers = {'Content-Type': headers['Content-Type']}
3424 response = self.retry_rest('PUT', disk_href, add_headers, data)
3425
3426 if response.status_code != 202:
3427 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
3428 response.status_code))
3429 else:
3430 modify_disk_task = taskType.parseString(response.content, True)
3431 if type(modify_disk_task) is GenericTask:
3432 status = self.vca.block_until_completed(modify_disk_task)
3433 return status
3434
3435 return None
3436
3437         except Exception as exp:
3438             self.logger.info("Error occurred calling REST API for modifying disk size {}".format(exp))
3439 return None
3440
3441 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3442 """
3443 Method to attach pci devices to VM
3444
3445 Args:
3446 vapp_uuid - uuid of vApp/VM
3447             pci_devices - PCI devices information as specified in VNFD (flavor)
3448 
3449         Returns:
3450             The status of the add PCI device task, the vm object and the
3451             vcenter_conect object
3452 """
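             # Flow: resolve the VM's vCenter moref, look for enough passthrough-capable PCI
             # devices on its current host, relocate the VM to another host if needed, then
             # attach the devices one by one through ReconfigVM_Task.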
3453 vm_obj = None
3454 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3455 vcenter_conect, content = self.get_vcenter_content()
3456 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3457
3458 if vm_moref_id:
3459 try:
3460 no_of_pci_devices = len(pci_devices)
3461 if no_of_pci_devices > 0:
3462 #Get VM and its host
3463 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3464 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3465 if host_obj and vm_obj:
3466                     # get PCI devices from the host on which the vApp is currently installed
3467 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3468
3469 if avilable_pci_devices is None:
3470 #find other hosts with active pci devices
3471 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3472 content,
3473 no_of_pci_devices
3474 )
3475
3476 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3477                             # Migrate the VM to the host where PCI devices are available
3478 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3479 task = self.relocate_vm(new_host_obj, vm_obj)
3480 if task is not None:
3481 result = self.wait_for_vcenter_task(task, vcenter_conect)
3482 self.logger.info("Migrate VM status: {}".format(result))
3483 host_obj = new_host_obj
3484 else:
3485                                     self.logger.info("Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj))
3486 raise vimconn.vimconnNotFoundException(
3487 "Fail to migrate VM : {} to host {}".format(
3488 vmname_andid,
3489 new_host_obj)
3490 )
3491
3492 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3493 #Add PCI devices one by one
3494 for pci_device in avilable_pci_devices:
3495 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3496 if task:
3497 status= self.wait_for_vcenter_task(task, vcenter_conect)
3498 if status:
3499 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3500 else:
3501 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3502 return True, vm_obj, vcenter_conect
3503 else:
3504 self.logger.error("Currently there is no host with"\
3505                                           " {} available PCI devices required for VM {}".format(
3506 no_of_pci_devices,
3507 vmname_andid)
3508 )
3509 raise vimconn.vimconnNotFoundException(
3510 "Currently there is no host with {} "\
3511                             "available PCI devices required for VM {}".format(
3512 no_of_pci_devices,
3513 vmname_andid))
3514 else:
3515                     self.logger.debug("No information about PCI devices {}".format(pci_devices))
3516
3517 except vmodl.MethodFault as error:
3518             self.logger.error("Error occurred while adding PCI devices {}".format(error))
3519 return None, vm_obj, vcenter_conect
3520
3521 def get_vm_obj(self, content, mob_id):
3522 """
3523         Method to get the vSphere VM object associated with a given moref ID
3524         Args:
3525             content - vCenter content object
3526             mob_id - moref ID of the VM
3528
3529 Returns:
3530 VM and host object
3531 """
3532 vm_obj = None
3533 host_obj = None
3534         try:
3535 container = content.viewManager.CreateContainerView(content.rootFolder,
3536 [vim.VirtualMachine], True
3537 )
3538 for vm in container.view:
3539 mobID = vm._GetMoId()
3540 if mobID == mob_id:
3541 vm_obj = vm
3542 host_obj = vm_obj.runtime.host
3543 break
3544 except Exception as exp:
3545 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3546 return host_obj, vm_obj
3547
3548 def get_pci_devices(self, host, need_devices):
3549 """
3550 Method to get the details of pci devices on given host
3551 Args:
3552 host - vSphere host object
3553 need_devices - number of pci devices needed on host
3554
3555 Returns:
3556 array of pci devices
3557 """
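             # A device counts as available when passthruActive is set for it and no
             # powered-on VM on the host already uses it as a VirtualPCIPassthrough backing.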
3558 all_devices = []
3559 all_device_ids = []
3560 used_devices_ids = []
3561
3562 try:
3563 if host:
3564 pciPassthruInfo = host.config.pciPassthruInfo
3565 pciDevies = host.hardware.pciDevice
3566
3567 for pci_status in pciPassthruInfo:
3568 if pci_status.passthruActive:
3569 for device in pciDevies:
3570 if device.id == pci_status.id:
3571 all_device_ids.append(device.id)
3572 all_devices.append(device)
3573
3574 #check if devices are in use
3575 avalible_devices = all_devices
3576 for vm in host.vm:
3577 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3578 vm_devices = vm.config.hardware.device
3579 for device in vm_devices:
3580 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3581 if device.backing.id in all_device_ids:
3582 for use_device in avalible_devices:
3583 if use_device.id == device.backing.id:
3584 avalible_devices.remove(use_device)
3585 used_devices_ids.append(device.backing.id)
3586                                         self.logger.debug("Device {} from devices {} "\
3587 "is in use".format(device.backing.id,
3588 device)
3589 )
3590 if len(avalible_devices) < need_devices:
3591                         self.logger.debug("Host {} does not have {} active devices".format(host,
3592 need_devices))
3593                         self.logger.debug("found only {} devices {}".format(len(avalible_devices),
3594 avalible_devices))
3595 return None
3596 else:
3597 required_devices = avalible_devices[:need_devices]
3598                         self.logger.info("Found {} PCI devices on host {}, required only {}".format(
3599 len(avalible_devices),
3600 host,
3601 need_devices))
3602                         self.logger.info("Returning {} devices as {}".format(need_devices,
3603 required_devices ))
3604 return required_devices
3605
3606 except Exception as exp:
3607 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3608
3609 return None
3610
3611 def get_host_and_PCIdevices(self, content, need_devices):
3612 """
3613         Method to get the details of PCI devices on all hosts
3614 
3615         Args:
3616             content - vCenter content object
3617 need_devices - number of pci devices needed on host
3618
3619 Returns:
3620 array of pci devices and host object
3621 """
3622 host_obj = None
3623 pci_device_objs = None
3624 try:
3625 if content:
3626 container = content.viewManager.CreateContainerView(content.rootFolder,
3627 [vim.HostSystem], True)
3628 for host in container.view:
3629 devices = self.get_pci_devices(host, need_devices)
3630 if devices:
3631 host_obj = host
3632 pci_device_objs = devices
3633 break
3634 except Exception as exp:
3635 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3636
3637 return host_obj,pci_device_objs
3638
3639     def relocate_vm(self, dest_host, vm):
3640         """
3641         Method to relocate a VM to a new host
3642
3643 Args:
3644 dest_host - vSphere host object
3645 vm - vSphere VM object
3646
3647 Returns:
3648 task object
3649 """
3650 task = None
3651 try:
3652 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3653 task = vm.Relocate(relocate_spec)
3654 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3655 except Exception as exp:
3656             self.logger.error("Error occurred while relocating VM {} to new host {}: {}".format(
3657                                                                                        vm, dest_host, exp))
3658 return task
3659
3660 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3661 """
3662 Waits and provides updates on a vSphere task
3663 """
3664 while task.info.state == vim.TaskInfo.State.running:
3665 time.sleep(2)
3666
3667 if task.info.state == vim.TaskInfo.State.success:
3668 if task.info.result is not None and not hideResult:
3669 self.logger.info('{} completed successfully, result: {}'.format(
3670 actionName,
3671 task.info.result))
3672 else:
3673 self.logger.info('Task {} completed successfully.'.format(actionName))
3674 else:
3675 self.logger.error('{} did not complete successfully: {} '.format(
3676 actionName,
3677 task.info.error)
3678 )
3679
3680 return task.info.result
3681
3682 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3683 """
3684 Method to add pci device in given VM
3685
3686 Args:
3687 host_object - vSphere host object
3688 vm_object - vSphere VM object
3689 host_pci_dev - host_pci_dev must be one of the devices from the
3690 host_object.hardware.pciDevice list
3691 which is configured as a PCI passthrough device
3692
3693 Returns:
3694 task object
3695 """
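             # A VirtualPCIPassthroughDeviceBackingInfo is built from the host device, wrapped
             # in a VirtualDeviceConfigSpec with operation "add" and applied with ReconfigVM_Task.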
3696 task = None
3697 if vm_object and host_object and host_pci_dev:
3698 try :
3699 #Add PCI device to VM
3700 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3701 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3702
3703 if host_pci_dev.id not in systemid_by_pciid:
3704 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3705 return None
3706
3707 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3708 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3709 id=host_pci_dev.id,
3710 systemId=systemid_by_pciid[host_pci_dev.id],
3711 vendorId=host_pci_dev.vendorId,
3712 deviceName=host_pci_dev.deviceName)
3713
3714 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3715
3716 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3717 new_device_config.operation = "add"
3718 vmConfigSpec = vim.vm.ConfigSpec()
3719 vmConfigSpec.deviceChange = [new_device_config]
3720
3721 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3722 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3723 host_pci_dev, vm_object, host_object)
3724 )
3725 except Exception as exp:
3726                 self.logger.error("Error occurred while adding PCI device {} to VM {}: {}".format(
3727 host_pci_dev,
3728 vm_object,
3729 exp))
3730 return task
3731
3732 def get_vm_vcenter_info(self):
3733 """
3734         Method to get the vCenter connection details provided in the VIM config
3735 
3736         Returns:
3737             Dictionary with the vCenter IP, port, user and password
3738             (raises vimconnException if any of them is missing)
3741 """
3742 vm_vcenter_info = {}
3743
3744 if self.vcenter_ip is not None:
3745 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3746 else:
3747 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3748 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3749 if self.vcenter_port is not None:
3750 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3751 else:
3752 raise vimconn.vimconnException(message="vCenter port is not provided."\
3753 " Please provide vCenter port while attaching datacenter to tenant in --config")
3754 if self.vcenter_user is not None:
3755 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3756 else:
3757 raise vimconn.vimconnException(message="vCenter user is not provided."\
3758 " Please provide vCenter user while attaching datacenter to tenant in --config")
3759
3760 if self.vcenter_password is not None:
3761 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3762 else:
3763 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3764 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3765
3766 return vm_vcenter_info
3767
3768
3769 def get_vm_pci_details(self, vmuuid):
3770 """
3771 Method to get VM PCI device details from vCenter
3772
3773         Args:
3774             vmuuid - uuid of the vApp/VM
3775 
3776         Returns:
3777             dict of PCI devices attached to the VM
3778
3779 """
3780 vm_pci_devices_info = {}
3781 try:
3782 vcenter_conect, content = self.get_vcenter_content()
3783 vm_moref_id = self.get_vm_moref_id(vmuuid)
3784 if vm_moref_id:
3785 #Get VM and its host
3786 if content:
3787 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3788 if host_obj and vm_obj:
3789 vm_pci_devices_info["host_name"]= host_obj.name
3790 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3791 for device in vm_obj.config.hardware.device:
3792 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3793 device_details={'devide_id':device.backing.id,
3794 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3795 }
3796 vm_pci_devices_info[device.deviceInfo.label] = device_details
3797 else:
3798 self.logger.error("Can not connect to vCenter while getting "\
3799                                       "PCI devices information")
3800 return vm_pci_devices_info
3801 except Exception as exp:
3802             self.logger.error("Error occurred while getting VM information"\
3803 " for VM : {}".format(exp))
3804 raise vimconn.vimconnException(message=exp)
3805
3806 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
3807 """
3808         Method to add a network adapter of the given type to a VM
3809         Args :
3810             network_name - name of the network
3811             primary_nic_index - int value for the primary nic index
3812             nicIndex - int value for the nic index
3813             nic_type - adapter model (NetworkAdapterType) to attach to the VM
3814 Returns:
3815 None
3816 """
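             # IP allocation mode: POOL when a floating IP is requested, MANUAL when an
             # explicit ip_address is given, DHCP otherwise. The NetworkConnectionSection is
             # fetched, patched as raw XML (optionally with NetworkAdapterType) and PUT back.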
3817
3818 try:
3819 ip_address = None
3820 floating_ip = False
3821 if 'floating_ip' in net: floating_ip = net['floating_ip']
3822
3823 # Stub for ip_address feature
3824 if 'ip_address' in net: ip_address = net['ip_address']
3825
3826 if floating_ip:
3827 allocation_mode = "POOL"
3828 elif ip_address:
3829 allocation_mode = "MANUAL"
3830 else:
3831 allocation_mode = "DHCP"
3832
3833 if not nic_type:
3834 for vms in vapp._get_vms():
3835 vm_id = (vms.id).split(':')[-1]
3836
3837 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3838
3839 response = Http.get(url=url_rest_call,
3840 headers=self.vca.vcloud_session.get_vcloud_headers(),
3841 verify=self.vca.verify,
3842 logger=self.vca.logger)
3843
3844 if response.status_code == 403:
3845 response = self.retry_rest('GET', url_rest_call)
3846
3847 if response.status_code != 200:
3848                         self.logger.error("REST call {} failed reason : {} "\
3849 "status code : {}".format(url_rest_call,
3850 response.content,
3851 response.status_code))
3852 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3853 "network connection section")
3854
3855 data = response.content
3856 if '<PrimaryNetworkConnectionIndex>' not in data:
3857 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3858 <NetworkConnection network="{}">
3859 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3860 <IsConnected>true</IsConnected>
3861 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3862 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3863 allocation_mode)
3864 # Stub for ip_address feature
3865 if ip_address:
3866 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3867 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3868
3869 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3870 else:
3871 new_item = """<NetworkConnection network="{}">
3872 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3873 <IsConnected>true</IsConnected>
3874 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3875 </NetworkConnection>""".format(network_name, nicIndex,
3876 allocation_mode)
3877 # Stub for ip_address feature
3878 if ip_address:
3879 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3880 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3881
3882 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3883
3884 headers = self.vca.vcloud_session.get_vcloud_headers()
3885 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3886 response = Http.put(url=url_rest_call, headers=headers, data=data,
3887 verify=self.vca.verify,
3888 logger=self.vca.logger)
3889
3890 if response.status_code == 403:
3891 add_headers = {'Content-Type': headers['Content-Type']}
3892 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3893
3894 if response.status_code != 202:
3895                         self.logger.error("REST call {} failed reason : {} "\
3896 "status code : {} ".format(url_rest_call,
3897 response.content,
3898 response.status_code))
3899 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3900 "network connection section")
3901 else:
3902 nic_task = taskType.parseString(response.content, True)
3903 if isinstance(nic_task, GenericTask):
3904 self.vca.block_until_completed(nic_task)
3905                             self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
3906 "default NIC type".format(vm_id))
3907 else:
3908 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
3909 "connect NIC type".format(vm_id))
3910 else:
3911 for vms in vapp._get_vms():
3912 vm_id = (vms.id).split(':')[-1]
3913
3914 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3915
3916 response = Http.get(url=url_rest_call,
3917 headers=self.vca.vcloud_session.get_vcloud_headers(),
3918 verify=self.vca.verify,
3919 logger=self.vca.logger)
3920
3921 if response.status_code == 403:
3922 response = self.retry_rest('GET', url_rest_call)
3923
3924 if response.status_code != 200:
3925                         self.logger.error("REST call {} failed reason : {} "\
3926 "status code : {}".format(url_rest_call,
3927 response.content,
3928 response.status_code))
3929 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3930 "network connection section")
3931 data = response.content
3932 if '<PrimaryNetworkConnectionIndex>' not in data:
3933 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3934 <NetworkConnection network="{}">
3935 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3936 <IsConnected>true</IsConnected>
3937 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3938 <NetworkAdapterType>{}</NetworkAdapterType>
3939 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3940 allocation_mode, nic_type)
3941 # Stub for ip_address feature
3942 if ip_address:
3943 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3944 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3945
3946 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3947 else:
3948 new_item = """<NetworkConnection network="{}">
3949 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3950 <IsConnected>true</IsConnected>
3951 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3952 <NetworkAdapterType>{}</NetworkAdapterType>
3953 </NetworkConnection>""".format(network_name, nicIndex,
3954 allocation_mode, nic_type)
3955 # Stub for ip_address feature
3956 if ip_address:
3957 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3958 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3959
3960 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3961
3962 headers = self.vca.vcloud_session.get_vcloud_headers()
3963 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3964 response = Http.put(url=url_rest_call, headers=headers, data=data,
3965 verify=self.vca.verify,
3966 logger=self.vca.logger)
3967
3968 if response.status_code == 403:
3969 add_headers = {'Content-Type': headers['Content-Type']}
3970 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3971
3972 if response.status_code != 202:
3973                         self.logger.error("REST call {} failed reason : {} "\
3974 "status code : {}".format(url_rest_call,
3975 response.content,
3976 response.status_code))
3977 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3978 "network connection section")
3979 else:
3980 nic_task = taskType.parseString(response.content, True)
3981 if isinstance(nic_task, GenericTask):
3982 self.vca.block_until_completed(nic_task)
3983 self.logger.info("add_network_adapter_to_vms(): VM {} "\
3984                                              "connected to NIC type {}".format(vm_id, nic_type))
3985 else:
3986 self.logger.error("add_network_adapter_to_vms(): VM {} "\
3987 "failed to connect NIC type {}".format(vm_id, nic_type))
3988 except Exception as exp:
3989 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
3990 "while adding Network adapter")
3991 raise vimconn.vimconnException(message=exp)
3992
3993
3994 def set_numa_affinity(self, vmuuid, paired_threads_id):
3995 """
3996         Method to assign numa affinity in the vm configuration parameters
3997         Args :
3998             vmuuid - vm uuid
3999             paired_threads_id - one or more virtual processor
4000                                 numbers
4001         Returns:
4002             None if the numa affinity was assigned successfully
4003 """
4004 try:
4005 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
4006 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
4007 context = None
4008 if hasattr(ssl, '_create_unverified_context'):
4009 context = ssl._create_unverified_context()
4010 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
4011 pwd=self.passwd, port=int(vm_vcenter_port),
4012 sslContext=context)
4013 atexit.register(Disconnect, vcenter_conect)
4014 content = vcenter_conect.RetrieveContent()
4015
4016 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
4017 if vm_obj:
4018 config_spec = vim.vm.ConfigSpec()
4019 config_spec.extraConfig = []
4020 opt = vim.option.OptionValue()
4021 opt.key = 'numa.nodeAffinity'
4022 opt.value = str(paired_threads_id)
4023 config_spec.extraConfig.append(opt)
4024 task = vm_obj.ReconfigVM_Task(config_spec)
4025 if task:
4026 result = self.wait_for_vcenter_task(task, vcenter_conect)
4027 extra_config = vm_obj.config.extraConfig
4028 flag = False
4029 for opts in extra_config:
4030 if 'numa.nodeAffinity' in opts.key:
4031 flag = True
4032                             self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
4033 "value {} for vm {}".format(opt.value, vm_obj))
4034 if flag:
4035 return
4036 else:
4037 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
4038 except Exception as exp:
4039 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
4040                               "for VM {} : {}".format(vmuuid, exp))
4041 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
4042 "affinity".format(exp))
4043
4044
4045 def cloud_init(self, vapp, cloud_config):
4046 """
4047 Method to inject ssh-key
4048 vapp - vapp object
4049 cloud_config a dictionary with:
4050 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
4051 'users': (optional) list of users to be inserted, each item is a dict with:
4052 'name': (mandatory) user name,
4053 'key-pairs': (optional) list of strings with the public key to be inserted to the user
4054 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
4055 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
4056 'config-files': (optional). List of files to be transferred. Each item is a dict with:
4057 'dest': (mandatory) string with the destination absolute path
4058 'encoding': (optional, by default text). Can be one of:
4059 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
4060 'content' (mandatory): string with the content of the file
4061 'permissions': (optional) string with file permissions, typically octal notation '0644'
4062 'owner': (optional) file owner, string with the format 'owner:group'
4063                 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
4064 """
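             # Of the cloud_config fields above, only 'key-pairs' and 'users' are used here:
             # they are turned into a bash pre-customization script by format_script() and
             # injected through vCD guest customization (see guest_customization()).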
4065 try:
4066 if not isinstance(cloud_config, dict):
4067 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
4068 else:
4069 key_pairs = []
4070 userdata = []
4071 if "key-pairs" in cloud_config:
4072 key_pairs = cloud_config["key-pairs"]
4073
4074 if "users" in cloud_config:
4075 userdata = cloud_config["users"]
4076
4077 self.logger.debug("cloud_init : Guest os customization started..")
4078 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
4079 self.guest_customization(vapp, customize_script)
4080
4081 except Exception as exp:
4082 self.logger.error("cloud_init : exception occurred while injecting "\
4083 "ssh-key")
4084 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
4085 "ssh-key".format(exp))
4086
4087 def format_script(self, key_pairs=[], users_list=[]):
4088 bash_script = """
4089 #!/bin/bash
4090 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4091 if [ "$1" = "precustomization" ];then
4092 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4093 """
4094
4095 keys = "\n".join(key_pairs)
4096 if keys:
4097 keys_data = """
4098 if [ ! -d /root/.ssh ];then
4099 mkdir /root/.ssh
4100 chown root:root /root/.ssh
4101 chmod 700 /root/.ssh
4102 touch /root/.ssh/authorized_keys
4103 chown root:root /root/.ssh/authorized_keys
4104 chmod 600 /root/.ssh/authorized_keys
4105 # make centos with selinux happy
4106 which restorecon && restorecon -Rv /root/.ssh
4107 else
4108 touch /root/.ssh/authorized_keys
4109 chown root:root /root/.ssh/authorized_keys
4110 chmod 600 /root/.ssh/authorized_keys
4111 fi
4112 echo '{key}' >> /root/.ssh/authorized_keys
4113 """.format(key=keys)
4114
4115 bash_script+= keys_data
4116
4117 for user in users_list:
4118 if 'name' in user: user_name = user['name']
4119 if 'key-pairs' in user:
4120 user_keys = "\n".join(user['key-pairs'])
4121 else:
4122 user_keys = None
4123
4124 add_user_name = """
4125 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
4126 """.format(user_name=user_name)
4127
4128 bash_script+= add_user_name
4129
4130 if user_keys:
4131 user_keys_data = """
4132 mkdir /home/{user_name}/.ssh
4133 chown {user_name}:{user_name} /home/{user_name}/.ssh
4134 chmod 700 /home/{user_name}/.ssh
4135 touch /home/{user_name}/.ssh/authorized_keys
4136 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
4137 chmod 600 /home/{user_name}/.ssh/authorized_keys
4138 # make centos with selinux happy
4139 which restorecon && restorecon -Rv /home/{user_name}/.ssh
4140 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
4141 """.format(user_name=user_name,user_key=user_keys)
4142
4143 bash_script+= user_keys_data
4144
4145 return bash_script+"\n\tfi"
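             # Illustrative example (values are made up):
             #   format_script(key_pairs=["ssh-rsa AAAA... osm@ro"],
             #                 users_list=[{"name": "osm", "key-pairs": ["ssh-rsa BBBB..."]}])
             # returns a bash pre-customization script that creates the user and appends
             # the keys to the corresponding authorized_keys files on first boot.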
4146
4147 def guest_customization(self, vapp, customize_script):
4148 """
4149 Method to customize guest os
4150 vapp - Vapp object
4151 customize_script - Customize script to be run at first boot of VM.
4152 """
4153 for vm in vapp._get_vms():
4154 vm_name = vm.name
4155 task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
4156 if isinstance(task, GenericTask):
4157 self.vca.block_until_completed(task)
4158 self.logger.info("guest_customization : customized guest os task "\
4159 "completed for VM {}".format(vm_name))
4160 else:
4161                 self.logger.error("guest_customization : task for customized guest os "\
4162 "failed for VM {}".format(vm_name))
4163                 raise vimconn.vimconnException("guest_customization : failed to perform "\
4164 "guest os customization on VM {}".format(vm_name))
4165
4166 def add_new_disk(self, vapp_uuid, disk_size):
4167 """
4168 Method to create an empty vm disk
4169
4170 Args:
4171 vapp_uuid - is vapp identifier.
4172 disk_size - size of disk to be created in GB
4173
4174 Returns:
4175 None
4176 """
4177 status = False
4178 vm_details = None
4179 try:
4180 #Disk size in GB, convert it into MB
4181 if disk_size is not None:
4182 disk_size_mb = int(disk_size) * 1024
4183 vm_details = self.get_vapp_details_rest(vapp_uuid)
4184
4185 if vm_details and "vm_virtual_hardware" in vm_details:
4186 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4187 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4188 status = self.add_new_disk_rest(disk_href, disk_size_mb)
4189
4190 except Exception as exp:
4191 msg = "Error occurred while creating new disk {}.".format(exp)
4192 self.rollback_newvm(vapp_uuid, msg)
4193
4194 if status:
4195 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4196 else:
4197 #If failed to add disk, delete VM
4198 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4199 self.rollback_newvm(vapp_uuid, msg)
4200
4201
4202 def add_new_disk_rest(self, disk_href, disk_size_mb):
4203 """
4204         Retrieves the vApp Disks section & adds a new empty disk
4205 
4206         Args:
4207             disk_href: Disk section href to add the disk
4208 disk_size_mb: Disk size in MB
4209
4210 Returns: Status of add new disk task
4211 """
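             # The existing RasdItemsList is parsed to reuse the bus type/subtype of the
             # current hard disk and to pick the next free InstanceID; a new Item is appended
             # at the end of the list and the section is PUT back.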
4212 status = False
4213 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
4214 response = Http.get(url=disk_href,
4215 headers=self.vca.vcloud_session.get_vcloud_headers(),
4216 verify=self.vca.verify,
4217 logger=self.vca.logger)
4218
4219 if response.status_code == 403:
4220 response = self.retry_rest('GET', disk_href)
4221
4222 if response.status_code != requests.codes.ok:
4223 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
4224 .format(disk_href, response.status_code))
4225 return status
4226 try:
4227                 # Find bus type & max of instance IDs assigned to disks
4228 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4229 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4230 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4231 instance_id = 0
4232 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4233 if item.find("rasd:Description",namespaces).text == "Hard disk":
4234 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
4235 if inst_id > instance_id:
4236 instance_id = inst_id
4237 disk_item = item.find("rasd:HostResource" ,namespaces)
4238 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
4239 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
4240
4241 instance_id = instance_id + 1
4242 new_item = """<Item>
4243 <rasd:Description>Hard disk</rasd:Description>
4244 <rasd:ElementName>New disk</rasd:ElementName>
4245 <rasd:HostResource
4246 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
4247 vcloud:capacity="{}"
4248 vcloud:busSubType="{}"
4249 vcloud:busType="{}"></rasd:HostResource>
4250 <rasd:InstanceID>{}</rasd:InstanceID>
4251 <rasd:ResourceType>17</rasd:ResourceType>
4252 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
4253
4254 new_data = response.content
4255 #Add new item at the bottom
4256 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
4257
4258 # Send PUT request to modify virtual hardware section with new disk
4259 headers = self.vca.vcloud_session.get_vcloud_headers()
4260 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4261
4262 response = Http.put(url=disk_href,
4263 data=new_data,
4264 headers=headers,
4265 verify=self.vca.verify, logger=self.logger)
4266
4267 if response.status_code == 403:
4268 add_headers = {'Content-Type': headers['Content-Type']}
4269 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
4270
4271 if response.status_code != 202:
4272 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
4273 .format(disk_href, response.status_code, response.content))
4274 else:
4275 add_disk_task = taskType.parseString(response.content, True)
4276 if type(add_disk_task) is GenericTask:
4277 status = self.vca.block_until_completed(add_disk_task)
4278 if not status:
4279 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
4280
4281 except Exception as exp:
4282 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
4283
4284 return status
4285
4286
4287 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
4288 """
4289 Method to add existing disk to vm
4290 Args :
4291 catalogs - List of VDC catalogs
4292 image_id - Catalog ID
4293 template_name - Name of template in catalog
4294 vapp_uuid - UUID of vApp
4295 Returns:
4296 None
4297 """
4298 disk_info = None
4299 vcenter_conect, content = self.get_vcenter_content()
4300 #find moref-id of vm in image
4301 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
4302 image_id=image_id,
4303 )
4304
4305 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
4306 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
4307 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
4308 if catalog_vm_moref_id:
4309 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
4310 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
4311 if catalog_vm_obj:
4312 #find existing disk
4313 disk_info = self.find_disk(catalog_vm_obj)
4314 else:
4315 exp_msg = "No VM with image id {} found".format(image_id)
4316 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4317 else:
4318 exp_msg = "No Image found with image ID {} ".format(image_id)
4319 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4320
4321 if disk_info:
4322 self.logger.info("Existing disk_info : {}".format(disk_info))
4323 #get VM
4324 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4325 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
4326 if vm_obj:
4327 status = self.add_disk(vcenter_conect=vcenter_conect,
4328 vm=vm_obj,
4329 disk_info=disk_info,
4330 size=size,
4331 vapp_uuid=vapp_uuid
4332 )
4333 if status:
4334 self.logger.info("Disk from image id {} added to {}".format(image_id,
4335 vm_obj.config.name)
4336 )
4337 else:
4338 msg = "No disk found with image id {} to add in VM {}".format(
4339 image_id,
4340 vm_obj.config.name)
4341 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4342
4343
4344 def find_disk(self, vm_obj):
4345 """
4346 Method to find details of existing disk in VM
4347 Args :
4348 vm_obj - vCenter object of VM
4349             vm_obj - vCenter object of VM
4351 disk_info : dict of disk details
4352 """
4353 disk_info = {}
4354 if vm_obj:
4355 try:
4356 devices = vm_obj.config.hardware.device
4357 for device in devices:
4358 if type(device) is vim.vm.device.VirtualDisk:
4359 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4360 disk_info["full_path"] = device.backing.fileName
4361 disk_info["datastore"] = device.backing.datastore
4362 disk_info["capacityKB"] = device.capacityInKB
4363 break
4364 except Exception as exp:
4365 self.logger.error("find_disk() : exception occurred while "\
4366 "getting existing disk details :{}".format(exp))
4367 return disk_info
4368
4369
4370 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4371 """
4372 Method to add existing disk in VM
4373 Args :
4374 vcenter_conect - vCenter content object
4375 vm - vCenter vm object
4376 disk_info : dict of disk details
4377 Returns:
4378 status : status of add disk task
4379 """
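             # The unit number is derived from the existing disks (skipping 7, reserved for
             # the SCSI controller) and the existing VMDK is attached through a
             # FlatVer2BackingInfo pointing at its datastore path; the capacity used is the
             # larger of the image disk size and the flavor size.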
4380 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4381 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4382 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4383 if size is not None:
4384 #Convert size from GB to KB
4385 sizeKB = int(size) * 1024 * 1024
4386             # compare size of existing disk and user given size; assign whichever is greater
4387 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4388 sizeKB, capacityKB))
4389 if sizeKB > capacityKB:
4390 capacityKB = sizeKB
4391
4392 if datastore and fullpath and capacityKB:
4393 try:
4394 spec = vim.vm.ConfigSpec()
4395 # get all disks on a VM, set unit_number to the next available
4396 unit_number = 0
4397 for dev in vm.config.hardware.device:
4398 if hasattr(dev.backing, 'fileName'):
4399 unit_number = int(dev.unitNumber) + 1
4400 # unit_number 7 reserved for scsi controller
4401 if unit_number == 7:
4402 unit_number += 1
4403 if isinstance(dev, vim.vm.device.VirtualDisk):
4404 #vim.vm.device.VirtualSCSIController
4405 controller_key = dev.controllerKey
4406
4407 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4408 unit_number, controller_key))
4409 # add disk here
4410 dev_changes = []
4411 disk_spec = vim.vm.device.VirtualDeviceSpec()
4412 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4413 disk_spec.device = vim.vm.device.VirtualDisk()
4414 disk_spec.device.backing = \
4415 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4416 disk_spec.device.backing.thinProvisioned = True
4417 disk_spec.device.backing.diskMode = 'persistent'
4418 disk_spec.device.backing.datastore = datastore
4419 disk_spec.device.backing.fileName = fullpath
4420
4421 disk_spec.device.unitNumber = unit_number
4422 disk_spec.device.capacityInKB = capacityKB
4423 disk_spec.device.controllerKey = controller_key
4424 dev_changes.append(disk_spec)
4425 spec.deviceChange = dev_changes
4426 task = vm.ReconfigVM_Task(spec=spec)
4427 status = self.wait_for_vcenter_task(task, vcenter_conect)
4428 return status
4429 except Exception as exp:
4430 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4431 "{} to vm {}".format(exp,
4432 fullpath,
4433 vm.config.name)
4434 self.rollback_newvm(vapp_uuid, exp_msg)
4435 else:
4436 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4437 self.rollback_newvm(vapp_uuid, msg)
4438
4439
4440 def get_vcenter_content(self):
4441 """
4442 Get the vsphere content object
4443 """
4444 try:
4445 vm_vcenter_info = self.get_vm_vcenter_info()
4446 except Exception as exp:
4447             self.logger.error("Error occurred while getting vCenter information"\
4448 " for VM : {}".format(exp))
4449 raise vimconn.vimconnException(message=exp)
4450
4451 context = None
4452 if hasattr(ssl, '_create_unverified_context'):
4453 context = ssl._create_unverified_context()
4454
4455 vcenter_conect = SmartConnect(
4456 host=vm_vcenter_info["vm_vcenter_ip"],
4457 user=vm_vcenter_info["vm_vcenter_user"],
4458 pwd=vm_vcenter_info["vm_vcenter_password"],
4459 port=int(vm_vcenter_info["vm_vcenter_port"]),
4460 sslContext=context
4461 )
4462 atexit.register(Disconnect, vcenter_conect)
4463 content = vcenter_conect.RetrieveContent()
4464 return vcenter_conect, content
4465
4466
4467 def get_vm_moref_id(self, vapp_uuid):
4468 """
4469 Get the moref_id of given VM
4470 """
4471 try:
4472 if vapp_uuid:
4473 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4474 if vm_details and "vm_vcenter_info" in vm_details:
4475 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4476
4477 return vm_moref_id
4478
4479 except Exception as exp:
4480 self.logger.error("Error occurred while getting VM moref ID "\
4481 " for VM : {}".format(exp))
4482 return None
4483
4484
4485 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
4486 """
4487 Method to get vApp template details
4488 Args :
4489 catalogs - list of VDC catalogs
4490 image_id - Catalog ID to find
4491 template_name : template name in catalog
4492 Returns:
4493             parsed_response : dict of vApp template details
4494 """
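             # The catalog item matching image_id is resolved to its vAppTemplate href and the
             # template's VCloudExtension section is parsed to obtain the vCenter moref of the
             # source VM (used by add_existing_disk() to locate its disk).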
4495 parsed_response = {}
4496
4497 vca = self.connect_as_admin()
4498 if not vca:
4499 raise vimconn.vimconnConnectionException("self.connect() is failed")
4500
4501 try:
4502 catalog = self.get_catalog_obj(image_id, catalogs)
4503 if catalog:
4504 template_name = self.get_catalogbyid(image_id, catalogs)
4505 catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
4506 if len(catalog_items) == 1:
4507 response = Http.get(catalog_items[0].get_href(),
4508 headers=vca.vcloud_session.get_vcloud_headers(),
4509 verify=vca.verify,
4510 logger=vca.logger)
4511 catalogItem = XmlElementTree.fromstring(response.content)
4512 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
4513 vapp_tempalte_href = entity.get("href")
4514 #get vapp details and parse moref id
4515
4516 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4517 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4518 'vmw': 'http://www.vmware.com/schema/ovf',
4519 'vm': 'http://www.vmware.com/vcloud/v1.5',
4520 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4521 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
4522 'xmlns':"http://www.vmware.com/vcloud/v1.5"
4523 }
4524
4525 if vca.vcloud_session and vca.vcloud_session.organization:
4526 response = Http.get(url=vapp_tempalte_href,
4527 headers=vca.vcloud_session.get_vcloud_headers(),
4528 verify=vca.verify,
4529 logger=vca.logger
4530 )
4531
4532 if response.status_code != requests.codes.ok:
4533 self.logger.debug("REST API call {} failed. Return status code {}".format(
4534 vapp_tempalte_href, response.status_code))
4535
4536 else:
4537 xmlroot_respond = XmlElementTree.fromstring(response.content)
4538 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4539 if children_section is not None:
4540 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4541 if vCloud_extension_section is not None:
4542 vm_vcenter_info = {}
4543 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4544 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4545 if vmext is not None:
4546 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4547 parsed_response["vm_vcenter_info"]= vm_vcenter_info
4548
4549         except Exception as exp:
4550             self.logger.info("Error occurred calling REST API to get vApp details: {}".format(exp))
4551
4552 return parsed_response
4553
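    # A minimal usage sketch (hedged; `catalogs` and `image_id` are assumed to be the
    # VDC catalog list and the catalog id of the template, as passed by the callers of
    # this connector). When the template exposes vCenter extension data, the MoRef is
    # returned under parsed_response["vm_vcenter_info"]["vm_moref_id"]:
    #
    #   details = self.get_vapp_template_details(catalogs=catalogs, image_id=image_id)
    #   template_moref = details.get("vm_vcenter_info", {}).get("vm_moref_id")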
4554
4555 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
4556 """
4557         Method to delete a vApp and raise an exception with the given message (rollback helper)
4558 Args :
4559 vapp_uuid - vApp UUID
4560 msg - Error message to be logged
4561 exp_type : Exception type
4562 Returns:
4563 None
4564 """
4565 if vapp_uuid:
4566 status = self.delete_vminstance(vapp_uuid)
4567 else:
4568 msg = "No vApp ID"
4569 self.logger.error(msg)
4570 if exp_type == "Genric":
4571 raise vimconn.vimconnException(msg)
4572 elif exp_type == "NotFound":
4573 raise vimconn.vimconnNotFoundException(message=msg)
4574
4575 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4576 """
4577 Method to attach SRIOV adapters to VM
4578
4579 Args:
4580 vapp_uuid - uuid of vApp/VM
4581             sriov_nets - SRIOV devices information as specified in the VNFD (flavor)
4582 vmname_andid - vmname
4583
4584 Returns:
4585             The status of the add-SRIOV-adapter task, the vm object and the
4586             vcenter_conect object
4587 """
4588 vm_obj = None
4589 vcenter_conect, content = self.get_vcenter_content()
4590 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4591
4592 if vm_moref_id:
4593 try:
4594 no_of_sriov_devices = len(sriov_nets)
4595 if no_of_sriov_devices > 0:
4596 #Get VM and its host
4597 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4598 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4599 if host_obj and vm_obj:
4600                         #get SRIOV devices from the host on which the vApp is currently installed
4601 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4602 no_of_sriov_devices,
4603 )
4604
4605 if len(avilable_sriov_devices) == 0:
4606 #find other hosts with active pci devices
4607 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4608 content,
4609 no_of_sriov_devices,
4610 )
4611
4612 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4613 #Migrate vm to the host where SRIOV devices are available
4614 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4615 new_host_obj))
4616 task = self.relocate_vm(new_host_obj, vm_obj)
4617 if task is not None:
4618 result = self.wait_for_vcenter_task(task, vcenter_conect)
4619 self.logger.info("Migrate VM status: {}".format(result))
4620 host_obj = new_host_obj
4621                             else:
4622                                 self.logger.error("Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj))
4623                                 raise vimconn.vimconnNotFoundException(
4624                                     "Failed to migrate VM {} to host {}".format(
4625                                         vmname_andid,
4626                                         new_host_obj)
4627                                     )
4628
4629 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4630 #Add SRIOV devices one by one
4631 for sriov_net in sriov_nets:
4632 network_name = sriov_net.get('net_id')
4633 dvs_portgr_name = self.create_dvPort_group(network_name)
4634 if sriov_net.get('type') == "VF":
4635 #add vlan ID ,Modify portgroup for vlan ID
4636 self.configure_vlanID(content, vcenter_conect, network_name)
4637
4638 task = self.add_sriov_to_vm(content,
4639 vm_obj,
4640 host_obj,
4641 network_name,
4642 avilable_sriov_devices[0]
4643 )
4644 if task:
4645 status= self.wait_for_vcenter_task(task, vcenter_conect)
4646 if status:
4647 self.logger.info("Added SRIOV {} to VM {}".format(
4648 no_of_sriov_devices,
4649 str(vm_obj)))
4650 else:
4651                                     self.logger.error("Failed to add SRIOV {} to VM {}".format(
4652 no_of_sriov_devices,
4653 str(vm_obj)))
4654 raise vimconn.vimconnUnexpectedResponse(
4655                                     "Failed to add SRIOV adapter to VM {}".format(str(vm_obj))
4656 )
4657 return True, vm_obj, vcenter_conect
4658 else:
4659 self.logger.error("Currently there is no host with"\
4660                                           " {} available SRIOV "\
4661 "VFs required for VM {}".format(
4662 no_of_sriov_devices,
4663 vmname_andid)
4664 )
4665 raise vimconn.vimconnNotFoundException(
4666 "Currently there is no host with {} "\
4667                             "available SRIOV devices required for VM {}".format(
4668 no_of_sriov_devices,
4669 vmname_andid))
4670 else:
4671                     self.logger.debug("No information about SRIOV devices: {}".format(sriov_nets))
4672
4673 except vmodl.MethodFault as error:
4674                 self.logger.error("Error occurred while adding SRIOV: {}".format(error))
4675 return None, vm_obj, vcenter_conect
4676
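    # A hedged usage sketch of the flow above (names are hypothetical). Each entry of
    # sriov_nets carries at least 'net_id' (network name) and 'type'; a type of "VF"
    # additionally triggers VLAN configuration of the created portgroup:
    #
    #   sriov_nets = [{'net_id': 'sriov-net-1', 'type': 'VF'},
    #                 {'net_id': 'sriov-net-2', 'type': 'VF'}]
    #   status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid, sriov_nets, vmname_andid)
    #   if not status:
    #       self.rollback_newvm(vapp_uuid, "Failed to add SRIOV adapters")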
4677
4678 def get_sriov_devices(self,host, no_of_vfs):
4679 """
4680 Method to get the details of SRIOV devices on given host
4681 Args:
4682 host - vSphere host object
4683 no_of_vfs - number of VFs needed on host
4684
4685 Returns:
4686 array of SRIOV devices
4687 """
4688 sriovInfo=[]
4689 if host:
4690 for device in host.config.pciPassthruInfo:
4691 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4692 if device.numVirtualFunction >= no_of_vfs:
4693 sriovInfo.append(device)
4694 break
4695 return sriovInfo
4696
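    # For reference, the filter above relies on pyVmomi's PCI passthrough view of the
    # host: every entry of host.config.pciPassthruInfo that is a vim.host.SriovInfo
    # exposes sriovActive and numVirtualFunction, which is all the check needs. A
    # hedged, equivalent standalone predicate:
    #
    #   def _host_has_enough_vfs(host, no_of_vfs):
    #       return any(isinstance(dev, vim.host.SriovInfo) and dev.sriovActive
    #                  and dev.numVirtualFunction >= no_of_vfs
    #                  for dev in host.config.pciPassthruInfo)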
4697
4698 def get_host_and_sriov_devices(self, content, no_of_vfs):
4699 """
4700         Method to get the details of SRIOV devices on all hosts
4701
4702 Args:
4703             content - vCenter content object
4704 no_of_vfs - number of pci VFs needed on host
4705
4706 Returns:
4707 array of SRIOV devices and host object
4708 """
4709 host_obj = None
4710 sriov_device_objs = None
4711 try:
4712 if content:
4713 container = content.viewManager.CreateContainerView(content.rootFolder,
4714 [vim.HostSystem], True)
4715 for host in container.view:
4716 devices = self.get_sriov_devices(host, no_of_vfs)
4717 if devices:
4718 host_obj = host
4719 sriov_device_objs = devices
4720 break
4721 except Exception as exp:
4722 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4723
4724 return host_obj,sriov_device_objs
4725
4726
4727 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
4728 """
4729 Method to add SRIOV adapter to vm
4730
4731 Args:
4732 host_obj - vSphere host object
4733 vm_obj - vSphere vm object
4734 content - vCenter content object
4735             network_name - name of distributed virtual portgroup
4736 sriov_device - SRIOV device info
4737
4738 Returns:
4739 task object
4740 """
4741 devices = []
4742 vnic_label = "sriov nic"
4743 try:
4744 dvs_portgr = self.get_dvport_group(network_name)
4745 network_name = dvs_portgr.name
4746 nic = vim.vm.device.VirtualDeviceSpec()
4747 # VM device
4748 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4749 nic.device = vim.vm.device.VirtualSriovEthernetCard()
4750 nic.device.addressType = 'assigned'
4751 #nic.device.key = 13016
4752 nic.device.deviceInfo = vim.Description()
4753 nic.device.deviceInfo.label = vnic_label
4754 nic.device.deviceInfo.summary = network_name
4755 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
4756
4757 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
4758 nic.device.backing.deviceName = network_name
4759 nic.device.backing.useAutoDetect = False
4760 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
4761 nic.device.connectable.startConnected = True
4762 nic.device.connectable.allowGuestControl = True
4763
4764 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
4765 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
4766 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
4767
4768 devices.append(nic)
4769 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
4770 task = vm_obj.ReconfigVM_Task(vmconf)
4771 return task
4772 except Exception as exp:
4773 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
4774 return None
4775
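    # A hedged usage sketch: the returned task is a vCenter ReconfigVM_Task and is
    # normally waited on with wait_for_vcenter_task(), exactly as add_sriov() does:
    #
    #   task = self.add_sriov_to_vm(content, vm_obj, host_obj, network_name, sriov_device)
    #   if task:
    #       status = self.wait_for_vcenter_task(task, vcenter_conect)
    #       if not status:
    #           self.logger.error("SRIOV adapter reconfiguration failed")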
4776
4777 def create_dvPort_group(self, network_name):
4778 """
4779         Method to create distributed virtual portgroup
4780
4781 Args:
4782 network_name - name of network/portgroup
4783
4784 Returns:
4785 portgroup key
4786 """
4787 try:
4788 new_network_name = [network_name, '-', str(uuid.uuid4())]
4789 network_name=''.join(new_network_name)
4790 vcenter_conect, content = self.get_vcenter_content()
4791
4792 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4793 if dv_switch:
4794 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4795 dv_pg_spec.name = network_name
4796
4797 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4798 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4799 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4800 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4801 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4802 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4803
4804 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4805 self.wait_for_vcenter_task(task, vcenter_conect)
4806
4807 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4808 if dvPort_group:
4809                     self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
4810 return dvPort_group.key
4811 else:
4812                     self.logger.debug("No distributed virtual port group found with name {}".format(network_name))
4813
4814 except Exception as exp:
4815             self.logger.error("Error occurred while creating distributed virtual port group {}"\
4816 " : {}".format(network_name, exp))
4817 return None
4818
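    # A hedged usage sketch: the portgroup is created on the switch named by
    # self.dvs_name, with a uuid-suffixed name and a default (untagged) port config;
    # the returned value is the portgroup key, or None on failure:
    #
    #   pg_key = self.create_dvPort_group("sriov-net-1")   # "sriov-net-1" is hypothetical
    #   if pg_key is None:
    #       raise vimconn.vimconnException("Could not create distributed virtual port group")
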
4819 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4820 """
4821         Method to reconfigure distributed virtual portgroup
4822
4823 Args:
4824             dvPort_group_name - name of distributed virtual portgroup
4825             content - vCenter content object
4826             config_info - distributed virtual portgroup configuration
4827
4828 Returns:
4829 task object
4830 """
4831 try:
4832 dvPort_group = self.get_dvport_group(dvPort_group_name)
4833 if dvPort_group:
4834 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4835 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4836 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4837 if "vlanID" in config_info:
4838 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4839 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4840
4841 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4842 return task
4843 else:
4844 return None
4845 except Exception as exp:
4846             self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
4847 " : {}".format(dvPort_group_name, exp))
4848 return None
4849
4850
4851 def destroy_dvport_group(self , dvPort_group_name):
4852 """
4853         Method to destroy distributed virtual portgroup
4854
4855 Args:
4856             dvPort_group_name - name of network/portgroup
4857
4858 Returns:
4859             True if the portgroup was successfully deleted, else False
4860 """
4861 vcenter_conect, content = self.get_vcenter_content()
4862 try:
4863 status = None
4864 dvPort_group = self.get_dvport_group(dvPort_group_name)
4865 if dvPort_group:
4866 task = dvPort_group.Destroy_Task()
4867 status = self.wait_for_vcenter_task(task, vcenter_conect)
4868 return status
4869 except vmodl.MethodFault as exp:
4870             self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
4871 exp, dvPort_group_name))
4872 return None
4873
4874
4875 def get_dvport_group(self, dvPort_group_name):
4876 """
4877         Method to get distributed virtual portgroup
4878
4879 Args:
4880             dvPort_group_name - name of network/portgroup
4881
4882 Returns:
4883 portgroup object
4884 """
4885 vcenter_conect, content = self.get_vcenter_content()
4886 dvPort_group = None
4887 try:
4888 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4889 for item in container.view:
4890 if item.key == dvPort_group_name:
4891 dvPort_group = item
4892 break
4893 return dvPort_group
4894 except vmodl.MethodFault as exp:
4895             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
4896 exp, dvPort_group_name))
4897 return None
4898
4899 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4900 """
4901         Method to get distributed virtual portgroup vlanID
4902
4903 Args:
4904             dvPort_group_name - name of network/portgroup
4905
4906 Returns:
4907 vlan ID
4908 """
4909 vlanId = None
4910 try:
4911 dvPort_group = self.get_dvport_group(dvPort_group_name)
4912 if dvPort_group:
4913 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4914 except vmodl.MethodFault as exp:
4915             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
4916 exp, dvPort_group_name))
4917 return vlanId
4918
4919
4920 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4921 """
4922         Method to configure the vlanID of a distributed virtual portgroup
4923
4924 Args:
4925             dvPort_group_name - name of network/portgroup
4926
4927 Returns:
4928 None
4929 """
4930 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4931 if vlanID == 0:
4932 #configure vlanID
4933 vlanID = self.genrate_vlanID(dvPort_group_name)
4934 config = {"vlanID":vlanID}
4935 task = self.reconfig_portgroup(content, dvPort_group_name,
4936 config_info=config)
4937 if task:
4938 status= self.wait_for_vcenter_task(task, vcenter_conect)
4939 if status:
4940 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4941 dvPort_group_name,vlanID))
4942 else:
4943                 self.logger.error("Failed to reconfigure portgroup {} for vlanID {}".format(
4944 dvPort_group_name, vlanID))
4945
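    # A hedged sketch of the reconfiguration performed above: reconfig_portgroup()
    # currently only understands a 'vlanID' key, which it maps to a
    # VmwareDistributedVirtualSwitch.VlanIdSpec on the portgroup's defaultPortConfig:
    #
    #   task = self.reconfig_portgroup(content, dvPort_group_name,
    #                                  config_info={"vlanID": 3000})   # 3000 is illustrative
    #   if task:
    #       self.wait_for_vcenter_task(task, vcenter_conect)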
4946
4947 def genrate_vlanID(self, network_name):
4948 """
4949 Method to get unused vlanID
4950 Args:
4951 network_name - name of network/portgroup
4952 Returns:
4953 vlanID
4954 """
4955 vlan_id = None
4956 used_ids = []
4957         if self.config.get('vlanID_range') is None:
4958             raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4959                         "config value before creating an SRIOV network with a vlan tag")
4960 if "used_vlanIDs" not in self.persistent_info:
4961 self.persistent_info["used_vlanIDs"] = {}
4962 else:
4963 used_ids = self.persistent_info["used_vlanIDs"].values()
4964
4965 for vlanID_range in self.config.get('vlanID_range'):
4966             start_vlanid, end_vlanid = vlanID_range.split("-")
4967             if int(start_vlanid) > int(end_vlanid):
4968 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4969 vlanID_range))
4970
4971 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4972 if id not in used_ids:
4973 vlan_id = id
4974 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4975 return vlan_id
4976 if vlan_id is None:
4977 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4978
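    # The VLAN pool consumed above comes from the connector configuration. A hedged
    # example of the expected 'vlanID_range' value (a list of "start-end" strings) and
    # of the bookkeeping kept in persistent_info after two allocations:
    #
    #   config['vlanID_range'] = ["3000-3100", "3150-3200"]
    #   self.persistent_info["used_vlanIDs"]  # e.g. {"sriov-net-1": 3000, "sriov-net-2": 3001}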
4979
4980 def get_obj(self, content, vimtype, name):
4981 """
4982 Get the vsphere object associated with a given text name
4983 """
4984 obj = None
4985 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4986 for item in container.view:
4987 if item.name == name:
4988 obj = item
4989 break
4990 return obj
4991
4992
4993 def insert_media_to_vm(self, vapp, image_id):
4994 """
4995 Method to insert media CD-ROM (ISO image) from catalog to vm.
4996 vapp - vapp object to get vm id
4997             image_id - image id of the CD-ROM (ISO) to be inserted into the vm
4998 """
4999 # create connection object
5000 vca = self.connect()
5001 try:
5002 # fetching catalog details
5003 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
5004 response = Http.get(url=rest_url,
5005 headers=vca.vcloud_session.get_vcloud_headers(),
5006 verify=vca.verify,
5007 logger=vca.logger)
5008
5009 if response.status_code != 200:
5010                 self.logger.error("REST call {} failed reason : {} "\
5011                     "status code : {}".format(rest_url,
5012 response.content,
5013 response.status_code))
5014 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
5015 "catalog details")
5016 # searching iso name and id
5017 iso_name,media_id = self.get_media_details(vca, response.content)
5018
5019 if iso_name and media_id:
5020 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5021 <ns6:MediaInsertOrEjectParams
5022 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
5023 <ns6:Media
5024 type="application/vnd.vmware.vcloud.media+xml"
5025 name="{}.iso"
5026 id="urn:vcloud:media:{}"
5027 href="https://{}/api/media/{}"/>
5028 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
5029 vca.host,media_id)
5030
5031 for vms in vapp._get_vms():
5032 vm_id = (vms.id).split(':')[-1]
5033
5034 headers = vca.vcloud_session.get_vcloud_headers()
5035 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
5036 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
5037
5038 response = Http.post(url=rest_url,
5039 headers=headers,
5040 data=data,
5041 verify=vca.verify,
5042 logger=vca.logger)
5043
5044 if response.status_code != 202:
5045 self.logger.error("Failed to insert CD-ROM to vm")
5046                     raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert "\
5047 "ISO image to vm")
5048 else:
5049 task = taskType.parseString(response.content, True)
5050 if isinstance(task, GenericTask):
5051 vca.block_until_completed(task)
5052                         self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
5053 " image to vm {}".format(vm_id))
5054 except Exception as exp:
5055 self.logger.error("insert_media_to_vm() : exception occurred "\
5056 "while inserting media CD-ROM")
5057 raise vimconn.vimconnException(message=exp)
5058
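    # A hedged usage sketch: `vapp` is the pyvcloud vApp object of the target VM
    # (obtained as elsewhere in this connector) and image_id is the catalog id of an
    # ISO/media item; the call blocks until the vCloud insertMedia task completes:
    #
    #   self.insert_media_to_vm(vapp, image_id)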
5059
5060 def get_media_details(self, vca, content):
5061 """
5062 Method to get catalog item details
5063 vca - connection object
5064 content - Catalog details
5065 Return - Media name, media id
5066 """
5067 cataloghref_list = []
5068 try:
5069 if content:
5070 vm_list_xmlroot = XmlElementTree.fromstring(content)
5071 for child in vm_list_xmlroot.iter():
5072 if 'CatalogItem' in child.tag:
5073 cataloghref_list.append(child.attrib.get('href'))
5074 if cataloghref_list is not None:
5075 for href in cataloghref_list:
5076 if href:
5077 response = Http.get(url=href,
5078 headers=vca.vcloud_session.get_vcloud_headers(),
5079 verify=vca.verify,
5080 logger=vca.logger)
5081 if response.status_code != 200:
5082                         self.logger.error("REST call {} failed reason : {} "\
5083 "status code : {}".format(href,
5084 response.content,
5085 response.status_code))
5086 raise vimconn.vimconnException("get_media_details : Failed to get "\
5087 "catalogitem details")
5088 list_xmlroot = XmlElementTree.fromstring(response.content)
5089 for child in list_xmlroot.iter():
5090 if 'Entity' in child.tag:
5091 if 'media' in child.attrib.get('href'):
5092 name = child.attrib.get('name')
5093 media_id = child.attrib.get('href').split('/').pop()
5094 return name,media_id
5095 else:
5096 self.logger.debug("Media name and id not found")
5097 return False,False
5098 except Exception as exp:
5099 self.logger.error("get_media_details : exception occurred "\
5100                                "while getting media details")
5101 raise vimconn.vimconnException(message=exp)
5102
5103
5104 def retry_rest(self, method, url, add_headers=None, data=None):
5105 """ Method to get Token & retry respective REST request
5106 Args:
5107             method - HTTP method; one of 'GET', 'PUT', 'POST' or 'DELETE'
5108 url - request url to be used
5109 add_headers - Additional headers (optional)
5110 data - Request payload data to be passed in request
5111 Returns:
5112 response - Response of request
5113 """
5114 response = None
5115
5116 #Get token
5117 self.get_token()
5118
5119 headers=self.vca.vcloud_session.get_vcloud_headers()
5120
5121 if add_headers:
5122 headers.update(add_headers)
5123
5124 if method == 'GET':
5125 response = Http.get(url=url,
5126 headers=headers,
5127 verify=self.vca.verify,
5128 logger=self.vca.logger)
5129 elif method == 'PUT':
5130 response = Http.put(url=url,
5131 data=data,
5132 headers=headers,
5133 verify=self.vca.verify,
5134                                 logger=self.vca.logger)
5135 elif method == 'POST':
5136 response = Http.post(url=url,
5137 headers=headers,
5138 data=data,
5139 verify=self.vca.verify,
5140 logger=self.vca.logger)
5141 elif method == 'DELETE':
5142 response = Http.delete(url=url,
5143 headers=headers,
5144 verify=self.vca.verify,
5145 logger=self.vca.logger)
5146 return response
5147
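    # A hedged usage sketch: retry_rest() first refreshes the vCloud session token via
    # get_token() and then re-issues the request with the fresh headers, so it is meant
    # for retrying a call that failed with an expired token (HTTP 403). The URL below is
    # illustrative only:
    #
    #   url = "{}/api/vApp/vapp-{}".format(self.url, vapp_id)
    #   response = self.retry_rest('GET', url)
    #   if response is not None and response.status_code == requests.codes.ok:
    #       xmlroot = XmlElementTree.fromstring(response.content)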
5148
5149 def get_token(self):
5150 """ Generate a new token if expired
5151
5152 Returns:
5153             None. On success self.vca is refreshed with a new VCA object that can later be used to connect to vCloud director as admin for the VDC
5154 """
5155 vca = None
5156
5157 try:
5158             self.logger.debug("Generate token for vcloud org {} "
5159                               "as user {}.".format(self.org_name,
5160                                                    self.user))
5161 vca = VCA(host=self.url,
5162 username=self.user,
5163 service_type=STANDALONE,
5164 version=VCAVERSION,
5165 verify=False,
5166 log=False)
5167
5168 result = vca.login(password=self.passwd, org=self.org_name)
5169 if result is True:
5170 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
5171 if result is True:
5172 self.logger.info(
5173 "Successfully generated token for vcloud direct org: {} as user: {}".format(self.org_name, self.user))
5174 #Update vca
5175 self.vca = vca
5176 return
5177
5178 except:
5179 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
5180 "{} as user: {}".format(self.org_name, self.user))
5181
5182 if not vca or not result:
5183             raise vimconn.vimconnConnectionException("Failed to regenerate the vCloud director token while reconnecting")
5184
5185
5186 def get_vdc_details(self):
5187 """ Get VDC details using pyVcloud Lib
5188
5189 Returns vdc object
5190 """
5191 vdc = self.vca.get_vdc(self.tenant_name)
5192
5193 #Retry once, if failed by refreshing token
5194 if vdc is None:
5195 self.get_token()
5196 vdc = self.vca.get_vdc(self.tenant_name)
5197
5198 return vdc
5199
5200