Modified vCD connector method with minor changes
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implements an abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
68 # global variable for vcd connector type
69 STANDALONE = 'standalone'
70
71 # key for flavor dicts
72 FLAVOR_RAM_KEY = 'ram'
73 FLAVOR_VCPUS_KEY = 'vcpus'
74 FLAVOR_DISK_KEY = 'disk'
75 DEFAULT_IP_PROFILE = {'dhcp_count':50,
76 'dhcp_enabled':True,
77 'ip_version':"IPv4"
78 }
79 # global variable for wait time
80 INTERVAL_TIME = 5
81 MAX_WAIT_TIME = 1800
82
83 VCAVERSION = '5.9'
84
85 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
86 __date__ = "$12-Jan-2017 11:09:29$"
87 __version__ = '0.1'
88
89 # -1: "Could not be created",
90 # 0: "Unresolved",
91 # 1: "Resolved",
92 # 2: "Deployed",
93 # 3: "Suspended",
94 # 4: "Powered on",
95 # 5: "Waiting for user input",
96 # 6: "Unknown state",
97 # 7: "Unrecognized state",
98 # 8: "Powered off",
99 # 9: "Inconsistent state",
100 # 10: "Children do not all have the same status",
101 # 11: "Upload initiated, OVF descriptor pending",
102 # 12: "Upload initiated, copying contents",
103 # 13: "Upload initiated , disk contents pending",
104 # 14: "Upload has been quarantined",
105 # 15: "Upload quarantine period has expired"
106
107 # mapping vCD status to MANO
108 vcdStatusCode2manoFormat = {4: 'ACTIVE',
109 7: 'PAUSED',
110 3: 'SUSPENDED',
111 8: 'INACTIVE',
112 12: 'BUILD',
113 -1: 'ERROR',
114 14: 'DELETED'}
115
116 #
117 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
118 'ERROR': 'ERROR', 'DELETED': 'DELETED'
119 }
120
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
128 Constructor create vmware connector to vCloud director.
129
130 By default construct doesn't validate connection state. So client can create object with None arguments.
131 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
132
133 a) It initialize organization UUID
134 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
135
136 Args:
137 uuid - is organization uuid.
138 name - is organization name that must be presented in vCloud director.
139 tenant_id - is VDC uuid it must be presented in vCloud director
140 tenant_name - is VDC name.
141 url - is hostname or ip address of vCloud director
142 url_admin - same as above.
143 user - is user that administrator for organization. Caller must make sure that
144 username has right privileges.
145
146 password - is password for a user.
147
148 VMware connector also requires PVDC administrative privileges and separate account.
149 This variables must be passed via config argument dict contains keys
150
151 dict['admin_username']
152 dict['admin_password']
153 config - Provide NSX and vCenter information
154
155 Returns:
156 Nothing.
157 """
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
217 # raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
233 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 if index == 'tenant_id':
243 return self.tenant_id
244 if index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 if index == 'tenant_id':
269 self.tenant_id = value
270 if index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
289 def connect_as_admin(self):
290 """ Method connect as pvdc admin user to vCloud director.
291 There are certain action that can be done only by provider vdc admin user.
292 Organization creation / provider network creation etc.
293
294 Returns:
295 The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
296 """
297
298 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
299
300 vca_admin = VCA(host=self.url,
301 username=self.admin_user,
302 service_type=STANDALONE,
303 version=VCAVERSION,
304 verify=False,
305 log=False)
306 result = vca_admin.login(password=self.admin_password, org='System')
307 if not result:
308 raise vimconn.vimconnConnectionException(
309 "Can't connect to a vCloud director as: {}".format(self.admin_user))
310 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
311 if result is True:
312 self.logger.info(
313 "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
314
315 return vca_admin
316
317 def connect(self):
318 """ Method connect as normal user to vCloud director.
319
320 Returns:
321 The return vca object that letter can be used to connect to vCloud director as admin for VDC
322 """
323
324 try:
325 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
326 self.user,
327 self.org_name))
328 vca = VCA(host=self.url,
329 username=self.user,
330 service_type=STANDALONE,
331 version=VCAVERSION,
332 verify=False,
333 log=False)
334
335 result = vca.login(password=self.passwd, org=self.org_name)
336 if not result:
337 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
338 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
339 if result is True:
340 self.logger.info(
341 "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
342
343 except:
344 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
345 "{} as user: {}".format(self.org_name, self.user))
346
347 return vca
348
349 def init_organization(self):
350 """ Method initialize organization UUID and VDC parameters.
351
352 At bare minimum client must provide organization name that present in vCloud director and VDC.
353
354 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
355 The Org - UUID will be initialized at the run time if data center present in vCloud director.
356
357 Returns:
358 The return vca object that letter can be used to connect to vcloud direct as admin
359 """
360 vca = self.connect()
361 if not vca:
362 raise vimconn.vimconnConnectionException("self.connect() is failed.")
363
364 self.vca = vca
365 try:
366 if self.org_uuid is None:
367 org_dict = self.get_org_list()
368 for org in org_dict:
369 # we set org UUID at the init phase but we can do it only when we have valid credential.
370 if org_dict[org] == self.org_name:
371 self.org_uuid = org
372 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
373 break
374 else:
375 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
376
377 # if all went well, request the org details
378 org_details_dict = self.get_org(org_uuid=self.org_uuid)
379
380 # there are two cases when initializing the VDC ID or VDC name at run time:
381 # tenant_name provided but no tenant id
382 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
383 vdcs_dict = org_details_dict['vdcs']
384 for vdc in vdcs_dict:
385 if vdcs_dict[vdc] == self.tenant_name:
386 self.tenant_id = vdc
387 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
388 self.org_name))
389 break
390 else:
391 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
392 # case two we have tenant_id but we don't have tenant name so we find and set it.
393 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
394 vdcs_dict = org_details_dict['vdcs']
395 for vdc in vdcs_dict:
396 if vdc == self.tenant_id:
397 self.tenant_name = vdcs_dict[vdc]
398 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
399 self.org_name))
400 break
401 else:
402 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
403 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
404 except:
405 self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
406 self.logger.debug(traceback.format_exc())
407 self.org_uuid = None
408
409 def new_tenant(self, tenant_name=None, tenant_description=None):
410 """ Method adds a new tenant to VIM with this name.
411 This action requires access to create VDC action in vCloud director.
412
413 Args:
414 tenant_name is tenant_name to be created.
415 tenant_description not used for this call
416
417 Return:
418 returns the tenant identifier in UUID format.
419 If action is failed method will throw vimconn.vimconnException method
420 """
421 vdc_task = self.create_vdc(vdc_name=tenant_name)
422 if vdc_task is not None:
423 vdc_uuid, value = vdc_task.popitem()
424 self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
425 return vdc_uuid
426 else:
427 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
428
429 def delete_tenant(self, tenant_id=None):
430 """ Delete a tenant from VIM
431 Args:
432 tenant_id is tenant_id to be deleted.
433
434 Return:
435 returns the tenant identifier in UUID format.
436 If action is failed method will throw exception
437 """
438 vca = self.connect_as_admin()
439 if not vca:
440 raise vimconn.vimconnConnectionException("self.connect() is failed")
441
442 if tenant_id is not None:
443 if vca.vcloud_session and vca.vcloud_session.organization:
444 #Get OrgVDC
445 url_list = [self.vca.host, '/api/vdc/', tenant_id]
446 orgvdc_herf = ''.join(url_list)
447 response = Http.get(url=orgvdc_herf,
448 headers=vca.vcloud_session.get_vcloud_headers(),
449 verify=vca.verify,
450 logger=vca.logger)
451
452 if response.status_code != requests.codes.ok:
453 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
454 "Return status code {}".format(orgvdc_herf,
455 response.status_code))
456 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
457
458 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
459 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
460 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
461 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
462 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
463
464 #Delete OrgVDC
465 response = Http.delete(url=vdc_remove_href,
466 headers=vca.vcloud_session.get_vcloud_headers(),
467 verify=vca.verify,
468 logger=vca.logger)
469
470 if response.status_code == 202:
471 delete_vdc_task = taskType.parseString(response.content, True)
472 if type(delete_vdc_task) is GenericTask:
473 self.vca.block_until_completed(delete_vdc_task)
474 self.logger.info("Deleted tenant with ID {}".format(tenant_id))
475 return tenant_id
476 else:
477 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
478 "Return status code {}".format(vdc_remove_href,
479 response.status_code))
480 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
481 else:
482 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
483 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
484
485
486 def get_tenant_list(self, filter_dict={}):
487 """Obtain tenants of VIM
488 filter_dict can contain the following keys:
489 name: filter by tenant name
490 id: filter by tenant uuid/id
491 <other VIM specific>
492 Returns the tenant list of dictionaries:
493 [{'name':'<name>', 'id':'<id>', ...}, ...]
494
495 """
496 org_dict = self.get_org(self.org_uuid)
497 vdcs_dict = org_dict['vdcs']
498
499 vdclist = []
500 try:
501 for k in vdcs_dict:
502 entry = {'name': vdcs_dict[k], 'id': k}
503 # if caller didn't specify dictionary we return all tenants.
504 if filter_dict is not None and filter_dict:
505 filtered_entry = entry.copy()
506 filtered_dict = set(entry.keys()) - set(filter_dict)
507 for unwanted_key in filtered_dict: del entry[unwanted_key]
508 if filter_dict == entry:
509 vdclist.append(filtered_entry)
510 else:
511 vdclist.append(entry)
512 except:
513 self.logger.debug("Error in get_tenant_list()")
514 self.logger.debug(traceback.format_exc())
515 raise vimconn.vimconnException("Incorrect state. {}")
516
517 return vdclist
518
519 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
520 """Adds a tenant network to VIM
521 net_name is the name
522 net_type can be 'bridge', 'data' or 'ptp'.
523 ip_profile is a dict containing the IP parameters of the network
524 shared is a boolean
525 Returns the network identifier"""
526
527 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
528 .format(net_name, net_type, ip_profile, shared))
529
530 isshared = 'false'
531 if shared:
532 isshared = 'true'
533
534 # ############# Stub code for SRIOV #################
535 # if net_type == "data" or net_type == "ptp":
536 # if self.config.get('dv_switch_name') == None:
537 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
538 # network_uuid = self.create_dvPort_group(net_name)
539
540 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
541 ip_profile=ip_profile, isshared=isshared)
542 if network_uuid is not None:
543 return network_uuid
544 else:
545 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
546
547 def get_vcd_network_list(self):
548 """ Method available organization for a logged in tenant
549
550 Returns:
551 The return vca object that letter can be used to connect to vcloud direct as admin
552 """
553
554 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
555
556 if not self.tenant_name:
557 raise vimconn.vimconnConnectionException("Tenant name is empty.")
558
559 vdc = self.get_vdc_details()
560 if vdc is None:
561 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
562
563 vdc_uuid = vdc.get_id().split(":")[3]
564 networks = self.vca.get_networks(vdc.get_name())
565 network_list = []
566 try:
567 for network in networks:
568 filter_dict = {}
569 netid = network.get_id().split(":")
570 if len(netid) != 4:
571 continue
572
573 filter_dict["name"] = network.get_name()
574 filter_dict["id"] = netid[3]
575 filter_dict["shared"] = network.get_IsShared()
576 filter_dict["tenant_id"] = vdc_uuid
577 if network.get_status() == 1:
578 filter_dict["admin_state_up"] = True
579 else:
580 filter_dict["admin_state_up"] = False
581 filter_dict["status"] = "ACTIVE"
582 filter_dict["type"] = "bridge"
583 network_list.append(filter_dict)
584 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
585 except:
586 self.logger.debug("Error in get_vcd_network_list")
587 self.logger.debug(traceback.format_exc())
588 pass
589
590 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
591 return network_list
592
593 def get_network_list(self, filter_dict={}):
594 """Obtain tenant networks of VIM
595 Filter_dict can be:
596 name: network name OR/AND
597 id: network uuid OR/AND
598 shared: boolean OR/AND
599 tenant_id: tenant OR/AND
600 admin_state_up: boolean
601 status: 'ACTIVE'
602
603 [{key : value , key : value}]
604
605 Returns the network list of dictionaries:
606 [{<the fields at Filter_dict plus some VIM specific>}, ...]
607 List can be empty
608 """
609
610 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
611
612 if not self.tenant_name:
613 raise vimconn.vimconnConnectionException("Tenant name is empty.")
614
615 vdc = self.get_vdc_details()
616 if vdc is None:
617 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
618
619 try:
620 vdcid = vdc.get_id().split(":")[3]
621 networks = self.vca.get_networks(vdc.get_name())
622 network_list = []
623
624 for network in networks:
625 filter_entry = {}
626 net_uuid = network.get_id().split(":")
627 if len(net_uuid) != 4:
628 continue
629 else:
630 net_uuid = net_uuid[3]
631 # create dict entry
632 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
633 vdcid,
634 network.get_name()))
635 filter_entry["name"] = network.get_name()
636 filter_entry["id"] = net_uuid
637 filter_entry["shared"] = network.get_IsShared()
638 filter_entry["tenant_id"] = vdcid
639 if network.get_status() == 1:
640 filter_entry["admin_state_up"] = True
641 else:
642 filter_entry["admin_state_up"] = False
643 filter_entry["status"] = "ACTIVE"
644 filter_entry["type"] = "bridge"
645 filtered_entry = filter_entry.copy()
646
647 if filter_dict is not None and filter_dict:
648 # we remove all the key : value we don't care and match only
649 # respected field
650 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
651 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
652 if filter_dict == filter_entry:
653 network_list.append(filtered_entry)
654 else:
655 network_list.append(filtered_entry)
656 except:
657 self.logger.debug("Error in get_vcd_network_list")
658 self.logger.debug(traceback.format_exc())
659
660 self.logger.debug("Returning {}".format(network_list))
661 return network_list
662
663 def get_network(self, net_id):
664 """Method obtains network details of net_id VIM network
665 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields."""
666
667 try:
668 vdc = self.get_vdc_details()
669 vdc_id = vdc.get_id().split(":")[3]
670
671 networks = self.vca.get_networks(vdc.get_name())
672 filter_dict = {}
673
674 if not networks:
675 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
676
677 for network in networks:
678 vdc_network_id = network.get_id().split(":")
679 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
680 filter_dict["name"] = network.get_name()
681 filter_dict["id"] = vdc_network_id[3]
682 filter_dict["shared"] = network.get_IsShared()
683 filter_dict["tenant_id"] = vdc_id
684 if network.get_status() == 1:
685 filter_dict["admin_state_up"] = True
686 else:
687 filter_dict["admin_state_up"] = False
688 filter_dict["status"] = "ACTIVE"
689 filter_dict["type"] = "bridge"
690 self.logger.debug("Returning {}".format(filter_dict))
691 return filter_dict
692 else:
693 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
694
695 except Exception as e:
696 self.logger.debug("Error in get_network")
697 self.logger.debug(traceback.format_exc())
698 if isinstance(e, vimconn.vimconnException):
699 raise
700 else:
701 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
702
703 return filter_dict
704
705 def delete_network(self, net_id):
706 """
707 Method Deletes a tenant network from VIM, provide the network id.
708
709 Returns the network identifier or raise an exception
710 """
711
712 # ############# Stub code for SRIOV #################
713 # dvport_group = self.get_dvport_group(net_id)
714 # if dvport_group:
715 # #delete portgroup
716 # status = self.destroy_dvport_group(net_id)
717 # if status:
718 # # Remove vlanID from persistent info
719 # if net_id in self.persistent_info["used_vlanIDs"]:
720 # del self.persistent_info["used_vlanIDs"][net_id]
721 #
722 # return net_id
723
724 vcd_network = self.get_vcd_network(network_uuid=net_id)
725 if vcd_network is not None and vcd_network:
726 if self.delete_network_action(network_uuid=net_id):
727 return net_id
728 else:
729 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
730
731 def refresh_nets_status(self, net_list):
732 """Get the status of the networks
733 Params: the list of network identifiers
734 Returns a dictionary with:
735 net_id: #VIM id of this network
736 status: #Mandatory. Text with one of:
737 # DELETED (not found at vim)
738 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
739 # OTHER (Vim reported other status not understood)
740 # ERROR (VIM indicates an ERROR status)
741 # ACTIVE, INACTIVE, DOWN (admin down),
742 # BUILD (on building process)
743 #
744 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
745 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
746
747 """
748
749 dict_entry = {}
750 try:
751 for net in net_list:
752 errormsg = ''
753 vcd_network = self.get_vcd_network(network_uuid=net)
754 if vcd_network is not None and vcd_network:
755 if vcd_network['status'] == '1':
756 status = 'ACTIVE'
757 else:
758 status = 'DOWN'
759 else:
760 status = 'DELETED'
761 errormsg = 'Network not found.'
762
763 dict_entry[net] = {'status': status, 'error_msg': errormsg,
764 'vim_info': yaml.safe_dump(vcd_network)}
765 except:
766 self.logger.debug("Error in refresh_nets_status")
767 self.logger.debug(traceback.format_exc())
768
769 return dict_entry
770
771 def get_flavor(self, flavor_id):
772 """Obtain flavor details from the VIM
773 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
774 """
775 if flavor_id not in vimconnector.flavorlist:
776 raise vimconn.vimconnNotFoundException("Flavor not found.")
777 return vimconnector.flavorlist[flavor_id]
778
779 def new_flavor(self, flavor_data):
780 """Adds a tenant flavor to VIM
781 flavor_data contains a dictionary with information, keys:
782 name: flavor name
783 ram: memory (cloud type) in MBytes
784 vcpus: cpus (cloud type)
785 extended: EPA parameters
786 - numas: #items requested in same NUMA
787 memory: number of 1G huge pages memory
788 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
789 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
790 - name: interface name
791 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
792 bandwidth: X Gbps; requested guarantee bandwidth
793 vpci: requested virtual PCI address
794 disk: disk size
795 is_public:
796 #TODO to concrete
797 Returns the flavor identifier"""
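# Illustrative only: a sketch of a flavor_data dict with EPA extensions. When 'numas' is present,
# the ram/vcpus derived from it override the plain 'ram'/'vcpus' values (see the loop below).
#
# example_flavor_data = {
#     'name': 'flavor-2c-4g',
#     'ram': 4096,                            # MB
#     'vcpus': 2,
#     'disk': 10,                             # GB
#     'extended': {
#         'numas': [{'memory': 4,             # GB of hugepage memory -> ram = 4 * 1024
#                    'paired-threads': 2}]    # -> vcpus = 2 * 2
#     }
# }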
798
799 # generate a new uuid put to internal dict and return it.
800 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
801 new_flavor=flavor_data
802 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
803 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
804 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
805
806 if not isinstance(ram, int):
807 raise vimconn.vimconnException("Non-integer value for ram")
808 elif not isinstance(cpu, int):
809 raise vimconn.vimconnException("Non-integer value for cpu")
810 elif not isinstance(disk, int):
811 raise vimconn.vimconnException("Non-integer value for disk")
812
813 extended_flv = flavor_data.get("extended")
814 if extended_flv:
815 numas=extended_flv.get("numas")
816 if numas:
817 for numa in numas:
818 #overwrite ram and vcpus
819 ram = numa['memory']*1024
820 if 'paired-threads' in numa:
821 cpu = numa['paired-threads']*2
822 elif 'cores' in numa:
823 cpu = numa['cores']
824 elif 'threads' in numa:
825 cpu = numa['threads']
826
827 new_flavor[FLAVOR_RAM_KEY] = ram
828 new_flavor[FLAVOR_VCPUS_KEY] = cpu
829 new_flavor[FLAVOR_DISK_KEY] = disk
830 # generate a new uuid put to internal dict and return it.
831 flavor_id = uuid.uuid4()
832 vimconnector.flavorlist[str(flavor_id)] = new_flavor
833 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
834
835 return str(flavor_id)
836
837 def delete_flavor(self, flavor_id):
838 """Deletes a tenant flavor from VIM identify by its id
839
840 Returns the used id or raise an exception
841 """
842 if flavor_id not in vimconnector.flavorlist:
843 raise vimconn.vimconnNotFoundException("Flavor not found.")
844
845 vimconnector.flavorlist.pop(flavor_id, None)
846 return flavor_id
847
848 def new_image(self, image_dict):
849 """
850 Adds a tenant image to VIM
851 Returns:
852 200, image-id if the image is created
853 <0, message if there is an error
854 """
855
856 return self.get_image_id_from_path(image_dict['location'])
857
858 def delete_image(self, image_id):
859 """
860 Deletes a tenant image from VIM
861 Args:
862 image_id is ID of Image to be deleted
863 Return:
864 returns the image identifier in UUID format or raises an exception on error
865 """
866 vca = self.connect_as_admin()
867 if not vca:
868 raise vimconn.vimconnConnectionException("self.connect() is failed")
869 # Get Catalog details
870 url_list = [self.vca.host, '/api/catalog/', image_id]
871 catalog_herf = ''.join(url_list)
872 response = Http.get(url=catalog_herf,
873 headers=vca.vcloud_session.get_vcloud_headers(),
874 verify=vca.verify,
875 logger=vca.logger)
876
877 if response.status_code != requests.codes.ok:
878 self.logger.debug("delete_image():GET REST API call {} failed. "\
879 "Return status code {}".format(catalog_herf,
880 response.status_code))
881 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
882
883 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
884 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
885 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
886
887 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
888 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
889 for catalogItem in catalogItems:
890 catalogItem_href = catalogItem.attrib['href']
891
892 #GET details of catalogItem
893 response = Http.get(url=catalogItem_href,
894 headers=vca.vcloud_session.get_vcloud_headers(),
895 verify=vca.verify,
896 logger=vca.logger)
897
898 if response.status_code != requests.codes.ok:
899 self.logger.debug("delete_image():GET REST API call {} failed. "\
900 "Return status code {}".format(catalog_herf,
901 response.status_code))
902 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
903 catalogItem,
904 image_id))
905
906 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
907 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
908 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
909 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
910
911 #Remove catalogItem
912 response = Http.delete(url= catalogitem_remove_href,
913 headers=vca.vcloud_session.get_vcloud_headers(),
914 verify=vca.verify,
915 logger=vca.logger)
916 if response.status_code == requests.codes.no_content:
917 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
918 else:
919 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
920
921 #Remove catalog
922 url_list = [self.vca.host, '/api/admin/catalog/', image_id]
923 catalog_remove_herf = ''.join(url_list)
924 response = Http.delete(url= catalog_remove_herf,
925 headers=vca.vcloud_session.get_vcloud_headers(),
926 verify=vca.verify,
927 logger=vca.logger)
928
929 if response.status_code == requests.codes.no_content:
930 self.logger.debug("Deleted Catalog {}".format(image_id))
931 return image_id
932 else:
933 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
934
935
936 def catalog_exists(self, catalog_name, catalogs):
937 """
938
939 :param catalog_name:
940 :param catalogs:
941 :return:
942 """
943 for catalog in catalogs:
944 if catalog.name == catalog_name:
945 return True
946 return False
947
948 def create_vimcatalog(self, vca=None, catalog_name=None):
949 """ Create new catalog entry in vCloud director.
950
951 Args
952 vca: vCloud director.
953 catalog_name catalog that client wish to create. Note no validation done for a name.
954 Client must make sure that provide valid string representation.
955
956 Return (bool) True if catalog created.
957
958 """
959 try:
960 task = vca.create_catalog(catalog_name, catalog_name)
961 result = vca.block_until_completed(task)
962 if not result:
963 return False
964 catalogs = vca.get_catalogs()
965 except:
966 return False
967 return self.catalog_exists(catalog_name, catalogs)
968
969 # noinspection PyIncorrectDocstring
970 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
971 description='', progress=False, chunk_bytes=128 * 1024):
972 """
973 Uploads an OVF file to a vCloud catalog
974
975 :param chunk_bytes:
976 :param progress:
977 :param description:
978 :param image_name:
979 :param vca:
980 :param catalog_name: (str): The name of the catalog to upload the media.
981 :param media_file_name: (str): The name of the local media file to upload.
982 :return: (bool) True if the media file was successfully uploaded, false otherwise.
983 """
984 os.path.isfile(media_file_name)
985 statinfo = os.stat(media_file_name)
986
987 # find a catalog entry where we upload the OVF.
988 # create the vApp Template and check the status; if vCD is able to read the OVF it will respond with the
989 # appropriate status change.
990 # if vCD can parse the OVF we upload the VMDK file
991 try:
992 for catalog in vca.get_catalogs():
993 if catalog_name != catalog.name:
994 continue
995 link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
996 link.get_rel() == 'add', catalog.get_Link())
997 assert len(link) == 1
998 data = """
999 <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
1000 """ % (escape(catalog_name), escape(description))
1001 headers = vca.vcloud_session.get_vcloud_headers()
1002 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1003 response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
1004 if response.status_code == requests.codes.created:
1005 catalogItem = XmlElementTree.fromstring(response.content)
1006 entity = [child for child in catalogItem if
1007 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1008 href = entity.get('href')
1009 template = href
1010 response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
1011 verify=vca.verify, logger=self.logger)
1012
1013 if response.status_code == requests.codes.ok:
1014 media = mediaType.parseString(response.content, True)
1015 link = filter(lambda link: link.get_rel() == 'upload:default',
1016 media.get_Files().get_File()[0].get_Link())[0]
1017 headers = vca.vcloud_session.get_vcloud_headers()
1018 headers['Content-Type'] = 'Content-Type text/xml'
1019 response = Http.put(link.get_href(),
1020 data=open(media_file_name, 'rb'),
1021 headers=headers,
1022 verify=vca.verify, logger=self.logger)
1023 if response.status_code != requests.codes.ok:
1024 self.logger.debug(
1025 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1026 media_file_name))
1027 return False
1028
1029 # TODO fix this with an async block
1030 time.sleep(5)
1031
1032 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1033
1034 # uploading VMDK file
1035 # check status of OVF upload and upload remaining files.
1036 response = Http.get(template,
1037 headers=vca.vcloud_session.get_vcloud_headers(),
1038 verify=vca.verify,
1039 logger=self.logger)
1040
1041 if response.status_code == requests.codes.ok:
1042 media = mediaType.parseString(response.content, True)
1043 number_of_files = len(media.get_Files().get_File())
1044 for index in xrange(0, number_of_files):
1045 links_list = filter(lambda link: link.get_rel() == 'upload:default',
1046 media.get_Files().get_File()[index].get_Link())
1047 for link in links_list:
1048 # we skip ovf since it already uploaded.
1049 if 'ovf' in link.get_href():
1050 continue
1051 # The OVF file and VMDK must be in a same directory
1052 head, tail = os.path.split(media_file_name)
1053 file_vmdk = head + '/' + link.get_href().split("/")[-1]
1054 if not os.path.isfile(file_vmdk):
1055 return False
1056 statinfo = os.stat(file_vmdk)
1057 if statinfo.st_size == 0:
1058 return False
1059 hrefvmdk = link.get_href()
1060
1061 if progress:
1062 print("Uploading file: {}".format(file_vmdk))
1063 if progress:
1064 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1065 FileTransferSpeed()]
1066 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1067
1068 bytes_transferred = 0
1069 f = open(file_vmdk, 'rb')
1070 while bytes_transferred < statinfo.st_size:
1071 my_bytes = f.read(chunk_bytes)
1072 if len(my_bytes) <= chunk_bytes:
1073 headers = vca.vcloud_session.get_vcloud_headers()
1074 headers['Content-Range'] = 'bytes %s-%s/%s' % (
1075 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1076 headers['Content-Length'] = str(len(my_bytes))
1077 response = Http.put(hrefvmdk,
1078 headers=headers,
1079 data=my_bytes,
1080 verify=vca.verify,
1081 logger=None)
1082
1083 if response.status_code == requests.codes.ok:
1084 bytes_transferred += len(my_bytes)
1085 if progress:
1086 progress_bar.update(bytes_transferred)
1087 else:
1088 self.logger.debug(
1089 'file upload failed with error: [%s] %s' % (response.status_code,
1090 response.content))
1091
1092 f.close()
1093 return False
1094 f.close()
1095 if progress:
1096 progress_bar.finish()
1097 time.sleep(10)
1098 return True
1099 else:
1100 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1101 format(catalog_name, media_file_name))
1102 return False
1103 except Exception as exp:
1104 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1105 .format(catalog_name,media_file_name, exp))
1106 raise vimconn.vimconnException(
1107 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1108 .format(catalog_name,media_file_name, exp))
1109
1110 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1111 return False
1112
1113 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1114 """Upload media file"""
1115 # TODO add named parameters for readability
1116
1117 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1118 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1119
1120 def validate_uuid4(self, uuid_string=None):
1121 """ Method validate correct format of UUID.
1122
1123 Return: true if string represent valid uuid
1124 """
1125 try:
1126 val = uuid.UUID(uuid_string, version=4)
1127 except ValueError:
1128 return False
1129 return True
1130
1131 def get_catalogid(self, catalog_name=None, catalogs=None):
1132 """ Method check catalog and return catalog ID in UUID format.
1133
1134 Args
1135 catalog_name: catalog name as string
1136 catalogs: list of catalogs.
1137
1138 Return: catalog UUID or None if not found
1139 """
1140
1141 for catalog in catalogs:
1142 if catalog.name == catalog_name:
1143 catalog_id = catalog.get_id().split(":")
1144 return catalog_id[3]
1145 return None
1146
1147 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1148 """ Method check catalog and return catalog name lookup done by catalog UUID.
1149
1150 Args
1151 catalog_name: catalog name as string
1152 catalogs: list of catalogs.
1153
1154 Return: catalogs name or None
1155 """
1156
1157 if not self.validate_uuid4(uuid_string=catalog_uuid):
1158 return None
1159
1160 for catalog in catalogs:
1161 catalog_id = catalog.get_id().split(":")[3]
1162 if catalog_id == catalog_uuid:
1163 return catalog.name
1164 return None
1165
1166 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1167 """ Method check catalog and return catalog name lookup done by catalog UUID.
1168
1169 Args
1170 catalog_name: catalog name as string
1171 catalogs: list of catalogs.
1172
1173 Return: catalogs name or None
1174 """
1175
1176 if not self.validate_uuid4(uuid_string=catalog_uuid):
1177 return None
1178
1179 for catalog in catalogs:
1180 catalog_id = catalog.get_id().split(":")[3]
1181 if catalog_id == catalog_uuid:
1182 return catalog
1183 return None
1184
1185 def get_image_id_from_path(self, path=None, progress=False):
1186 """ Method upload OVF image to vCloud director.
1187
1188 Each OVF image represented as single catalog entry in vcloud director.
1189 The method check for existing catalog entry. The check done by file name without file extension.
1190
1191 if given catalog name already present method will respond with existing catalog uuid otherwise
1192 it will create new catalog entry and upload OVF file to newly created catalog.
1193
1194 If method can't create catalog entry or upload a file it will throw exception.
1195
1196 Method accept boolean flag progress that will output progress bar. It useful method
1197 for standalone upload use case. In case to test large file upload.
1198
1199 Args
1200 path: - valid path to OVF file.
1201 progress - boolean progress bar show progress bar.
1202
1203 Return: if image uploaded correct method will provide image catalog UUID.
1204 """
1205
1206 if not path:
1207 raise vimconn.vimconnException("Image path can't be None.")
1208
1209 if not os.path.isfile(path):
1210 raise vimconn.vimconnException("Can't read file. File not found.")
1211
1212 if not os.access(path, os.R_OK):
1213 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1214
1215 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1216
1217 dirpath, filename = os.path.split(path)
1218 flname, file_extension = os.path.splitext(path)
1219 if file_extension != '.ovf':
1220 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1221 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1222
1223 catalog_name = os.path.splitext(filename)[0]
1224 catalog_md5_name = hashlib.md5(path).hexdigest()
1225 self.logger.debug("File name {} Catalog Name {} file path {} "
1226 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1227
1228 try:
1229 catalogs = self.vca.get_catalogs()
1230 except Exception as exp:
1231 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1232 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1233
1234 if len(catalogs) == 0:
1235 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1236 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1237 if not result:
1238 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1239 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1240 media_name=filename, medial_file_name=path, progress=progress)
1241 if not result:
1242 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1243 return self.get_catalogid(catalog_name, self.vca.get_catalogs())
1244 else:
1245 for catalog in catalogs:
1246 # search for existing catalog if we find same name we return ID
1247 # TODO optimize this
1248 if catalog.name == catalog_md5_name:
1249 self.logger.debug("Found existing catalog entry for {} "
1250 "catalog id {}".format(catalog_name,
1251 self.get_catalogid(catalog_md5_name, catalogs)))
1252 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1253
1254 # if we didn't find existing catalog we create a new one and upload image.
1255 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1256 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1257 if not result:
1258 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1259
1260 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1261 media_name=filename, medial_file_name=path, progress=progress)
1262 if not result:
1263 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1264
1265 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1266
1267 def get_image_list(self, filter_dict={}):
1268 '''Obtain tenant images from VIM
1269 Filter_dict can be:
1270 name: image name
1271 id: image uuid
1272 checksum: image checksum
1273 location: image path
1274 Returns the image list of dictionaries:
1275 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1276 List can be empty
1277 '''
1278
1279 try:
1280 image_list = []
1281 catalogs = self.vca.get_catalogs()
1282 if len(catalogs) == 0:
1283 return image_list
1284 else:
1285 for catalog in catalogs:
1286 catalog_uuid = catalog.get_id().split(":")[3]
1287 name = catalog.name
1288 filtered_dict = {}
1289 if filter_dict.get("name") and filter_dict["name"] != name:
1290 continue
1291 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1292 continue
1293 filtered_dict ["name"] = name
1294 filtered_dict ["id"] = catalog_uuid
1295 image_list.append(filtered_dict)
1296
1297 self.logger.debug("List of already created catalog items: {}".format(image_list))
1298 return image_list
1299 except Exception as exp:
1300 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1301
1302 def get_vappid(self, vdc=None, vapp_name=None):
1303 """ Method takes vdc object and vApp name and returns vapp uuid or None
1304
1305 Args:
1306 vdc: The VDC object.
1307 vapp_name: is application vappp name identifier
1308
1309 Returns:
1310 The return vApp name otherwise None
1311 """
1312 if vdc is None or vapp_name is None:
1313 return None
1314 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1315 try:
1316 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1317 vdc.ResourceEntities.ResourceEntity)
1318 if len(refs) == 1:
1319 return refs[0].href.split("vapp")[1][1:]
1320 except Exception as e:
1321 self.logger.exception(e)
1322 return False
1323 return None
1324
1325 def check_vapp(self, vdc=None, vapp_uuid=None):
1326 """ Method Method returns True or False if vapp deployed in vCloud director
1327
1328 Args:
1329 vca: Connector to VCA
1330 vdc: The VDC object.
1331 vappid: vappid is application identifier
1332
1333 Returns:
1334 True if the vApp is deployed
1335 :param vdc:
1336 :param vapp_uuid:
1337 """
1338 try:
1339 refs = filter(lambda ref:
1340 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1341 vdc.ResourceEntities.ResourceEntity)
1342 for ref in refs:
1343 vappid = ref.href.split("vapp")[1][1:]
1344 # find vapp with respected vapp uuid
1345 if vappid == vapp_uuid:
1346 return True
1347 except Exception as e:
1348 self.logger.exception(e)
1349 return False
1350 return False
1351
1352 def get_namebyvappid(self, vdc=None, vapp_uuid=None):
1353 """Method returns vApp name from vCD and lookup done by vapp_id.
1354
1355 Args:
1356 vca: Connector to VCA
1357 vdc: The VDC object.
1358 vapp_uuid: vappid is application identifier
1359
1360 Returns:
1361 The vApp name, otherwise None
1362 """
1363
1364 try:
1365 refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1366 vdc.ResourceEntities.ResourceEntity)
1367 for ref in refs:
1368 # we care only about UUID the rest doesn't matter
1369 vappid = ref.href.split("vapp")[1][1:]
1370 if vappid == vapp_uuid:
1371 response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
1372 logger=self.logger)
1373
1374 #Retry login if session expired & retry sending request
1375 if response.status_code == 403:
1376 response = self.retry_rest('GET', ref.href)
1377
1378 tree = XmlElementTree.fromstring(response.content)
1379 return tree.attrib['name']
1380 except Exception as e:
1381 self.logger.exception(e)
1382 return None
1383 return None
1384
1385 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1386 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1387 """Adds a VM instance to VIM
1388 Params:
1389 'start': (boolean) indicates if VM must start or created in pause mode.
1390 'image_id','flavor_id': image and flavor VIM id to use for the VM
1391 'net_list': list of interfaces, each one is a dictionary with:
1392 'name': (optional) name for the interface.
1393 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1394 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1395 'model': (optional and only have sense for type==virtual) interface model: virtio, e2000, ...
1396 'mac_address': (optional) mac address to assign to this interface
1397 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1398 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1399 'type': (mandatory) can be one of:
1400 'virtual', in this case always connected to a network of type 'net_type=bridge'
1401 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
1402 can be created unconnected
1403 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1404 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1405 are allocated on the same physical NIC
1406 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1407 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1408 or True, it must apply the default VIM behaviour
1409 After execution the method will add the key:
1410 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1411 interface. 'net_list' is modified
1412 'cloud_config': (optional) dictionary with:
1413 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1414 'users': (optional) list of users to be inserted, each item is a dict with:
1415 'name': (mandatory) user name,
1416 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1417 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1418 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1419 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1420 'dest': (mandatory) string with the destination absolute path
1421 'encoding': (optional, by default text). Can be one of:
1422 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1423 'content' (mandatory): string with the content of the file
1424 'permissions': (optional) string with file permissions, typically octal notation '0644'
1425 'owner': (optional) file owner, string with the format 'owner:group'
1426 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1427 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1428 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1429 'size': (mandatory) string with the size of the disk in GB
1430 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
1431 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1432 availability_zone_index is None
1433 Returns a tuple with the instance identifier and created_items or raises an exception on error
1434 created_items can be None or a dictionary where this method can include key-values that will be passed to
1435 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1436 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1437 as not present.
1438 """
1439 self.logger.info("Creating new instance for entry {}".format(name))
1440 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
1441 description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
1442
1443 # new vm name = vmname + '-' + uuid
1444 new_vm_name = [name, '-', str(uuid.uuid4())]
1445 vmname_andid = ''.join(new_vm_name)
1446
1447 # if vm already deployed we return existing uuid
1448 # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
1449 # if vapp_uuid is not None:
1450 # return vapp_uuid
1451
1452 # we check for presence of VDC, Catalog entry and Flavor.
1453 vdc = self.get_vdc_details()
1454 if vdc is None:
1455 raise vimconn.vimconnNotFoundException(
1456 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1457 catalogs = self.vca.get_catalogs()
1458 if catalogs is None:
1459 #Retry once, if failed by refreshing token
1460 self.get_token()
1461 catalogs = self.vca.get_catalogs()
1462 if catalogs is None:
1463 raise vimconn.vimconnNotFoundException(
1464 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1465
1466 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1467 if catalog_hash_name:
1468 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1469 else:
1470 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1471 "(Failed retrieve catalog information {})".format(name, image_id))
1472
1473
1474 # Set vCPU and Memory based on flavor.
1475 vm_cpus = None
1476 vm_memory = None
1477 vm_disk = None
1478 numas = None
1479
1480 if flavor_id is not None:
1481 if flavor_id not in vimconnector.flavorlist:
1482 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1483 "Failed retrieve flavor information "
1484 "flavor id {}".format(name, flavor_id))
1485 else:
1486 try:
1487 flavor = vimconnector.flavorlist[flavor_id]
1488 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1489 vm_memory = flavor[FLAVOR_RAM_KEY]
1490 vm_disk = flavor[FLAVOR_DISK_KEY]
1491 extended = flavor.get("extended", None)
1492 if extended:
1493 numas=extended.get("numas", None)
1494
1495 except Exception as exp:
1496 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1497
1498 # image upload creates template name as catalog name space Template.
1499 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1500 power_on = 'false'
1501 if start:
1502 power_on = 'true'
1503
1504 # client must provide at least one entry in net_list, otherwise we report an error
1505 # If the net type is mgmt, then configure it as the primary net & use its NIC index as the primary NIC
1506 # If there is no mgmt net, then the first network in net_list is considered the primary net.
1507 primary_net = None
1508 primary_netname = None
1509 network_mode = 'bridged'
1510 if net_list is not None and len(net_list) > 0:
1511 for net in net_list:
1512 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1513 primary_net = net
1514 if primary_net is None:
1515 primary_net = net_list[0]
1516
1517 try:
1518 primary_net_id = primary_net['net_id']
1519 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1520 if 'name' in network_dict:
1521 primary_netname = network_dict['name']
1522
1523 except KeyError:
1524 raise vimconn.vimconnException("Corrupted primary network information. {}".format(primary_net))
1525 else:
1526 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create vApp {}: network list is empty".format(name))
1527
1528 # use: 'data', 'bridge', 'mgmt'
1529 # create vApp. Set vcpu and ram based on flavor id.
1530 try:
1531 for retry in (1,2):
1532 vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1533 self.get_catalogbyid(image_id, catalogs),
1534 network_name=None, # None while creating vapp
1535 network_mode=network_mode,
1536 vm_name=vmname_andid,
1537 vm_cpus=vm_cpus, # can be None if flavor is None
1538 vm_memory=vm_memory) # can be None if flavor is None
1539
1540 if not vapptask and retry==1:
1541 self.get_token() # Retry getting token
1542 continue
1543 else:
1544 break
1545
1546 if vapptask is None or vapptask is False:
1547 raise vimconn.vimconnUnexpectedResponse(
1548 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1549 if type(vapptask) is VappTask:
1550 self.vca.block_until_completed(vapptask)
1551
1552 except Exception as exp:
1553 raise vimconn.vimconnUnexpectedResponse(
1554 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1555
1556 # we should now have the vApp in an undeployed state.
1557 try:
1558 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1559
1560 except Exception as exp:
1561 raise vimconn.vimconnUnexpectedResponse(
1562 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1563 .format(vmname_andid, exp))
1564
1565 if vapp_uuid is None:
1566 raise vimconn.vimconnUnexpectedResponse(
1567 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1568 vmname_andid))
1569
1570 # Add PCI passthrough/SR-IOV configurations
1571 vm_obj = None
1572 pci_devices_info = []
1573 sriov_net_info = []
1574 reserve_memory = False
1575
1576 for net in net_list:
1577 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1578 pci_devices_info.append(net)
1579 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
1580 sriov_net_info.append(net)
1581
1582 #Add PCI
1583 if len(pci_devices_info) > 0:
1584 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1585 vmname_andid ))
1586 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1587 pci_devices_info,
1588 vmname_andid)
1589 if PCI_devices_status:
1590 self.logger.info("Added PCI devives {} to VM {}".format(
1591 pci_devices_info,
1592 vmname_andid)
1593 )
1594 reserve_memory = True
1595 else:
1596 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1597 pci_devices_info,
1598 vmname_andid)
1599 )
1600
1601 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1602 # Modify vm disk
1603 if vm_disk:
1604 # Assuming there is only one disk in the OVF and fast provisioning in the organization VDC is disabled
1605 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1606 if result :
1607 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1608
1609 #Add new or existing disks to vApp
1610 if disk_list:
1611 added_existing_disk = False
1612 for disk in disk_list:
1613 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1614 image_id = disk['image_id']
1615 # Adding CD-ROM to VM
1616 # will revisit this code once the specification is ready to support this feature
1617 self.insert_media_to_vm(vapp, image_id)
1618 elif "image_id" in disk and disk["image_id"] is not None:
1619 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1620 disk["image_id"] , vapp_uuid))
1621 self.add_existing_disk(catalogs=catalogs,
1622 image_id=disk["image_id"],
1623 size = disk["size"],
1624 template_name=templateName,
1625 vapp_uuid=vapp_uuid
1626 )
1627 added_existing_disk = True
1628 else:
1629 # Wait until the previously added existing disk is reflected in the vCD database/API
1630 if added_existing_disk:
1631 time.sleep(5)
1632 added_existing_disk = False
1633 self.add_new_disk(vapp_uuid, disk['size'])
1634
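# Hedged example of the 'numas' entries handled below; only 'paired-threads-id' is read here and
# the nested pair layout is an assumption for illustration:
#   numas = [{'paired-threads-id': [['0', '2'], ['1', '3']]}]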
1635 if numas:
1636 # Assigning numa affinity setting
1637 for numa in numas:
1638 if 'paired-threads-id' in numa:
1639 paired_threads_id = numa['paired-threads-id']
1640 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1641
1642 # add NICs & connect to networks in netlist
1643 try:
1644 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1645 nicIndex = 0
1646 primary_nic_index = 0
1647 for net in net_list:
1648 # openmano uses the network id in UUID format.
1649 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
1650 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1651 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1652
1653 if 'net_id' not in net:
1654 continue
1655
1656 # Using net_id as vim_id, i.e. the VIM interface id, as we do not have a separate VIM interface id
1657 # The same value will be returned in refresh_vms_status() as vim_interface_id
1658 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1659
1660 interface_net_id = net['net_id']
1661 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1662 interface_network_mode = net['use']
1663
1664 if interface_network_mode == 'mgmt':
1665 primary_nic_index = nicIndex
1666
1667 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1668 - DHCP (The IP address is obtained from a DHCP service.)
1669 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1670 - NONE (No IP addressing mode specified.)"""
1671
1672 if primary_netname is not None:
1673 nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
1674 if len(nets) == 1:
1675 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
1676
1677 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1678 task = vapp.connect_to_network(nets[0].name, nets[0].href)
1679 if type(task) is GenericTask:
1680 self.vca.block_until_completed(task)
1681 # connect network to VM - with all DHCP by default
1682
1683 type_list = ('PF', 'PCI-PASSTHROUGH', 'VF', 'SR-IOV', 'VFnotShared')
1684 if 'type' in net and net['type'] not in type_list:
1685 # fetching nic type from vnf
1686 if 'model' in net:
1687 if net['model'].lower() == 'virtio':
1688 nic_type = 'VMXNET3'
1689 else:
1690 nic_type = net['model']
1691
1692 self.logger.info("new_vminstance(): adding network adapter "\
1693 "to a network {}".format(nets[0].name))
1694 self.add_network_adapter_to_vms(vapp, nets[0].name,
1695 primary_nic_index,
1696 nicIndex,
1697 net,
1698 nic_type=nic_type)
1699 else:
1700 self.logger.info("new_vminstance(): adding network adapter "\
1701 "to a network {}".format(nets[0].name))
1702 self.add_network_adapter_to_vms(vapp, nets[0].name,
1703 primary_nic_index,
1704 nicIndex,
1705 net)
1706 nicIndex += 1
1707
1708 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1709 # cloud-init for ssh-key injection
1710 if cloud_config:
1711 self.cloud_init(vapp,cloud_config)
1712
1713 # deploy and power on vm
1714 self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
1715 deploytask = vapp.deploy(powerOn=False)
1716 if type(deploytask) is GenericTask:
1717 self.vca.block_until_completed(deploytask)
1718
1719 # ############# Stub code for SRIOV #################
1720 #Add SRIOV
1721 # if len(sriov_net_info) > 0:
1722 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1723 # vmname_andid ))
1724 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1725 # sriov_net_info,
1726 # vmname_andid)
1727 # if sriov_status:
1728 # self.logger.info("Added SRIOV {} to VM {}".format(
1729 # sriov_net_info,
1730 # vmname_andid)
1731 # )
1732 # reserve_memory = True
1733 # else:
1734 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1735 # sriov_net_info,
1736 # vmname_andid)
1737 # )
1738
1739 # If the VM has PCI devices or SR-IOV, reserve memory for the VM
1740 if reserve_memory:
1741 memReserve = vm_obj.config.hardware.memoryMB
1742 spec = vim.vm.ConfigSpec()
1743 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1744 task = vm_obj.ReconfigVM_Task(spec=spec)
1745 if task:
1746 result = self.wait_for_vcenter_task(task, vcenter_conect)
1747 self.logger.info("Reserved memory {} MB for "
1748 "VM VM status: {}".format(str(memReserve), result))
1749 else:
1750 self.logger.info("Fail to reserved memory {} to VM {}".format(
1751 str(memReserve), str(vm_obj)))
1752
1753 self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
1754
1755 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1756 poweron_task = vapp.poweron()
1757 if type(poweron_task) is GenericTask:
1758 self.vca.block_until_completed(poweron_task)
1759
1760 except Exception as exp :
1761 # this may happen if a mandatory entry in the dict is empty, or on some other pyvcloud exception
1762 self.logger.debug("new_vminstance(): Failed to create new vm instance {} with exception {}"
1763 .format(name, exp))
1764 raise vimconn.vimconnException("new_vminstance(): Failed to create new vm instance {} with exception {}"
1765 .format(name, exp))
1766
1767 # check if the vApp is deployed; if that is the case return the vApp UUID, otherwise raise an exception
1768 wait_time = 0
1769 vapp_uuid = None
1770 while wait_time <= MAX_WAIT_TIME:
1771 try:
1772 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1773 except Exception as exp:
1774 raise vimconn.vimconnUnexpectedResponse(
1775 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1776 .format(vmname_andid, exp))
1777
1778 if vapp and vapp.me.deployed:
1779 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1780 break
1781 else:
1782 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1783 time.sleep(INTERVAL_TIME)
1784
1785 wait_time +=INTERVAL_TIME
1786
1787 if vapp_uuid is not None:
1788 return vapp_uuid, None
1789 else:
1790 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create new vm instance {}".format(name))
1791
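# A minimal usage sketch of new_vminstance() (the instance name 'vim' and all ids are hypothetical);
# it returns the vApp UUID and created_items (None here, see the return above):
#   vapp_uuid, created_items = vim.new_vminstance(name='vnf1', description='test VNF', start=True,
#                                                 image_id='<catalog-uuid>', flavor_id='<flavor-uuid>',
#                                                 net_list=[{'use': 'mgmt', 'net_id': '<net-uuid>',
#                                                            'type': 'virtual', 'name': 'eth0'}])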
1792 ##
1793 ##
1794 ## based on current discussion
1795 ##
1796 ##
1797 ## server:
1798 # created: '2016-09-08T11:51:58'
1799 # description: simple-instance.linux1.1
1800 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1801 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1802 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1803 # status: ACTIVE
1804 # error_msg:
1805 # interfaces: …
1806 #
1807 def get_vminstance(self, vim_vm_uuid=None):
1808 """Returns the VM instance information from VIM"""
1809
1810 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1811
1812 vdc = self.get_vdc_details()
1813 if vdc is None:
1814 raise vimconn.vimconnConnectionException(
1815 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1816
1817 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1818 if not vm_info_dict:
1819 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1820 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1821
1822 status_key = vm_info_dict['status']
1823 error = ''
1824 try:
1825 vm_dict = {'created': vm_info_dict['created'],
1826 'description': vm_info_dict['name'],
1827 'status': vcdStatusCode2manoFormat[int(status_key)],
1828 'hostId': vm_info_dict['vmuuid'],
1829 'error_msg': error,
1830 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1831
1832 if 'interfaces' in vm_info_dict:
1833 vm_dict['interfaces'] = vm_info_dict['interfaces']
1834 else:
1835 vm_dict['interfaces'] = []
1836 except KeyError:
1837 vm_dict = {'created': '',
1838 'description': '',
1839 'status': vcdStatusCode2manoFormat[int(-1)],
1840 'hostId': vm_info_dict['vmuuid'],
1841 'error_msg': "Inconsistency state",
1842 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1843
1844 return vm_dict
1845
1846 def delete_vminstance(self, vm__vim_uuid, created_items=None):
1847 """Method poweroff and remove VM instance from vcloud director network.
1848
1849 Args:
1850 vm__vim_uuid: VM UUID
1851
1852 Returns:
1853 Returns the instance identifier
1854 """
1855
1856 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1857
1858 vdc = self.get_vdc_details()
1859 if vdc is None:
1860 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1861 self.tenant_name))
1862 raise vimconn.vimconnException(
1863 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1864
1865 try:
1866 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1867 if vapp_name is None:
1868 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1869 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1870 else:
1871 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1872
1873 # Delete vApp and wait for status change if task executed and vApp is None.
1874 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1875
1876 if vapp:
1877 if vapp.me.deployed:
1878 self.logger.info("Powering off vApp {}".format(vapp_name))
1879 #Power off vApp
1880 powered_off = False
1881 wait_time = 0
1882 while wait_time <= MAX_WAIT_TIME:
1883 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1884 if not vapp:
1885 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1886 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1887
1888 power_off_task = vapp.poweroff()
1889 if type(power_off_task) is GenericTask:
1890 result = self.vca.block_until_completed(power_off_task)
1891 if result:
1892 powered_off = True
1893 break
1894 else:
1895 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1896 time.sleep(INTERVAL_TIME)
1897
1898 wait_time +=INTERVAL_TIME
1899 if not powered_off:
1900 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1901 else:
1902 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1903
1904 #Undeploy vApp
1905 self.logger.info("Undeploy vApp {}".format(vapp_name))
1906 wait_time = 0
1907 undeployed = False
1908 while wait_time <= MAX_WAIT_TIME:
1909 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1910 if not vapp:
1911 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1912 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1913 undeploy_task = vapp.undeploy(action='powerOff')
1914
1915 if type(undeploy_task) is GenericTask:
1916 result = self.vca.block_until_completed(undeploy_task)
1917 if result:
1918 undeployed = True
1919 break
1920 else:
1921 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1922 time.sleep(INTERVAL_TIME)
1923
1924 wait_time +=INTERVAL_TIME
1925
1926 if not undeployed:
1927 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1928
1929 # delete vapp
1930 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1931 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1932
1933 if vapp is not None:
1934 wait_time = 0
1935 result = False
1936
1937 while wait_time <= MAX_WAIT_TIME:
1938 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1939 if not vapp:
1940 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1941 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1942
1943 delete_task = vapp.delete()
1944
1945 if type(delete_task) is GenericTask:
1946 result = self.vca.block_until_completed(delete_task)
1948 if result:
1949 break
1950 else:
1951 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1952 time.sleep(INTERVAL_TIME)
1953
1954 wait_time +=INTERVAL_TIME
1955
1956 if not result:
1957 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1958
1959 except:
1960 self.logger.debug(traceback.format_exc())
1961 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1962
1963 if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
1964 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
1965 return vm__vim_uuid
1966 else:
1967 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1968
1969 def refresh_vms_status(self, vm_list):
1970 """Get the status of the virtual machines and their interfaces/ports
1971 Params: the list of VM identifiers
1972 Returns a dictionary with:
1973 vm_id: #VIM id of this Virtual Machine
1974 status: #Mandatory. Text with one of:
1975 # DELETED (not found at vim)
1976 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1977 # OTHER (Vim reported other status not understood)
1978 # ERROR (VIM indicates an ERROR status)
1979 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
1980 # CREATING (on building process), ERROR
1981 # ACTIVE:NoMgmtIP (active, but none of its interfaces has an IP address)
1982 #
1983 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1984 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1985 interfaces:
1986 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1987 mac_address: #Text format XX:XX:XX:XX:XX:XX
1988 vim_net_id: #network id where this interface is connected
1989 vim_interface_id: #interface/port VIM id
1990 ip_address: #null, or text with IPv4, IPv6 address
1991 """
1992
1993 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
1994
1995 vdc = self.get_vdc_details()
1996 if vdc is None:
1997 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1998
1999 vms_dict = {}
2000 nsx_edge_list = []
2001 for vmuuid in vm_list:
2002 vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
2003 if vmname is not None:
2004
2005 try:
2006 vm_pci_details = self.get_vm_pci_details(vmuuid)
2007 the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
2008 vm_info = the_vapp.get_vms_details()
2009 vm_status = vm_info[0]['status']
2010 vm_info[0].update(vm_pci_details)
2011
2012 vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
2013 'error_msg': '',
2014 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2015
2016 # get networks
2017 vm_app_networks = the_vapp.get_vms_network_info()
2018 for vapp_network in vm_app_networks:
2019 for vm_network in vapp_network:
2020 if vm_network['name'] == vmname:
2021 #Assign IP Address based on MAC Address in NSX DHCP lease info
2022 if vm_network['ip'] is None:
2023 if not nsx_edge_list:
2024 nsx_edge_list = self.get_edge_details()
2025 if nsx_edge_list is None:
2026 raise vimconn.vimconnException("refresh_vms_status:"\
2027 "Failed to get edge details from NSX Manager")
2028 if vm_network['mac'] is not None:
2029 vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
2030
2031 vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
2032 interface = {"mac_address": vm_network['mac'],
2033 "vim_net_id": vm_net_id,
2034 "vim_interface_id": vm_net_id,
2035 'ip_address': vm_network['ip']}
2036 # interface['vim_info'] = yaml.safe_dump(vm_network)
2037 vm_dict["interfaces"].append(interface)
2038 # add a vm to vm dict
2039 vms_dict.setdefault(vmuuid, vm_dict)
2040 except Exception as exp:
2041 self.logger.debug("Error in response {}".format(exp))
2042 self.logger.debug(traceback.format_exc())
2043
2044 return vms_dict
2045
2046
2047 def get_edge_details(self):
2048 """Get the NSX edge list from NSX Manager
2049 Returns list of NSX edges
2050 """
2051 edge_list = []
2052 rheaders = {'Content-Type': 'application/xml'}
2053 nsx_api_url = '/api/4.0/edges'
2054
2055 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2056
2057 try:
2058 resp = requests.get(self.nsx_manager + nsx_api_url,
2059 auth = (self.nsx_user, self.nsx_password),
2060 verify = False, headers = rheaders)
2061 if resp.status_code == requests.codes.ok:
2062 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2063 for edge_pages in paged_Edge_List:
2064 if edge_pages.tag == 'edgePage':
2065 for edge_summary in edge_pages:
2066 if edge_summary.tag == 'pagingInfo':
2067 for element in edge_summary:
2068 if element.tag == 'totalCount' and element.text == '0':
2069 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2070 .format(self.nsx_manager))
2071
2072 if edge_summary.tag == 'edgeSummary':
2073 for element in edge_summary:
2074 if element.tag == 'id':
2075 edge_list.append(element.text)
2076 else:
2077 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2078 .format(self.nsx_manager))
2079
2080 if not edge_list:
2081 raise vimconn.vimconnException("get_edge_details: "\
2082 "No NSX edge details found: {}"
2083 .format(self.nsx_manager))
2084 else:
2085 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2086 return edge_list
2087 else:
2088 self.logger.debug("get_edge_details: "
2089 "Failed to get NSX edge details from NSX Manager: {}"
2090 .format(resp.content))
2091 return None
2092
2093 except Exception as exp:
2094 self.logger.debug("get_edge_details: "\
2095 "Failed to get NSX edge details from NSX Manager: {}"
2096 .format(exp))
2097 raise vimconn.vimconnException("get_edge_details: "\
2098 "Failed to get NSX edge details from NSX Manager: {}"
2099 .format(exp))
2100
2101
2102 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2103 """Get IP address details from NSX edges, using the MAC address
2104 PARAMS: nsx_edges : List of NSX edges
2105 mac_address : Find IP address corresponding to this MAC address
2106 Returns: IP address corresponding to the provided MAC address
2107 """
2108
2109 ip_addr = None
2110 rheaders = {'Content-Type': 'application/xml'}
2111
2112 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2113
2114 try:
2115 for edge in nsx_edges:
2116 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2117
2118 resp = requests.get(self.nsx_manager + nsx_api_url,
2119 auth = (self.nsx_user, self.nsx_password),
2120 verify = False, headers = rheaders)
2121
2122 if resp.status_code == requests.codes.ok:
2123 dhcp_leases = XmlElementTree.fromstring(resp.text)
2124 for child in dhcp_leases:
2125 if child.tag == 'dhcpLeaseInfo':
2126 dhcpLeaseInfo = child
2127 for leaseInfo in dhcpLeaseInfo:
edge_mac_addr = None
2128 for elem in leaseInfo:
2129 if (elem.tag)=='macAddress':
2130 edge_mac_addr = elem.text
2131 if (elem.tag)=='ipAddress':
2132 ip_addr = elem.text
2133 if edge_mac_addr is not None:
2134 if edge_mac_addr == mac_address:
2135 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2136 .format(ip_addr, mac_address,edge))
2137 return ip_addr
2138 else:
2139 self.logger.debug("get_ipaddr_from_NSXedge: "\
2140 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2141 .format(resp.content))
2142
2143 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2144 return None
2145
2146 except XmlElementTree.ParseError as Err:
2147 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2148
2149
2150 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
2151 """Send and action over a VM instance from VIM
2152 Returns the vm_id if the action was successfully sent to the VIM"""
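# Illustrative action_dict values handled below (one key per call; the value is not used):
#   {"start": None}, {"pause": None}, {"resume": None}, {"rebuild": None},
#   {"shutdown": None}, {"shutoff": None}, {"forceOff": None}, {"reboot": None}
# e.g. vim.action_vminstance('<vapp-uuid>', {"shutdown": None})   # 'vim' is a hypothetical instance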
2153
2154 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2155 if vm__vim_uuid is None or action_dict is None:
2156 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2157
2158 vdc = self.get_vdc_details()
2159 if vdc is None:
2160 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2161
2162 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
2163 if vapp_name is None:
2164 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2165 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2166 else:
2167 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2168
2169 try:
2170 the_vapp = self.vca.get_vapp(vdc, vapp_name)
2171 # TODO fix all status
2172 if "start" in action_dict:
2173 vm_info = the_vapp.get_vms_details()
2174 vm_status = vm_info[0]['status']
2175 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2176 if vm_status == "Suspended" or vm_status == "Powered off":
2177 power_on_task = the_vapp.poweron()
2178 result = self.vca.block_until_completed(power_on_task)
2179 self.instance_actions_result("start", result, vapp_name)
2180 elif "rebuild" in action_dict:
2181 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2182 rebuild_task = the_vapp.deploy(powerOn=True)
2183 result = self.vca.block_until_completed(rebuild_task)
2184 self.instance_actions_result("rebuild", result, vapp_name)
2185 elif "pause" in action_dict:
2186 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2187 pause_task = the_vapp.undeploy(action='suspend')
2188 result = self.vca.block_until_completed(pause_task)
2189 self.instance_actions_result("pause", result, vapp_name)
2190 elif "resume" in action_dict:
2191 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2192 power_task = the_vapp.poweron()
2193 result = self.vca.block_until_completed(power_task)
2194 self.instance_actions_result("resume", result, vapp_name)
2195 elif "shutoff" in action_dict or "shutdown" in action_dict:
2196 action_name , value = action_dict.items()[0]
2197 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2198 power_off_task = the_vapp.undeploy(action='powerOff')
2199 result = self.vca.block_until_completed(power_off_task)
2200 if action_name == "shutdown":
2201 self.instance_actions_result("shutdown", result, vapp_name)
2202 else:
2203 self.instance_actions_result("shutoff", result, vapp_name)
2204 elif "forceOff" in action_dict:
2205 result = the_vapp.undeploy(action='force')
2206 self.instance_actions_result("forceOff", result, vapp_name)
2207 elif "reboot" in action_dict:
2208 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2209 reboot_task = the_vapp.reboot()
2210 else:
2211 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2212 return None
2213 except Exception as exp :
2214 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2215 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2216
2217 def instance_actions_result(self, action, result, vapp_name):
2218 if result:
2219 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2220 else:
2221 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2222
2223 def get_vminstance_console(self, vm_id, console_type="vnc"):
2224 """
2225 Get a console for the virtual machine
2226 Params:
2227 vm_id: uuid of the VM
2228 console_type, can be:
2229 "novnc" (by default), "xvpvnc" for VNC types,
2230 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2231 Returns dict with the console parameters:
2232 protocol: ssh, ftp, http, https, ...
2233 server: usually ip address
2234 port: the http, ssh, ... port
2235 suffix: extra text, e.g. the http path and query string
2236 """
2237 raise vimconn.vimconnNotImplemented("Should have implemented this")
2238
2239 # NOT USED METHODS in current version
2240
2241 def host_vim2gui(self, host, server_dict):
2242 """Transform host dictionary from VIM format to GUI format,
2243 and append to the server_dict
2244 """
2245 raise vimconn.vimconnNotImplemented("Should have implemented this")
2246
2247 def get_hosts_info(self):
2248 """Get the information of deployed hosts
2249 Returns the hosts content"""
2250 raise vimconn.vimconnNotImplemented("Should have implemented this")
2251
2252 def get_hosts(self, vim_tenant):
2253 """Get the hosts and deployed instances
2254 Returns the hosts content"""
2255 raise vimconn.vimconnNotImplemented("Should have implemented this")
2256
2257 def get_processor_rankings(self):
2258 """Get the processor rankings in the VIM database"""
2259 raise vimconn.vimconnNotImplemented("Should have implemented this")
2260
2261 def new_host(self, host_data):
2262 """Adds a new host to VIM"""
2263 '''Returns status code of the VIM response'''
2264 raise vimconn.vimconnNotImplemented("Should have implemented this")
2265
2266 def new_external_port(self, port_data):
2267 """Adds a external port to VIM"""
2268 '''Returns the port identifier'''
2269 raise vimconn.vimconnNotImplemented("Should have implemented this")
2270
2271 def new_external_network(self, net_name, net_type):
2272 """Adds a external network to VIM (shared)"""
2273 '''Returns the network identifier'''
2274 raise vimconn.vimconnNotImplemented("Should have implemented this")
2275
2276 def connect_port_network(self, port_id, network_id, admin=False):
2277 """Connects a external port to a network"""
2278 '''Returns status code of the VIM response'''
2279 raise vimconn.vimconnNotImplemented("Should have implemented this")
2280
2281 def new_vminstancefromJSON(self, vm_data):
2282 """Adds a VM instance to VIM"""
2283 '''Returns the instance identifier'''
2284 raise vimconn.vimconnNotImplemented("Should have implemented this")
2285
2286 def get_network_name_by_id(self, network_uuid=None):
2287 """Method gets vcloud director network named based on supplied uuid.
2288
2289 Args:
2290 network_uuid: network_id
2291
2292 Returns:
2293 The network name, or None if not found.
2294 """
2295
2296 if not network_uuid:
2297 return None
2298
2299 try:
2300 org_dict = self.get_org(self.org_uuid)
2301 if 'networks' in org_dict:
2302 org_network_dict = org_dict['networks']
2303 for net_uuid in org_network_dict:
2304 if net_uuid == network_uuid:
2305 return org_network_dict[net_uuid]
2306 except:
2307 self.logger.debug("Exception in get_network_name_by_id")
2308 self.logger.debug(traceback.format_exc())
2309
2310 return None
2311
2312 def get_network_id_by_name(self, network_name=None):
2313 """Method gets vcloud director network uuid based on supplied name.
2314
2315 Args:
2316 network_name: network_name
2317 Returns:
2318 The return network uuid.
2319 network_uuid: network_id
2320 """
2321
2322 if not network_name:
2323 self.logger.debug("get_network_id_by_name() : Network name is empty")
2324 return None
2325
2326 try:
2327 org_dict = self.get_org(self.org_uuid)
2328 if org_dict and 'networks' in org_dict:
2329 org_network_dict = org_dict['networks']
2330 for net_uuid,net_name in org_network_dict.iteritems():
2331 if net_name == network_name:
2332 return net_uuid
2333
2334 except KeyError as exp:
2335 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2336
2337 return None
2338
2339 def list_org_action(self):
2340 """
2341 Method leverages vCloud director and query for available organization for particular user
2342
2343 Args:
2344 vca - is active VCA connection.
2345 vdc_name - is a vdc name that will be used to query vms action
2346
2347 Returns:
2348 The return XML respond
2349 """
2350
2351 url_list = [self.vca.host, '/api/org']
2352 vm_list_rest_call = ''.join(url_list)
2353
2354 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2355 response = Http.get(url=vm_list_rest_call,
2356 headers=self.vca.vcloud_session.get_vcloud_headers(),
2357 verify=self.vca.verify,
2358 logger=self.vca.logger)
2359
2360 if response.status_code == 403:
2361 response = self.retry_rest('GET', vm_list_rest_call)
2362
2363 if response.status_code == requests.codes.ok:
2364 return response.content
2365
2366 return None
2367
2368 def get_org_action(self, org_uuid=None):
2369 """
2370 Method leverages vCloud director and retrieve available object fdr organization.
2371
2372 Args:
2373 vca - is active VCA connection.
2374 vdc_name - is a vdc name that will be used to query vms action
2375
2376 Returns:
2377 The return XML respond
2378 """
2379
2380 if org_uuid is None:
2381 return None
2382
2383 url_list = [self.vca.host, '/api/org/', org_uuid]
2384 vm_list_rest_call = ''.join(url_list)
2385
2386 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2387 response = Http.get(url=vm_list_rest_call,
2388 headers=self.vca.vcloud_session.get_vcloud_headers(),
2389 verify=self.vca.verify,
2390 logger=self.vca.logger)
2391
2392 #Retry login if session expired & retry sending request
2393 if response.status_code == 403:
2394 response = self.retry_rest('GET', vm_list_rest_call)
2395
2396 if response.status_code == requests.codes.ok:
2397 return response.content
2398
2399 return None
2400
2401 def get_org(self, org_uuid=None):
2402 """
2403 Method retrieves available organization in vCloud Director
2404
2405 Args:
2406 org_uuid - is a organization uuid.
2407
2408 Returns:
2409 The return dictionary with following key
2410 "network" - for network list under the org
2411 "catalogs" - for network list under the org
2412 "vdcs" - for vdc list under org
2413 """
2414
2415 org_dict = {}
2416
2417 if org_uuid is None:
2418 return org_dict
2419
2420 content = self.get_org_action(org_uuid=org_uuid)
2421 try:
2422 vdc_list = {}
2423 network_list = {}
2424 catalog_list = {}
2425 vm_list_xmlroot = XmlElementTree.fromstring(content)
2426 for child in vm_list_xmlroot:
2427 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2428 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2429 org_dict['vdcs'] = vdc_list
2430 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2431 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2432 org_dict['networks'] = network_list
2433 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2434 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2435 org_dict['catalogs'] = catalog_list
2436 except:
2437 pass
2438
2439 return org_dict
2440
2441 def get_org_list(self):
2442 """
2443 Method retrieves available organization in vCloud Director
2444
2445 Args:
2446 vca - is active VCA connection.
2447
2448 Returns:
2449 The return dictionary and key for each entry VDC UUID
2450 """
2451
2452 org_dict = {}
2453
2454 content = self.list_org_action()
2455 try:
2456 vm_list_xmlroot = XmlElementTree.fromstring(content)
2457 for vm_xml in vm_list_xmlroot:
2458 if vm_xml.tag.split("}")[1] == 'Org':
2459 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2460 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2461 except:
2462 pass
2463
2464 return org_dict
2465
2466 def vms_view_action(self, vdc_name=None):
2467 """ Method leverages vCloud director vms query call
2468
2469 Args:
2470 vca - is active VCA connection.
2471 vdc_name - is a vdc name that will be used to query vms action
2472
2473 Returns:
2474 The return XML respond
2475 """
2476 vca = self.connect()
2477 if vdc_name is None:
2478 return None
2479
2480 url_list = [vca.host, '/api/vms/query']
2481 vm_list_rest_call = ''.join(url_list)
2482
2483 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2484 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2485 vca.vcloud_session.organization.Link)
2486 if len(refs) == 1:
2487 response = Http.get(url=vm_list_rest_call,
2488 headers=vca.vcloud_session.get_vcloud_headers(),
2489 verify=vca.verify,
2490 logger=vca.logger)
2491 if response.status_code == requests.codes.ok:
2492 return response.content
2493
2494 return None
2495
2496 def get_vapp_list(self, vdc_name=None):
2497 """
2498 Method retrieves vApp list deployed vCloud director and returns a dictionary
2499 contains a list of all vapp deployed for queried VDC.
2500 The key for a dictionary is vApp UUID
2501
2502
2503 Args:
2504 vca - is active VCA connection.
2505 vdc_name - is a vdc name that will be used to query vms action
2506
2507 Returns:
2508 The return dictionary and key for each entry vapp UUID
2509 """
2510
2511 vapp_dict = {}
2512 if vdc_name is None:
2513 return vapp_dict
2514
2515 content = self.vms_view_action(vdc_name=vdc_name)
2516 try:
2517 vm_list_xmlroot = XmlElementTree.fromstring(content)
2518 for vm_xml in vm_list_xmlroot:
2519 if vm_xml.tag.split("}")[1] == 'VMRecord':
2520 if vm_xml.attrib['isVAppTemplate'] == 'true':
2521 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2522 if 'vappTemplate-' in rawuuid[0]:
2523 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2524 # vm and use raw UUID as key
2525 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2526 except:
2527 pass
2528
2529 return vapp_dict
2530
2531 def get_vm_list(self, vdc_name=None):
2532 """
2533 Method retrieves VM's list deployed vCloud director. It returns a dictionary
2534 contains a list of all VM's deployed for queried VDC.
2535 The key for a dictionary is VM UUID
2536
2537
2538 Args:
2539 vca - is active VCA connection.
2540 vdc_name - is a vdc name that will be used to query vms action
2541
2542 Returns:
2543 The return dictionary and key for each entry vapp UUID
2544 """
2545 vm_dict = {}
2546
2547 if vdc_name is None:
2548 return vm_dict
2549
2550 content = self.vms_view_action(vdc_name=vdc_name)
2551 try:
2552 vm_list_xmlroot = XmlElementTree.fromstring(content)
2553 for vm_xml in vm_list_xmlroot:
2554 if vm_xml.tag.split("}")[1] == 'VMRecord':
2555 if vm_xml.attrib['isVAppTemplate'] == 'false':
2556 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2557 if 'vm-' in rawuuid[0]:
2558 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2559 # vm and use raw UUID as key
2560 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2561 except:
2562 pass
2563
2564 return vm_dict
2565
2566 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2567 """
2568 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
2569 contains a list of all VM's deployed for queried VDC.
2570 The key for a dictionary is VM UUID
2571
2572
2573 Args:
2574 vca - is active VCA connection.
2575 vdc_name - is a vdc name that will be used to query vms action
2576
2577 Returns:
2578 The return dictionary and key for each entry vapp UUID
2579 """
2580 vm_dict = {}
2581 vca = self.connect()
2582 if not vca:
2583 raise vimconn.vimconnConnectionException("self.connect() is failed")
2584
2585 if vdc_name is None:
2586 return vm_dict
2587
2588 content = self.vms_view_action(vdc_name=vdc_name)
2589 try:
2590 vm_list_xmlroot = XmlElementTree.fromstring(content)
2591 for vm_xml in vm_list_xmlroot:
2592 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2593 # lookup done by UUID
2594 if isuuid:
2595 if vapp_name in vm_xml.attrib['container']:
2596 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2597 if 'vm-' in rawuuid[0]:
2598 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2599 break
2600 # lookup done by Name
2601 else:
2602 if vapp_name in vm_xml.attrib['name']:
2603 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2604 if 'vm-' in rawuuid[0]:
2605 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2606 break
2607 except:
2608 pass
2609
2610 return vm_dict
2611
2612 def get_network_action(self, network_uuid=None):
2613 """
2614 Method leverages vCloud director and query network based on network uuid
2615
2616 Args:
2617 vca - is active VCA connection.
2618 network_uuid - is a network uuid
2619
2620 Returns:
2621 The return XML respond
2622 """
2623
2624 if network_uuid is None:
2625 return None
2626
2627 url_list = [self.vca.host, '/api/network/', network_uuid]
2628 vm_list_rest_call = ''.join(url_list)
2629
2630 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2631 response = Http.get(url=vm_list_rest_call,
2632 headers=self.vca.vcloud_session.get_vcloud_headers(),
2633 verify=self.vca.verify,
2634 logger=self.vca.logger)
2635
2636 #Retry login if session expired & retry sending request
2637 if response.status_code == 403:
2638 response = self.retry_rest('GET', vm_list_rest_call)
2639
2640 if response.status_code == requests.codes.ok:
2641 return response.content
2642
2643 return None
2644
2645 def get_vcd_network(self, network_uuid=None):
2646 """
2647 Method retrieves available network from vCloud Director
2648
2649 Args:
2650 network_uuid - is VCD network UUID
2651
2652 Each element serialized as key : value pair
2653
2654 Following keys available for access, e.g. network_configuration['Gateway']
2655 <Configuration>
2656 <IpScopes>
2657 <IpScope>
2658 <IsInherited>true</IsInherited>
2659 <Gateway>172.16.252.100</Gateway>
2660 <Netmask>255.255.255.0</Netmask>
2661 <Dns1>172.16.254.201</Dns1>
2662 <Dns2>172.16.254.202</Dns2>
2663 <DnsSuffix>vmwarelab.edu</DnsSuffix>
2664 <IsEnabled>true</IsEnabled>
2665 <IpRanges>
2666 <IpRange>
2667 <StartAddress>172.16.252.1</StartAddress>
2668 <EndAddress>172.16.252.99</EndAddress>
2669 </IpRange>
2670 </IpRanges>
2671 </IpScope>
2672 </IpScopes>
2673 <FenceMode>bridged</FenceMode>
2674
2675 Returns:
2676 A dictionary with the network configuration
2677 """
2678
2679 network_configuration = {}
2680 if network_uuid is None:
2681 return network_uuid
2682
2683 try:
2684 content = self.get_network_action(network_uuid=network_uuid)
2685 vm_list_xmlroot = XmlElementTree.fromstring(content)
2686
2687 network_configuration['status'] = vm_list_xmlroot.get("status")
2688 network_configuration['name'] = vm_list_xmlroot.get("name")
2689 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
2690
2691 for child in vm_list_xmlroot:
2692 if child.tag.split("}")[1] == 'IsShared':
2693 network_configuration['isShared'] = child.text.strip()
2694 if child.tag.split("}")[1] == 'Configuration':
2695 for configuration in child.iter():
2696 tagKey = configuration.tag.split("}")[1].strip()
2697 if tagKey != "":
2698 network_configuration[tagKey] = configuration.text.strip()
2699 return network_configuration
2700 except Exception as exp :
2701 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
2702 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2703
2704 return network_configuration
2705
2706 def delete_network_action(self, network_uuid=None):
2707 """
2708 Method delete given network from vCloud director
2709
2710 Args:
2711 network_uuid - is a network uuid that client wish to delete
2712
2713 Returns:
2714 The return None or XML respond or false
2715 """
2716
2717 vca = self.connect_as_admin()
2718 if not vca:
2719 raise vimconn.vimconnConnectionException("self.connect() is failed")
2720 if network_uuid is None:
2721 return False
2722
2723 url_list = [vca.host, '/api/admin/network/', network_uuid]
2724 vm_list_rest_call = ''.join(url_list)
2725
2726 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2727 response = Http.delete(url=vm_list_rest_call,
2728 headers=vca.vcloud_session.get_vcloud_headers(),
2729 verify=vca.verify,
2730 logger=vca.logger)
2731
2732 if response.status_code == 202:
2733 return True
2734
2735 return False
2736
2737 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2738 ip_profile=None, isshared='true'):
2739 """
2740 Method create network in vCloud director
2741
2742 Args:
2743 network_name - is the network name to be created.
2744 net_type - can be 'bridge', 'data', 'ptp' or 'mgmt'.
2745 ip_profile - is a dict containing the IP parameters of the network
2746 isshared - is a boolean
2747 parent_network_uuid - is the parent provider vdc network that will be used for mapping.
2748 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
2749
2750 Returns:
2751 The network uuid, or None
2752 """
2753
2754 new_network_name = [network_name, '-', str(uuid.uuid4())]
2755 content = self.create_network_rest(network_name=''.join(new_network_name),
2756 ip_profile=ip_profile,
2757 net_type=net_type,
2758 parent_network_uuid=parent_network_uuid,
2759 isshared=isshared)
2760 if content is None:
2761 self.logger.debug("Failed create network {}.".format(network_name))
2762 return None
2763
2764 try:
2765 vm_list_xmlroot = XmlElementTree.fromstring(content)
2766 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2767 if len(vcd_uuid) == 4:
2768 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2769 return vcd_uuid[3]
2770 except:
2771 self.logger.debug("Failed create network {}".format(network_name))
2772 return None
2773
2774 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2775 ip_profile=None, isshared='true'):
2776 """
2777 Method create network in vCloud director
2778
2779 Args:
2780 network_name - is the network name to be created.
2781 net_type - can be 'bridge', 'data', 'ptp' or 'mgmt'.
2782 ip_profile - is a dict containing the IP parameters of the network
2783 isshared - is a boolean
2784 parent_network_uuid - is the parent provider vdc network that will be used for mapping.
2785 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
2786
2787 Returns:
2788 The XML content of the network creation response, or None
2789 """
2790
2791 vca = self.connect_as_admin()
2792 if not vca:
2793 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2794 if network_name is None:
2795 return None
2796
2797 url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
2798 vm_list_rest_call = ''.join(url_list)
2799 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2800 response = Http.get(url=vm_list_rest_call,
2801 headers=vca.vcloud_session.get_vcloud_headers(),
2802 verify=vca.verify,
2803 logger=vca.logger)
2804
2805 provider_network = None
2806 available_networks = None
2807 add_vdc_rest_url = None
2808
2809 if response.status_code != requests.codes.ok:
2810 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2811 response.status_code))
2812 return None
2813 else:
2814 try:
2815 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2816 for child in vm_list_xmlroot:
2817 if child.tag.split("}")[1] == 'ProviderVdcReference':
2818 provider_network = child.attrib.get('href')
2819 # application/vnd.vmware.admin.providervdc+xml
2820 if child.tag.split("}")[1] == 'Link':
2821 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
2822 and child.attrib.get('rel') == 'add':
2823 add_vdc_rest_url = child.attrib.get('href')
2824 except:
2825 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2826 self.logger.debug("Respond body {}".format(response.content))
2827 return None
2828
2829 # find pvdc provided available network
2830 response = Http.get(url=provider_network,
2831 headers=vca.vcloud_session.get_vcloud_headers(),
2832 verify=vca.verify,
2833 logger=vca.logger)
2834 if response.status_code != requests.codes.ok:
2835 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2836 response.status_code))
2837 return None
2838
2839 # available_networks.split("/")[-1]
2840
2841 if parent_network_uuid is None:
2842 try:
2843 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2844 for child in vm_list_xmlroot.iter():
2845 if child.tag.split("}")[1] == 'AvailableNetworks':
2846 for networks in child.iter():
2847 # application/vnd.vmware.admin.network+xml
2848 if networks.attrib.get('href') is not None:
2849 available_networks = networks.attrib.get('href')
2850 break
2851 except:
2852 return None
2853
2854 try:
2855 #Configure IP profile of the network
2856 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
2857
2858 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
2859 subnet_rand = random.randint(0, 255)
2860 ip_base = "192.168.{}.".format(subnet_rand)
2861 ip_profile['subnet_address'] = ip_base + "0/24"
2862 else:
2863 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
2864
2865 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
2866 ip_profile['gateway_address']=ip_base + "1"
2867 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
2868 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
2869 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
2870 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
2871 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
2872 ip_profile['dhcp_start_address']=ip_base + "3"
2873 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
2874 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
2875 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
2876 ip_profile['dns_address']=ip_base + "2"
2877
2878 gateway_address=ip_profile['gateway_address']
2879 dhcp_count=int(ip_profile['dhcp_count'])
2880 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
2881
2882 if ip_profile['dhcp_enabled']==True:
2883 dhcp_enabled='true'
2884 else:
2885 dhcp_enabled='false'
2886 dhcp_start_address=ip_profile['dhcp_start_address']
2887
2888 #derive dhcp_end_address from dhcp_start_address & dhcp_count
2889 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
2890 end_ip_int += dhcp_count - 1
2891 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
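# e.g. dhcp_start_address '192.168.10.3' with dhcp_count 50 yields dhcp_end_address
# '192.168.10.52' (start + count - 1)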
2892
2893 ip_version=ip_profile['ip_version']
2894 dns_address=ip_profile['dns_address']
2895 except KeyError as exp:
2896 self.logger.debug("Create Network REST: Key error {}".format(exp))
2897 raise vimconn.vimconnException("Create Network REST: Key error {}".format(exp))
2898
2899 # either use client provided UUID or search for a first available
2900 # if both are not defined we return none
2901 if parent_network_uuid is not None:
2902 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
2903 add_vdc_rest_url = ''.join(url_list)
2904
2905 #Creating all networks as Direct Org VDC type networks.
2906 #Unused in case of Underlay (data/ptp) network interface.
2907 fence_mode="bridged"
2908 is_inherited='false'
2909 dns_list = dns_address.split(";")
2910 dns1 = dns_list[0]
2911 dns2_text = ""
2912 if len(dns_list) >= 2:
2913 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
2914 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2915 <Description>Openmano created</Description>
2916 <Configuration>
2917 <IpScopes>
2918 <IpScope>
2919 <IsInherited>{1:s}</IsInherited>
2920 <Gateway>{2:s}</Gateway>
2921 <Netmask>{3:s}</Netmask>
2922 <Dns1>{4:s}</Dns1>{5:s}
2923 <IsEnabled>{6:s}</IsEnabled>
2924 <IpRanges>
2925 <IpRange>
2926 <StartAddress>{7:s}</StartAddress>
2927 <EndAddress>{8:s}</EndAddress>
2928 </IpRange>
2929 </IpRanges>
2930 </IpScope>
2931 </IpScopes>
2932 <ParentNetwork href="{9:s}"/>
2933 <FenceMode>{10:s}</FenceMode>
2934 </Configuration>
2935 <IsShared>{11:s}</IsShared>
2936 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
2937 subnet_address, dns1, dns2_text, dhcp_enabled,
2938 dhcp_start_address, dhcp_end_address, available_networks,
2939 fence_mode, isshared)
2940
2941 headers = vca.vcloud_session.get_vcloud_headers()
2942 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
2943 try:
2944 response = Http.post(url=add_vdc_rest_url,
2945 headers=headers,
2946 data=data,
2947 verify=vca.verify,
2948 logger=vca.logger)
2949
2950 if response.status_code != 201:
2951 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
2952 .format(response.status_code,response.content))
2953 else:
2954 network = networkType.parseString(response.content, True)
2955 create_nw_task = network.get_Tasks().get_Task()[0]
2956
2957 # if all is ok we return the content after network creation completes
2958 # otherwise by default return None
2959 if create_nw_task is not None:
2960 self.logger.debug("Create Network REST : Waiting for Network creation complete")
2961 status = vca.block_until_completed(create_nw_task)
2962 if status:
2963 return response.content
2964 else:
2965 self.logger.debug("create_network_rest task failed. Network Create response : {}"
2966 .format(response.content))
2967 except Exception as exp:
2968 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
2969
2970 return None
2971
2972 def convert_cidr_to_netmask(self, cidr_ip=None):
2973 """
2974 Method converts a CIDR prefix length into a dotted-decimal netmask
2975 Args:
2976 cidr_ip : CIDR IP address
2977 Returns:
2978 netmask : Converted netmask
2979 """
2980 if cidr_ip is not None:
2981 if '/' in cidr_ip:
2982 network, net_bits = cidr_ip.split('/')
2983 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2984 else:
2985 netmask = cidr_ip
2986 return netmask
2987 return None
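# e.g. convert_cidr_to_netmask('192.168.10.0/24') returns '255.255.255.0';
# an address without a '/' prefix length is returned unchanged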
2988
2989 def get_provider_rest(self, vca=None):
2990 """
2991 Method gets provider vdc view from vcloud director
2992
2993 Args:
2994 network_name - is network name to be created.
2995 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2996 It optional attribute. by default if no parent network indicate the first available will be used.
2997
2998 Returns:
2999 The return xml content of respond or None
3000 """
3001
3002 url_list = [vca.host, '/api/admin']
3003 response = Http.get(url=''.join(url_list),
3004 headers=vca.vcloud_session.get_vcloud_headers(),
3005 verify=vca.verify,
3006 logger=vca.logger)
3007
3008 if response.status_code == requests.codes.ok:
3009 return response.content
3010 return None
3011
3012 def create_vdc(self, vdc_name=None):
3013
3014 vdc_dict = {}
3015
3016 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
3017 if xml_content is not None:
3018 try:
3019 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
3020 for child in task_resp_xmlroot:
3021 if child.tag.split("}")[1] == 'Owner':
3022 vdc_id = child.attrib.get('href').split("/")[-1]
3023 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
3024 return vdc_dict
3025 except:
3026 self.logger.debug("Respond body {}".format(xml_content))
3027
3028 return None
3029
3030 def create_vdc_from_tmpl_rest(self, vdc_name=None):
3031 """
3032 Method create vdc in vCloud director based on VDC template.
3033 it uses pre-defined template that must be named openmano
3034
3035 Args:
3036 vdc_name - name of a new vdc.
3037
3038 Returns:
3039 The return xml content of respond or None
3040 """
3041
3042 self.logger.info("Creating new vdc {}".format(vdc_name))
3043 vca = self.connect()
3044 if not vca:
3045 raise vimconn.vimconnConnectionException("self.connect() is failed")
3046 if vdc_name is None:
3047 return None
3048
3049 url_list = [vca.host, '/api/vdcTemplates']
3050 vm_list_rest_call = ''.join(url_list)
3051 response = Http.get(url=vm_list_rest_call,
3052 headers=vca.vcloud_session.get_vcloud_headers(),
3053 verify=vca.verify,
3054 logger=vca.logger)
3055
3056 # container url to a template
3057 vdc_template_ref = None
3058 try:
3059 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3060 for child in vm_list_xmlroot:
3061 # application/vnd.vmware.admin.providervdc+xml
3062 # we need to find a template from which we instantiate the VDC
3063 if child.tag.split("}")[1] == 'VdcTemplate':
3064 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
3065 vdc_template_ref = child.attrib.get('href')
3066 except:
3067 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3068 self.logger.debug("Response body {}".format(response.content))
3069 return None
3070
3071 # if we didn't find the required pre-defined template, return None
3072 if vdc_template_ref is None:
3073 return None
3074
3075 try:
3076 # instantiate vdc
3077 url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
3078 vm_list_rest_call = ''.join(url_list)
3079 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3080 <Source href="{1:s}"></Source>
3081 <Description>openmano</Description>
3082 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
3083 headers = vca.vcloud_session.get_vcloud_headers()
3084 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
3085 response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
3086 logger=vca.logger)
3087
3088 vdc_task = taskType.parseString(response.content, True)
3089 if type(vdc_task) is GenericTask:
3090 self.vca.block_until_completed(vdc_task)
3091
3092 # if everything went OK return the response content, otherwise None (the default)
3093 if response.status_code >= 200 and response.status_code < 300:
3094 return response.content
3095 return None
3096 except:
3097 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3098 self.logger.debug("Response body {}".format(response.content))
3099
3100 return None
3101
3102 def create_vdc_rest(self, vdc_name=None):
3103 """
3104 Method creates a new VDC in vCloud Director for the current organization
3105
3106 Args:
3107 vdc_name - name of the VDC to be created.
3110
3111 Returns:
3112 The XML content of the response, or None
3113 """
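# Flow: GET the admin view of the organization to find the 'add'
# createVdcParams link, pick a provider VDC reference via get_provider_rest(),
# then POST a CreateVdcParams document (ReservationPool model with fixed
# CPU/memory/storage limits) to that link.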
3114
3115 self.logger.info("Creating new vdc {}".format(vdc_name))
3116
3117 vca = self.connect_as_admin()
3118 if not vca:
3119 raise vimconn.vimconnConnectionException("self.connect_as_admin() failed")
3120 if vdc_name is None:
3121 return None
3122
3123 url_list = [vca.host, '/api/admin/org/', self.org_uuid]
3124 vm_list_rest_call = ''.join(url_list)
3125 if vca.vcloud_session and vca.vcloud_session.organization:
3126 response = Http.get(url=vm_list_rest_call,
3127 headers=vca.vcloud_session.get_vcloud_headers(),
3128 verify=vca.verify,
3129 logger=vca.logger)
3130
3131 provider_vdc_ref = None
3132 add_vdc_rest_url = None
3133 available_networks = None
3134
3135 if response.status_code != requests.codes.ok:
3136 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3137 response.status_code))
3138 return None
3139 else:
3140 try:
3141 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3142 for child in vm_list_xmlroot:
3143 # application/vnd.vmware.admin.providervdc+xml
3144 if child.tag.split("}")[1] == 'Link':
3145 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
3146 and child.attrib.get('rel') == 'add':
3147 add_vdc_rest_url = child.attrib.get('href')
3148 except:
3149 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3150 self.logger.debug("Response body {}".format(response.content))
3151 return None
3152
3153 response = self.get_provider_rest(vca=vca)
3154 try:
3155 vm_list_xmlroot = XmlElementTree.fromstring(response)
3156 for child in vm_list_xmlroot:
3157 if child.tag.split("}")[1] == 'ProviderVdcReferences':
3158 for sub_child in child:
3159 provider_vdc_ref = sub_child.attrib.get('href')
3160 except:
3161 self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
3162 self.logger.debug("Response body {}".format(response))
3163 return None
3164
3165 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
3166 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
3167 <AllocationModel>ReservationPool</AllocationModel>
3168 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
3169 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
3170 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
3171 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
3172 <ProviderVdcReference
3173 name="Main Provider"
3174 href="{2:s}" />
3175 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
3176 escape(vdc_name),
3177 provider_vdc_ref)
3178
3179 headers = vca.vcloud_session.get_vcloud_headers()
3180 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
3181 response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
3182 logger=vca.logger)
3183
3184 # if everything went OK return the response content, otherwise None (the default)
3185 if response.status_code == 201:
3186 return response.content
3187 return None
3188
3189 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
3190 """
3191 Method retrieves vApp details from vCloud Director
3192
3193 Args:
3194 vapp_uuid - is vapp identifier.
3195
3196 Returns:
3197 A dict with the parsed vApp details (empty on failure)
3198 """
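# The vApp XML is flattened into parsed_respond: creation date, network name,
# IP scopes, NIC list, console ticket links, the vCenter moref id
# (vm_vcenter_info) and disk details (vm_virtual_hardware). On a 403 the call
# is retried through retry_rest() unless an admin session was requested.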
3199
3200 parsed_respond = {}
3201 vca = None
3202
3203 if need_admin_access:
3204 vca = self.connect_as_admin()
3205 else:
3206 vca = self.vca
3207
3208 if not vca:
3209 raise vimconn.vimconnConnectionException("self.connect() failed")
3210 if vapp_uuid is None:
3211 return None
3212
3213 url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
3214 get_vapp_restcall = ''.join(url_list)
3215
3216 if vca.vcloud_session and vca.vcloud_session.organization:
3217 response = Http.get(url=get_vapp_restcall,
3218 headers=vca.vcloud_session.get_vcloud_headers(),
3219 verify=vca.verify,
3220 logger=vca.logger)
3221
3222 if response.status_code == 403:
3223 if not need_admin_access:
3224 response = self.retry_rest('GET', get_vapp_restcall)
3225
3226 if response.status_code != requests.codes.ok:
3227 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
3228 response.status_code))
3229 return parsed_respond
3230
3231 try:
3232 xmlroot_respond = XmlElementTree.fromstring(response.content)
3233 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
3234
3235 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
3236 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
3237 'vmw': 'http://www.vmware.com/schema/ovf',
3238 'vm': 'http://www.vmware.com/vcloud/v1.5',
3239 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
3240 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
3241 "xmlns":"http://www.vmware.com/vcloud/v1.5"
3242 }
3243
3244 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
3245 if created_section is not None:
3246 parsed_respond['created'] = created_section.text
3247
3248 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
3249 if network_section is not None and 'networkName' in network_section.attrib:
3250 parsed_respond['networkname'] = network_section.attrib['networkName']
3251
3252 ipscopes_section = \
3253 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
3254 namespaces)
3255 if ipscopes_section is not None:
3256 for ipscope in ipscopes_section:
3257 for scope in ipscope:
3258 tag_key = scope.tag.split("}")[1]
3259 if tag_key == 'IpRanges':
3260 ip_ranges = scope.getchildren()
3261 for ipblock in ip_ranges:
3262 for block in ipblock:
3263 parsed_respond[block.tag.split("}")[1]] = block.text
3264 else:
3265 parsed_respond[tag_key] = scope.text
3266
3267 # parse children section for other attrib
3268 children_section = xmlroot_respond.find('vm:Children/', namespaces)
3269 if children_section is not None:
3270 parsed_respond['name'] = children_section.attrib['name']
3271 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
3272 if "nestedHypervisorEnabled" in children_section.attrib else None
3273 parsed_respond['deployed'] = children_section.attrib['deployed']
3274 parsed_respond['status'] = children_section.attrib['status']
3275 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
3276 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
3277 nic_list = []
3278 for adapters in network_adapter:
3279 adapter_key = adapters.tag.split("}")[1]
3280 if adapter_key == 'PrimaryNetworkConnectionIndex':
3281 parsed_respond['primarynetwork'] = adapters.text
3282 if adapter_key == 'NetworkConnection':
3283 vnic = {}
3284 if 'network' in adapters.attrib:
3285 vnic['network'] = adapters.attrib['network']
3286 for adapter in adapters:
3287 setting_key = adapter.tag.split("}")[1]
3288 vnic[setting_key] = adapter.text
3289 nic_list.append(vnic)
3290
3291 for link in children_section:
3292 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3293 if link.attrib['rel'] == 'screen:acquireTicket':
3294 parsed_respond['acquireTicket'] = link.attrib
3295 if link.attrib['rel'] == 'screen:acquireMksTicket':
3296 parsed_respond['acquireMksTicket'] = link.attrib
3297
3298 parsed_respond['interfaces'] = nic_list
3299 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
3300 if vCloud_extension_section is not None:
3301 vm_vcenter_info = {}
3302 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
3303 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
3304 if vmext is not None:
3305 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
3306 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
3307
3308 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
3309 vm_virtual_hardware_info = {}
3310 if virtual_hardware_section is not None:
3311 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
3312 if item.find("rasd:Description",namespaces).text == "Hard disk":
3313 disk_size = item.find("rasd:HostResource" ,namespaces
3314 ).attrib["{"+namespaces['vm']+"}capacity"]
3315
3316 vm_virtual_hardware_info["disk_size"]= disk_size
3317 break
3318
3319 for link in virtual_hardware_section:
3320 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3321 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
3322 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
3323 break
3324
3325 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
3326 except Exception as exp :
3327 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
3328 return parsed_respond
3329
3330 def acuire_console(self, vm_uuid=None):
3331
3332 if vm_uuid is None:
3333 return None
3334
3335 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3336 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
3337 console_dict = vm_dict['acquireTicket']
3338 console_rest_call = console_dict['href']
3339
3340 response = Http.post(url=console_rest_call,
3341 headers=self.vca.vcloud_session.get_vcloud_headers(),
3342 verify=self.vca.verify,
3343 logger=self.vca.logger)
3344 if response.status_code == 403:
3345 response = self.retry_rest('POST', console_rest_call)
3346
3347 if response.status_code == requests.codes.ok:
3348 return response.content
3349
3350 return None
3351
3352 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3353 """
3354 Method to enlarge the VM disk to the flavor disk size when needed
3355
3356 Args:
3357 vapp_uuid - is vapp identifier.
3358 flavor_disk - disk size as specified in VNFD (flavor)
3359
3360 Returns:
3361 Status of the modify-disk task (True if no change was needed), or None on error
3362 """
3363 status = None
3364 try:
3365 #Flavor disk is in GB convert it into MB
3366 flavor_disk = int(flavor_disk) * 1024
3367 vm_details = self.get_vapp_details_rest(vapp_uuid)
3368 if vm_details:
3369 vm_name = vm_details["name"]
3370 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3371
3372 if vm_details and "vm_virtual_hardware" in vm_details:
3373 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3374 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3375
3376 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3377
3378 if flavor_disk > vm_disk:
3379 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3380 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3381 vm_disk, flavor_disk ))
3382 else:
3383 status = True
3384 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3385
3386 return status
3387 except Exception as exp:
3388 self.logger.info("Error occurred while modifying disk size {}".format(exp))
3389
3390
3391 def modify_vm_disk_rest(self, disk_href , disk_size):
3392 """
3393 Method modifies the VM disk size through the vCloud REST API
3394
3395 Args:
3396 disk_href - vCD API URL to GET and PUT disk data
3397 disk_size - disk size as specified in VNFD (flavor)
3398
3399 Returns:
3400 Status of the modify-disk task, or None on error
3401 """
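# Flow: GET the RasdItemsList referenced by disk_href, patch the capacity
# attribute of the "Hard disk" HostResource element, PUT the document back
# and wait for the returned task (vCD answers 202 Accepted).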
3402 if disk_href is None or disk_size is None:
3403 return None
3404
3405 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3406 response = Http.get(url=disk_href,
3407 headers=self.vca.vcloud_session.get_vcloud_headers(),
3408 verify=self.vca.verify,
3409 logger=self.vca.logger)
3410
3411 if response.status_code == 403:
3412 response = self.retry_rest('GET', disk_href)
3413
3414 if response.status_code != requests.codes.ok:
3415 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
3416 response.status_code))
3417 return None
3418 try:
3419 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
3420 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
3421 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
3422
3423 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
3424 if item.find("rasd:Description",namespaces).text == "Hard disk":
3425 disk_item = item.find("rasd:HostResource" ,namespaces )
3426 if disk_item is not None:
3427 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
3428 break
3429
3430 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
3431 xml_declaration=True)
3432
3433 #Send PUT request to modify disk size
3434 headers = self.vca.vcloud_session.get_vcloud_headers()
3435 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
3436
3437 response = Http.put(url=disk_href,
3438 data=data,
3439 headers=headers,
3440 verify=self.vca.verify, logger=self.logger)
3441
3442 if response.status_code == 403:
3443 add_headers = {'Content-Type': headers['Content-Type']}
3444 response = self.retry_rest('PUT', disk_href, add_headers, data)
3445
3446 if response.status_code != 202:
3447 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
3448 response.status_code))
3449 else:
3450 modify_disk_task = taskType.parseString(response.content, True)
3451 if type(modify_disk_task) is GenericTask:
3452 status = self.vca.block_until_completed(modify_disk_task)
3453 return status
3454
3455 return None
3456
3457 except Exception as exp :
3458 self.logger.info("Error occurred calling rest api for modifying disk size {}".format(exp))
3459 return None
3460
3461 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3462 """
3463 Method to attach pci devices to VM
3464
3465 Args:
3466 vapp_uuid - uuid of vApp/VM
3467 pci_devices - pci devices information as specified in VNFD (flavor)
3468
3469 Returns:
3470 The status of the add PCI device task, the vm object and the
3471 vcenter_conect object
3472 """
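# Flow: resolve the VM's vCenter moref id, look for enough passthrough-enabled
# PCI devices on its current host, migrate the VM to another host if needed,
# then attach the devices one by one via ReconfigVM_Task.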
3473 vm_obj = None
3474 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3475 vcenter_conect, content = self.get_vcenter_content()
3476 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3477
3478 if vm_moref_id:
3479 try:
3480 no_of_pci_devices = len(pci_devices)
3481 if no_of_pci_devices > 0:
3482 #Get VM and its host
3483 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3484 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3485 if host_obj and vm_obj:
3486 #get PCI devices from host on which vapp is currently installed
3487 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3488
3489 if avilable_pci_devices is None:
3490 #find other hosts with active pci devices
3491 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3492 content,
3493 no_of_pci_devices
3494 )
3495
3496 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3497 #Migrate vm to the host where PCI devices are available
3498 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3499 task = self.relocate_vm(new_host_obj, vm_obj)
3500 if task is not None:
3501 result = self.wait_for_vcenter_task(task, vcenter_conect)
3502 self.logger.info("Migrate VM status: {}".format(result))
3503 host_obj = new_host_obj
3504 else:
3505 self.logger.info("Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj))
3506 raise vimconn.vimconnNotFoundException(
3507 "Fail to migrate VM : {} to host {}".format(
3508 vmname_andid,
3509 new_host_obj)
3510 )
3511
3512 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3513 #Add PCI devices one by one
3514 for pci_device in avilable_pci_devices:
3515 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3516 if task:
3517 status= self.wait_for_vcenter_task(task, vcenter_conect)
3518 if status:
3519 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3520 else:
3521 self.logger.error("Failed to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3522 return True, vm_obj, vcenter_conect
3523 else:
3524 self.logger.error("Currently there is no host with"\
3525 " {} available PCI devices required for VM {}".format(
3526 no_of_pci_devices,
3527 vmname_andid)
3528 )
3529 raise vimconn.vimconnNotFoundException(
3530 "Currently there is no host with {} "\
3531 "available PCI devices required for VM {}".format(
3532 no_of_pci_devices,
3533 vmname_andid))
3534 else:
3535 self.logger.debug("No information about PCI devices {}".format(pci_devices))
3536
3537 except vmodl.MethodFault as error:
3538 self.logger.error("Error occurred while adding PCI devices: {}".format(error))
3539 return None, vm_obj, vcenter_conect
3540
3541 def get_vm_obj(self, content, mob_id):
3542 """
3543 Method to get the vSphere VM object associated with a given moref ID
3544 Args:
3546 content - vCenter content object
3547 mob_id - mob_id of VM
3548
3549 Returns:
3550 VM and host object
3551 """
3552 vm_obj = None
3553 host_obj = None
3554 try :
3555 container = content.viewManager.CreateContainerView(content.rootFolder,
3556 [vim.VirtualMachine], True
3557 )
3558 for vm in container.view:
3559 mobID = vm._GetMoId()
3560 if mobID == mob_id:
3561 vm_obj = vm
3562 host_obj = vm_obj.runtime.host
3563 break
3564 except Exception as exp:
3565 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3566 return host_obj, vm_obj
3567
3568 def get_pci_devices(self, host, need_devices):
3569 """
3570 Method to get the details of pci devices on given host
3571 Args:
3572 host - vSphere host object
3573 need_devices - number of pci devices needed on host
3574
3575 Returns:
3576 array of pci devices
3577 """
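# A device qualifies if PCI passthrough is active for it on the host and it is
# not already attached (VirtualPCIPassthrough) to a powered-on VM.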
3578 all_devices = []
3579 all_device_ids = []
3580 used_devices_ids = []
3581
3582 try:
3583 if host:
3584 pciPassthruInfo = host.config.pciPassthruInfo
3585 pciDevies = host.hardware.pciDevice
3586
3587 for pci_status in pciPassthruInfo:
3588 if pci_status.passthruActive:
3589 for device in pciDevies:
3590 if device.id == pci_status.id:
3591 all_device_ids.append(device.id)
3592 all_devices.append(device)
3593
3594 #check if devices are in use
3595 avalible_devices = all_devices
3596 for vm in host.vm:
3597 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3598 vm_devices = vm.config.hardware.device
3599 for device in vm_devices:
3600 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3601 if device.backing.id in all_device_ids:
3602 for use_device in avalible_devices:
3603 if use_device.id == device.backing.id:
3604 avalible_devices.remove(use_device)
3605 used_devices_ids.append(device.backing.id)
3606 self.logger.debug("Device {} from devices {} "\
3607 "is in use".format(device.backing.id,
3608 device)
3609 )
3610 if len(avalible_devices) < need_devices:
3611 self.logger.debug("Host {} does not have {} available PCI devices".format(host,
3612 need_devices))
3613 self.logger.debug("found only {} devices: {}".format(len(avalible_devices),
3614 avalible_devices))
3615 return None
3616 else:
3617 required_devices = avalible_devices[:need_devices]
3618 self.logger.info("Found {} PCI devices on host {} but required only {}".format(
3619 len(avalible_devices),
3620 host,
3621 need_devices))
3622 self.logger.info("Returning {} devices as {}".format(need_devices,
3623 required_devices ))
3624 return required_devices
3625
3626 except Exception as exp:
3627 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3628
3629 return None
3630
3631 def get_host_and_PCIdevices(self, content, need_devices):
3632 """
3633 Method to find a host that has enough available passthrough PCI devices
3634
3635 Args:
3636 content - vCenter content object
3637 need_devices - number of pci devices needed on host
3638
3639 Returns:
3640 array of pci devices and host object
3641 """
3642 host_obj = None
3643 pci_device_objs = None
3644 try:
3645 if content:
3646 container = content.viewManager.CreateContainerView(content.rootFolder,
3647 [vim.HostSystem], True)
3648 for host in container.view:
3649 devices = self.get_pci_devices(host, need_devices)
3650 if devices:
3651 host_obj = host
3652 pci_device_objs = devices
3653 break
3654 except Exception as exp:
3655 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3656
3657 return host_obj,pci_device_objs
3658
3659 def relocate_vm(self, dest_host, vm) :
3660 """
3661 Method to relocate a VM to a new host
3662
3663 Args:
3664 dest_host - vSphere host object
3665 vm - vSphere VM object
3666
3667 Returns:
3668 task object
3669 """
3670 task = None
3671 try:
3672 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3673 task = vm.Relocate(relocate_spec)
3674 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3675 except Exception as exp:
3676 self.logger.error("Error occurred while relocating VM {} to new host {}: {}".format(
3677 vm, dest_host, exp))
3678 return task
3679
3680 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3681 """
3682 Waits and provides updates on a vSphere task
3683 """
3684 while task.info.state == vim.TaskInfo.State.running:
3685 time.sleep(2)
3686
3687 if task.info.state == vim.TaskInfo.State.success:
3688 if task.info.result is not None and not hideResult:
3689 self.logger.info('{} completed successfully, result: {}'.format(
3690 actionName,
3691 task.info.result))
3692 else:
3693 self.logger.info('Task {} completed successfully.'.format(actionName))
3694 else:
3695 self.logger.error('{} did not complete successfully: {} '.format(
3696 actionName,
3697 task.info.error)
3698 )
3699
3700 return task.info.result
3701
3702 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3703 """
3704 Method to add pci device in given VM
3705
3706 Args:
3707 host_object - vSphere host object
3708 vm_object - vSphere VM object
3709 host_pci_dev - host_pci_dev must be one of the devices from the
3710 host_object.hardware.pciDevice list
3711 which is configured as a PCI passthrough device
3712
3713 Returns:
3714 task object
3715 """
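# The device must appear in the VM's QueryConfigTarget pciPassthrough list;
# its systemId is needed to build the VirtualPCIPassthroughDeviceBackingInfo
# used in the ReconfigVM_Task device change.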
3716 task = None
3717 if vm_object and host_object and host_pci_dev:
3718 try :
3719 #Add PCI device to VM
3720 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3721 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3722
3723 if host_pci_dev.id not in systemid_by_pciid:
3724 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3725 return None
3726
3727 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3728 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3729 id=host_pci_dev.id,
3730 systemId=systemid_by_pciid[host_pci_dev.id],
3731 vendorId=host_pci_dev.vendorId,
3732 deviceName=host_pci_dev.deviceName)
3733
3734 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3735
3736 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3737 new_device_config.operation = "add"
3738 vmConfigSpec = vim.vm.ConfigSpec()
3739 vmConfigSpec.deviceChange = [new_device_config]
3740
3741 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3742 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3743 host_pci_dev, vm_object, host_object)
3744 )
3745 except Exception as exp:
3746 self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
3747 host_pci_dev,
3748 vm_object,
3749 exp))
3750 return task
3751
3752 def get_vm_vcenter_info(self):
3753 """
3754 Method to get vCenter connection details provided in the VIM --config
3755
3756 Args:
3757 None
3758
3759 Returns:
3760 dict with vCenter IP, port, user and password
3761 """
3762 vm_vcenter_info = {}
3763
3764 if self.vcenter_ip is not None:
3765 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3766 else:
3767 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3768 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3769 if self.vcenter_port is not None:
3770 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3771 else:
3772 raise vimconn.vimconnException(message="vCenter port is not provided."\
3773 " Please provide vCenter port while attaching datacenter to tenant in --config")
3774 if self.vcenter_user is not None:
3775 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3776 else:
3777 raise vimconn.vimconnException(message="vCenter user is not provided."\
3778 " Please provide vCenter user while attaching datacenter to tenant in --config")
3779
3780 if self.vcenter_password is not None:
3781 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3782 else:
3783 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3784 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3785
3786 return vm_vcenter_info
3787
3788
3789 def get_vm_pci_details(self, vmuuid):
3790 """
3791 Method to get VM PCI device details from vCenter
3792
3793 Args:
3794 vmuuid - vApp/VM uuid
3795
3796 Returns:
3797 dict of PCI devices attached to VM
3798
3799 """
3800 vm_pci_devices_info = {}
3801 try:
3802 vcenter_conect, content = self.get_vcenter_content()
3803 vm_moref_id = self.get_vm_moref_id(vmuuid)
3804 if vm_moref_id:
3805 #Get VM and its host
3806 if content:
3807 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3808 if host_obj and vm_obj:
3809 vm_pci_devices_info["host_name"]= host_obj.name
3810 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3811 for device in vm_obj.config.hardware.device:
3812 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3813 device_details={'devide_id':device.backing.id,
3814 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3815 }
3816 vm_pci_devices_info[device.deviceInfo.label] = device_details
3817 else:
3818 self.logger.error("Cannot connect to vCenter while getting "\
3819 "PCI devices information")
3820 return vm_pci_devices_info
3821 except Exception as exp:
3822 self.logger.error("Error occurred while getting VM information"\
3823 " for VM : {}".format(exp))
3824 raise vimconn.vimconnException(message=exp)
3825
3826 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
3827 """
3828 Method to add network adapter type to vm
3829 Args :
3830 network_name - name of network
3831 primary_nic_index - int value for primary nic index
3832 nicIndex - int value for nic index
3833 nic_type - adapter model name to attach to the VM
3834 Returns:
3835 None
3836 """
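# The IP allocation mode is POOL for floating IPs, MANUAL when a fixed
# ip_address is requested and DHCP otherwise. For each VM the
# networkConnectionSection is fetched, the NetworkConnection XML is patched in
# as text and PUT back; the two branches below only differ in whether an
# explicit NetworkAdapterType is written.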
3837
3838 try:
3839 ip_address = None
3840 floating_ip = False
3841 if 'floating_ip' in net: floating_ip = net['floating_ip']
3842
3843 # Stub for ip_address feature
3844 if 'ip_address' in net: ip_address = net['ip_address']
3845
3846 if floating_ip:
3847 allocation_mode = "POOL"
3848 elif ip_address:
3849 allocation_mode = "MANUAL"
3850 else:
3851 allocation_mode = "DHCP"
3852
3853 if not nic_type:
3854 for vms in vapp._get_vms():
3855 vm_id = (vms.id).split(':')[-1]
3856
3857 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3858
3859 response = Http.get(url=url_rest_call,
3860 headers=self.vca.vcloud_session.get_vcloud_headers(),
3861 verify=self.vca.verify,
3862 logger=self.vca.logger)
3863
3864 if response.status_code == 403:
3865 response = self.retry_rest('GET', url_rest_call)
3866
3867 if response.status_code != 200:
3868 self.logger.error("REST call {} failed, reason: {}, "\
3869 "status code: {}".format(url_rest_call,
3870 response.content,
3871 response.status_code))
3872 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3873 "network connection section")
3874
3875 data = response.content
3876 if '<PrimaryNetworkConnectionIndex>' not in data:
3877 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3878 <NetworkConnection network="{}">
3879 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3880 <IsConnected>true</IsConnected>
3881 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3882 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3883 allocation_mode)
3884 # Stub for ip_address feature
3885 if ip_address:
3886 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3887 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3888
3889 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3890 else:
3891 new_item = """<NetworkConnection network="{}">
3892 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3893 <IsConnected>true</IsConnected>
3894 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3895 </NetworkConnection>""".format(network_name, nicIndex,
3896 allocation_mode)
3897 # Stub for ip_address feature
3898 if ip_address:
3899 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3900 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3901
3902 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3903
3904 headers = self.vca.vcloud_session.get_vcloud_headers()
3905 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3906 response = Http.put(url=url_rest_call, headers=headers, data=data,
3907 verify=self.vca.verify,
3908 logger=self.vca.logger)
3909
3910 if response.status_code == 403:
3911 add_headers = {'Content-Type': headers['Content-Type']}
3912 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3913
3914 if response.status_code != 202:
3915 self.logger.error("REST call {} failed, reason: {}, "\
3916 "status code: {}".format(url_rest_call,
3917 response.content,
3918 response.status_code))
3919 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3920 "network connection section")
3921 else:
3922 nic_task = taskType.parseString(response.content, True)
3923 if isinstance(nic_task, GenericTask):
3924 self.vca.block_until_completed(nic_task)
3925 self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
3926 "default NIC type".format(vm_id))
3927 else:
3928 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
3929 "connect NIC type".format(vm_id))
3930 else:
3931 for vms in vapp._get_vms():
3932 vm_id = (vms.id).split(':')[-1]
3933
3934 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3935
3936 response = Http.get(url=url_rest_call,
3937 headers=self.vca.vcloud_session.get_vcloud_headers(),
3938 verify=self.vca.verify,
3939 logger=self.vca.logger)
3940
3941 if response.status_code == 403:
3942 response = self.retry_rest('GET', url_rest_call)
3943
3944 if response.status_code != 200:
3945 self.logger.error("REST call {} failed, reason: {}, "\
3946 "status code: {}".format(url_rest_call,
3947 response.content,
3948 response.status_code))
3949 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3950 "network connection section")
3951 data = response.content
3952 if '<PrimaryNetworkConnectionIndex>' not in data:
3953 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3954 <NetworkConnection network="{}">
3955 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3956 <IsConnected>true</IsConnected>
3957 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3958 <NetworkAdapterType>{}</NetworkAdapterType>
3959 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3960 allocation_mode, nic_type)
3961 # Stub for ip_address feature
3962 if ip_address:
3963 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3964 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3965
3966 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3967 else:
3968 new_item = """<NetworkConnection network="{}">
3969 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3970 <IsConnected>true</IsConnected>
3971 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3972 <NetworkAdapterType>{}</NetworkAdapterType>
3973 </NetworkConnection>""".format(network_name, nicIndex,
3974 allocation_mode, nic_type)
3975 # Stub for ip_address feature
3976 if ip_address:
3977 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3978 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3979
3980 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3981
3982 headers = self.vca.vcloud_session.get_vcloud_headers()
3983 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3984 response = Http.put(url=url_rest_call, headers=headers, data=data,
3985 verify=self.vca.verify,
3986 logger=self.vca.logger)
3987
3988 if response.status_code == 403:
3989 add_headers = {'Content-Type': headers['Content-Type']}
3990 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3991
3992 if response.status_code != 202:
3993 self.logger.error("REST call {} failed, reason: {}, "\
3994 "status code: {}".format(url_rest_call,
3995 response.content,
3996 response.status_code))
3997 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3998 "network connection section")
3999 else:
4000 nic_task = taskType.parseString(response.content, True)
4001 if isinstance(nic_task, GenericTask):
4002 self.vca.block_until_completed(nic_task)
4003 self.logger.info("add_network_adapter_to_vms(): VM {} "\
4004 "connected to NIC type {}".format(vm_id, nic_type))
4005 else:
4006 self.logger.error("add_network_adapter_to_vms(): VM {} "\
4007 "failed to connect NIC type {}".format(vm_id, nic_type))
4008 except Exception as exp:
4009 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
4010 "while adding Network adapter")
4011 raise vimconn.vimconnException(message=exp)
4012
4013
4014 def set_numa_affinity(self, vmuuid, paired_threads_id):
4015 """
4016 Method to assign numa affinity in vm configuration parameters
4017 Args :
4018 vmuuid - vm uuid
4019 paired_threads_id - one or more virtual processor
4020 numbers
4021 Returns:
4022 None (errors raise vimconnException)
4023 """
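# NUMA affinity is applied as the extraConfig option 'numa.nodeAffinity'
# through ReconfigVM_Task and then read back from vm_obj.config.extraConfig
# to confirm it was stored.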
4024 try:
4025 vcenter_conect, content = self.get_vcenter_content()
4026 vm_moref_id = self.get_vm_moref_id(vmuuid)
4027
4028 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
4029 if vm_obj:
4030 config_spec = vim.vm.ConfigSpec()
4031 config_spec.extraConfig = []
4032 opt = vim.option.OptionValue()
4033 opt.key = 'numa.nodeAffinity'
4034 opt.value = str(paired_threads_id)
4035 config_spec.extraConfig.append(opt)
4036 task = vm_obj.ReconfigVM_Task(config_spec)
4037 if task:
4038 result = self.wait_for_vcenter_task(task, vcenter_conect)
4039 extra_config = vm_obj.config.extraConfig
4040 flag = False
4041 for opts in extra_config:
4042 if 'numa.nodeAffinity' in opts.key:
4043 flag = True
4044 self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
4045 "value {} for vm {}".format(opt.value, vm_obj))
4046 if flag:
4047 return
4048 else:
4049 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
4050 except Exception as exp:
4051 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
4052 "for VM {} : {}".format(vm_obj, vm_moref_id))
4053 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
4054 "affinity".format(exp))
4055
4056
4057 def cloud_init(self, vapp, cloud_config):
4058 """
4059 Method to inject ssh-key
4060 vapp - vapp object
4061 cloud_config a dictionary with:
4062 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
4063 'users': (optional) list of users to be inserted, each item is a dict with:
4064 'name': (mandatory) user name,
4065 'key-pairs': (optional) list of strings with the public key to be inserted to the user
4066 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
4067 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
4068 'config-files': (optional). List of files to be transferred. Each item is a dict with:
4069 'dest': (mandatory) string with the destination absolute path
4070 'encoding': (optional, by default text). Can be one of:
4071 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
4072 'content' (mandatory): string with the content of the file
4073 'permissions': (optional) string with file permissions, typically octal notation '0644'
4074 'owner': (optional) file owner, string with the format 'owner:group'
4075 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
4076 """
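# Note: only 'key-pairs' and 'users' are honoured here; they are turned into
# a guest-customization script (see format_script). The remaining cloud_config
# keys listed above are not processed by this method.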
4077 try:
4078 if not isinstance(cloud_config, dict):
4079 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
4080 else:
4081 key_pairs = []
4082 userdata = []
4083 if "key-pairs" in cloud_config:
4084 key_pairs = cloud_config["key-pairs"]
4085
4086 if "users" in cloud_config:
4087 userdata = cloud_config["users"]
4088
4089 self.logger.debug("cloud_init : Guest os customization started..")
4090 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
4091 self.guest_customization(vapp, customize_script)
4092
4093 except Exception as exp:
4094 self.logger.error("cloud_init : exception occurred while injecting "\
4095 "ssh-key")
4096 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
4097 "ssh-key".format(exp))
4098
4099 def format_script(self, key_pairs=[], users_list=[]):
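# Builds the bash guest-customization script: during the "precustomization"
# phase it installs the given public keys for root and creates each requested
# user (with their keys) under /home/<user_name>.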
4100 bash_script = """
4101 #!/bin/bash
4102 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4103 if [ "$1" = "precustomization" ];then
4104 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4105 """
4106
4107 keys = "\n".join(key_pairs)
4108 if keys:
4109 keys_data = """
4110 if [ ! -d /root/.ssh ];then
4111 mkdir /root/.ssh
4112 chown root:root /root/.ssh
4113 chmod 700 /root/.ssh
4114 touch /root/.ssh/authorized_keys
4115 chown root:root /root/.ssh/authorized_keys
4116 chmod 600 /root/.ssh/authorized_keys
4117 # make centos with selinux happy
4118 which restorecon && restorecon -Rv /root/.ssh
4119 else
4120 touch /root/.ssh/authorized_keys
4121 chown root:root /root/.ssh/authorized_keys
4122 chmod 600 /root/.ssh/authorized_keys
4123 fi
4124 echo '{key}' >> /root/.ssh/authorized_keys
4125 """.format(key=keys)
4126
4127 bash_script+= keys_data
4128
4129 for user in users_list:
4130 if 'name' in user: user_name = user['name']
4131 if 'key-pairs' in user:
4132 user_keys = "\n".join(user['key-pairs'])
4133 else:
4134 user_keys = None
4135
4136 add_user_name = """
4137 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
4138 """.format(user_name=user_name)
4139
4140 bash_script+= add_user_name
4141
4142 if user_keys:
4143 user_keys_data = """
4144 mkdir /home/{user_name}/.ssh
4145 chown {user_name}:{user_name} /home/{user_name}/.ssh
4146 chmod 700 /home/{user_name}/.ssh
4147 touch /home/{user_name}/.ssh/authorized_keys
4148 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
4149 chmod 600 /home/{user_name}/.ssh/authorized_keys
4150 # make centos with selinux happy
4151 which restorecon && restorecon -Rv /home/{user_name}/.ssh
4152 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
4153 """.format(user_name=user_name,user_key=user_keys)
4154
4155 bash_script+= user_keys_data
4156
4157 return bash_script+"\n\tfi"
4158
4159 def guest_customization(self, vapp, customize_script):
4160 """
4161 Method to customize guest os
4162 vapp - Vapp object
4163 customize_script - Customize script to be run at first boot of VM.
4164 """
4165 for vm in vapp._get_vms():
4166 vm_name = vm.name
4167 task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
4168 if isinstance(task, GenericTask):
4169 self.vca.block_until_completed(task)
4170 self.logger.info("guest_customization : customized guest os task "\
4171 "completed for VM {}".format(vm_name))
4172 else:
4173 self.logger.error("guest_customization : task for customized guest os "\
4174 "failed for VM {}".format(vm_name))
4175 raise vimconn.vimconnException("guest_customization : failed to perform "\
4176 "guest os customization on VM {}".format(vm_name))
4177
4178 def add_new_disk(self, vapp_uuid, disk_size):
4179 """
4180 Method to create an empty vm disk
4181
4182 Args:
4183 vapp_uuid - is vapp identifier.
4184 disk_size - size of disk to be created in GB
4185
4186 Returns:
4187 None
4188 """
4189 status = False
4190 vm_details = None
4191 try:
4192 #Disk size in GB, convert it into MB
4193 if disk_size is not None:
4194 disk_size_mb = int(disk_size) * 1024
4195 vm_details = self.get_vapp_details_rest(vapp_uuid)
4196
4197 if vm_details and "vm_virtual_hardware" in vm_details:
4198 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4199 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4200 status = self.add_new_disk_rest(disk_href, disk_size_mb)
4201
4202 except Exception as exp:
4203 msg = "Error occurred while creating new disk {}.".format(exp)
4204 self.rollback_newvm(vapp_uuid, msg)
4205
4206 if status:
4207 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4208 else:
4209 #If failed to add disk, delete VM
4210 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"] if vm_details else vapp_uuid)
4211 self.rollback_newvm(vapp_uuid, msg)
4212
4213
4214 def add_new_disk_rest(self, disk_href, disk_size_mb):
4215 """
4216 Retrieves the vApp Disks section & adds a new empty disk
4217
4218 Args:
4219 disk_href: Disk section href to add disk
4220 disk_size_mb: Disk size in MB
4221
4222 Returns: Status of add new disk task
4223 """
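# Flow: GET the RasdItemsList, take the bus type/subtype and the highest
# InstanceID from the existing "Hard disk" items, append a new Item with the
# requested capacity and PUT the list back, waiting for the returned task.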
4224 status = False
4225 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
4226 response = Http.get(url=disk_href,
4227 headers=self.vca.vcloud_session.get_vcloud_headers(),
4228 verify=self.vca.verify,
4229 logger=self.vca.logger)
4230
4231 if response.status_code == 403:
4232 response = self.retry_rest('GET', disk_href)
4233
4234 if response.status_code != requests.codes.ok:
4235 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
4236 .format(disk_href, response.status_code))
4237 return status
4238 try:
4239 #Find bus type & max of instance IDs assigned to disks
4240 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4241 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4242 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4243 instance_id = 0
4244 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4245 if item.find("rasd:Description",namespaces).text == "Hard disk":
4246 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
4247 if inst_id > instance_id:
4248 instance_id = inst_id
4249 disk_item = item.find("rasd:HostResource" ,namespaces)
4250 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
4251 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
4252
4253 instance_id = instance_id + 1
4254 new_item = """<Item>
4255 <rasd:Description>Hard disk</rasd:Description>
4256 <rasd:ElementName>New disk</rasd:ElementName>
4257 <rasd:HostResource
4258 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
4259 vcloud:capacity="{}"
4260 vcloud:busSubType="{}"
4261 vcloud:busType="{}"></rasd:HostResource>
4262 <rasd:InstanceID>{}</rasd:InstanceID>
4263 <rasd:ResourceType>17</rasd:ResourceType>
4264 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
4265
4266 new_data = response.content
4267 #Add new item at the bottom
4268 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
4269
4270 # Send PUT request to modify virtual hardware section with new disk
4271 headers = self.vca.vcloud_session.get_vcloud_headers()
4272 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4273
4274 response = Http.put(url=disk_href,
4275 data=new_data,
4276 headers=headers,
4277 verify=self.vca.verify, logger=self.logger)
4278
4279 if response.status_code == 403:
4280 add_headers = {'Content-Type': headers['Content-Type']}
4281 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
4282
4283 if response.status_code != 202:
4284 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
4285 .format(disk_href, response.status_code, response.content))
4286 else:
4287 add_disk_task = taskType.parseString(response.content, True)
4288 if type(add_disk_task) is GenericTask:
4289 status = self.vca.block_until_completed(add_disk_task)
4290 if not status:
4291 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
4292
4293 except Exception as exp:
4294 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
4295
4296 return status
4297
4298
4299 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
4300 """
4301 Method to add existing disk to vm
4302 Args :
4303 catalogs - List of VDC catalogs
4304 image_id - Catalog ID
4305 template_name - Name of template in catalog
4306 vapp_uuid - UUID of vApp
4307 Returns:
4308 None
4309 """
4310 disk_info = None
4311 vcenter_conect, content = self.get_vcenter_content()
4312 #find moref-id of vm in image
4313 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
4314 image_id=image_id,
4315 )
4316
4317 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
4318 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
4319 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
4320 if catalog_vm_moref_id:
4321 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
4322 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
4323 if catalog_vm_obj:
4324 #find existing disk
4325 disk_info = self.find_disk(catalog_vm_obj)
4326 else:
4327 exp_msg = "No VM with image id {} found".format(image_id)
4328 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4329 else:
4330 exp_msg = "No Image found with image ID {} ".format(image_id)
4331 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4332
4333 if disk_info:
4334 self.logger.info("Existing disk_info : {}".format(disk_info))
4335 #get VM
4336 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4337 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
4338 if vm_obj:
4339 status = self.add_disk(vcenter_conect=vcenter_conect,
4340 vm=vm_obj,
4341 disk_info=disk_info,
4342 size=size,
4343 vapp_uuid=vapp_uuid
4344 )
4345 if status:
4346 self.logger.info("Disk from image id {} added to {}".format(image_id,
4347 vm_obj.config.name)
4348 )
4349 else:
4350 msg = "No disk found with image id {} to add in VM {}".format(
4351 image_id,
4352 vm_obj.config.name)
4353 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4354
4355
4356 def find_disk(self, vm_obj):
4357 """
4358 Method to find details of existing disk in VM
4359 Args :
4360 vm_obj - vCenter object of VM
4362 Returns:
4363 disk_info : dict of disk details
4364 """
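# Returns the backing file name, datastore and capacity of the first flat
# (FlatVer2) virtual disk found on the given VM.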
4365 disk_info = {}
4366 if vm_obj:
4367 try:
4368 devices = vm_obj.config.hardware.device
4369 for device in devices:
4370 if type(device) is vim.vm.device.VirtualDisk:
4371 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4372 disk_info["full_path"] = device.backing.fileName
4373 disk_info["datastore"] = device.backing.datastore
4374 disk_info["capacityKB"] = device.capacityInKB
4375 break
4376 except Exception as exp:
4377 self.logger.error("find_disk() : exception occurred while "\
4378 "getting existing disk details :{}".format(exp))
4379 return disk_info
4380
4381
4382 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4383 """
4384 Method to add existing disk in VM
4385 Args :
4386 vcenter_conect - vCenter content object
4387 vm - vCenter vm object
4388 disk_info : dict of disk details
4389 Returns:
4390 status : status of add disk task
4391 """
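# The new disk reuses the flat VMDK (full_path/datastore) taken from the
# catalog template; the next free unit number is picked (skipping 7, reserved
# for the SCSI controller) and the larger of the template capacity and the
# requested size is used.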
4392 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4393 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4394 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4395 if size is not None:
4396 #Convert size from GB to KB
4397 sizeKB = int(size) * 1024 * 1024
4398 #compare size of existing disk and user-given size; assign whichever is greater
4399 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4400 sizeKB, capacityKB))
4401 if sizeKB > capacityKB:
4402 capacityKB = sizeKB
4403
4404 if datastore and fullpath and capacityKB:
4405 try:
4406 spec = vim.vm.ConfigSpec()
4407 # get all disks on a VM, set unit_number to the next available
4408 unit_number = 0
4409 for dev in vm.config.hardware.device:
4410 if hasattr(dev.backing, 'fileName'):
4411 unit_number = int(dev.unitNumber) + 1
4412 # unit_number 7 reserved for scsi controller
4413 if unit_number == 7:
4414 unit_number += 1
4415 if isinstance(dev, vim.vm.device.VirtualDisk):
4416 #vim.vm.device.VirtualSCSIController
4417 controller_key = dev.controllerKey
4418
4419 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4420 unit_number, controller_key))
4421 # add disk here
4422 dev_changes = []
4423 disk_spec = vim.vm.device.VirtualDeviceSpec()
4424 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4425 disk_spec.device = vim.vm.device.VirtualDisk()
4426 disk_spec.device.backing = \
4427 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4428 disk_spec.device.backing.thinProvisioned = True
4429 disk_spec.device.backing.diskMode = 'persistent'
4430 disk_spec.device.backing.datastore = datastore
4431 disk_spec.device.backing.fileName = fullpath
4432
4433 disk_spec.device.unitNumber = unit_number
4434 disk_spec.device.capacityInKB = capacityKB
4435 disk_spec.device.controllerKey = controller_key
4436 dev_changes.append(disk_spec)
4437 spec.deviceChange = dev_changes
4438 task = vm.ReconfigVM_Task(spec=spec)
4439 status = self.wait_for_vcenter_task(task, vcenter_conect)
4440 return status
4441 except Exception as exp:
4442 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4443 "{} to vm {}".format(exp,
4444 fullpath,
4445 vm.config.name)
4446 self.rollback_newvm(vapp_uuid, exp_msg)
4447 else:
4448 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4449 self.rollback_newvm(vapp_uuid, msg)
4450
4451
4452 def get_vcenter_content(self):
4453 """
4454 Get the vsphere content object
4455 """
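# Connects using an unverified SSL context when available (self-signed vCenter
# certificates) and registers Disconnect with atexit so the session is closed
# when the process exits.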
4456 try:
4457 vm_vcenter_info = self.get_vm_vcenter_info()
4458 except Exception as exp:
4459 self.logger.error("Error occurred while getting vCenter information"\
4460 " for VM : {}".format(exp))
4461 raise vimconn.vimconnException(message=exp)
4462
4463 context = None
4464 if hasattr(ssl, '_create_unverified_context'):
4465 context = ssl._create_unverified_context()
4466
4467 vcenter_conect = SmartConnect(
4468 host=vm_vcenter_info["vm_vcenter_ip"],
4469 user=vm_vcenter_info["vm_vcenter_user"],
4470 pwd=vm_vcenter_info["vm_vcenter_password"],
4471 port=int(vm_vcenter_info["vm_vcenter_port"]),
4472 sslContext=context
4473 )
4474 atexit.register(Disconnect, vcenter_conect)
4475 content = vcenter_conect.RetrieveContent()
4476 return vcenter_conect, content
4477
4478
4479 def get_vm_moref_id(self, vapp_uuid):
4480 """
4481 Get the moref_id of given VM
4482 """
4483 try:
4484 if vapp_uuid:
4485 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4486 if vm_details and "vm_vcenter_info" in vm_details:
4487 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4488
4489 return vm_moref_id
4490
4491 except Exception as exp:
4492 self.logger.error("Error occurred while getting VM moref ID "\
4493 " for VM : {}".format(exp))
4494 return None
4495
4496
4497 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
4498 """
4499 Method to get vApp template details
4500 Args :
4501 catalogs - list of VDC catalogs
4502 image_id - Catalog ID to find
4503 template_name : template name in catalog
4504 Returns:
4505 parsed_response : dict of vApp template details
4506 """
4507 parsed_response = {}
4508
4509 vca = self.connect_as_admin()
4510 if not vca:
4511 raise vimconn.vimconnConnectionException("self.connect_as_admin() failed")
4512
4513 try:
4514 catalog = self.get_catalog_obj(image_id, catalogs)
4515 if catalog:
4516 template_name = self.get_catalogbyid(image_id, catalogs)
4517 catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
4518 if len(catalog_items) == 1:
4519 response = Http.get(catalog_items[0].get_href(),
4520 headers=vca.vcloud_session.get_vcloud_headers(),
4521 verify=vca.verify,
4522 logger=vca.logger)
4523 catalogItem = XmlElementTree.fromstring(response.content)
4524 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
4525 vapp_tempalte_href = entity.get("href")
4526 #get vapp details and parse moref id
4527
4528 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4529 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4530 'vmw': 'http://www.vmware.com/schema/ovf',
4531 'vm': 'http://www.vmware.com/vcloud/v1.5',
4532 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4533 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
4534 'xmlns':"http://www.vmware.com/vcloud/v1.5"
4535 }
4536
4537 if vca.vcloud_session and vca.vcloud_session.organization:
4538 response = Http.get(url=vapp_tempalte_href,
4539 headers=vca.vcloud_session.get_vcloud_headers(),
4540 verify=vca.verify,
4541 logger=vca.logger
4542 )
4543
4544 if response.status_code != requests.codes.ok:
4545 self.logger.debug("REST API call {} failed. Return status code {}".format(
4546                                 vapp_template_href, response.status_code))
4547
4548 else:
4549 xmlroot_respond = XmlElementTree.fromstring(response.content)
4550 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4551 if children_section is not None:
4552 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4553 if vCloud_extension_section is not None:
4554 vm_vcenter_info = {}
4555 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4556 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4557 if vmext is not None:
4558 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4559 parsed_response["vm_vcenter_info"]= vm_vcenter_info
4560
4561         except Exception as exp:
4562             self.logger.info("Error occurred calling REST API for getting vApp details: {}".format(exp))
4563
4564 return parsed_response
4565
4566
4567 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
4568 """
4569 Method to delete vApp
4570 Args :
4571 vapp_uuid - vApp UUID
4572 msg - Error message to be logged
4573 exp_type : Exception type
4574 Returns:
4575 None
4576 """
4577 if vapp_uuid:
4578 status = self.delete_vminstance(vapp_uuid)
4579 else:
4580 msg = "No vApp ID"
4581 self.logger.error(msg)
4582 if exp_type == "Genric":
4583 raise vimconn.vimconnException(msg)
4584 elif exp_type == "NotFound":
4585 raise vimconn.vimconnNotFoundException(message=msg)
4586
4587 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4588 """
4589 Method to attach SRIOV adapters to VM
4590
4591 Args:
4592 vapp_uuid - uuid of vApp/VM
4593             sriov_nets - SRIOV devices information as specified in VNFD (flavor)
4594 vmname_andid - vmname
4595
4596 Returns:
4597             The status of the add SRIOV adapter task, the vm object and the
4598 vcenter_conect object
4599 """
4600 vm_obj = None
4601 vcenter_conect, content = self.get_vcenter_content()
4602 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4603
4604 if vm_moref_id:
4605 try:
4606 no_of_sriov_devices = len(sriov_nets)
4607 if no_of_sriov_devices > 0:
4608 #Get VM and its host
4609 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4610 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4611 if host_obj and vm_obj:
4612                     # get SRIOV devices from the host on which the vApp is currently installed
4613 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4614 no_of_sriov_devices,
4615 )
4616
4617 if len(avilable_sriov_devices) == 0:
4618 #find other hosts with active pci devices
4619 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4620 content,
4621 no_of_sriov_devices,
4622 )
4623
4624 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4625 #Migrate vm to the host where SRIOV devices are available
4626 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4627 new_host_obj))
4628 task = self.relocate_vm(new_host_obj, vm_obj)
4629 if task is not None:
4630 result = self.wait_for_vcenter_task(task, vcenter_conect)
4631 self.logger.info("Migrate VM status: {}".format(result))
4632 host_obj = new_host_obj
4633 else:
4634                                     self.logger.error("Failed to migrate VM : {}".format(vmname_andid))
4635 raise vimconn.vimconnNotFoundException(
4636                                     "Failed to migrate VM {} to host {}".format(
4637 vmname_andid,
4638 new_host_obj)
4639 )
4640
4641 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4642 #Add SRIOV devices one by one
4643 for sriov_net in sriov_nets:
4644 network_name = sriov_net.get('net_id')
4645 dvs_portgr_name = self.create_dvPort_group(network_name)
4646 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
4647                             # add VLAN ID: modify portgroup for VLAN ID
4648 self.configure_vlanID(content, vcenter_conect, network_name)
4649
4650 task = self.add_sriov_to_vm(content,
4651 vm_obj,
4652 host_obj,
4653 network_name,
4654 avilable_sriov_devices[0]
4655 )
4656 if task:
4657 status= self.wait_for_vcenter_task(task, vcenter_conect)
4658 if status:
4659 self.logger.info("Added SRIOV {} to VM {}".format(
4660 no_of_sriov_devices,
4661 str(vm_obj)))
4662 else:
4663                                         self.logger.error("Failed to add SRIOV {} to VM {}".format(
4664 no_of_sriov_devices,
4665 str(vm_obj)))
4666 raise vimconn.vimconnUnexpectedResponse(
4667                                         "Failed to add SRIOV adapter to VM {}".format(str(vm_obj))
4668 )
4669 return True, vm_obj, vcenter_conect
4670 else:
4671 self.logger.error("Currently there is no host with"\
4672                                               " {} available SRIOV "\
4673 "VFs required for VM {}".format(
4674 no_of_sriov_devices,
4675 vmname_andid)
4676 )
4677 raise vimconn.vimconnNotFoundException(
4678 "Currently there is no host with {} "\
4679                                 "available SRIOV devices required for VM {}".format(
4680 no_of_sriov_devices,
4681 vmname_andid))
4682 else:
4683                     self.logger.debug("No information about SRIOV devices {}".format(sriov_nets))
4684
4685 except vmodl.MethodFault as error:
4686             self.logger.error("Error occurred while adding SRIOV: {}".format(error))
4687 return None, vm_obj, vcenter_conect
4688
4689
4690 def get_sriov_devices(self,host, no_of_vfs):
4691 """
4692 Method to get the details of SRIOV devices on given host
4693 Args:
4694 host - vSphere host object
4695 no_of_vfs - number of VFs needed on host
4696
4697 Returns:
4698 array of SRIOV devices
4699 """
4700 sriovInfo=[]
4701 if host:
4702 for device in host.config.pciPassthruInfo:
4703 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4704 if device.numVirtualFunction >= no_of_vfs:
4705 sriovInfo.append(device)
4706 break
4707 return sriovInfo
4708
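    # Note on the entries returned above (descriptive comment, not original code): each
    # element is a vim.host.SriovInfo record taken from host.config.pciPassthruInfo,
    # i.e. a physical function with sriovActive == True and at least no_of_vfs virtual
    # functions. Its .id field (the PCI address of the physical function) is what
    # add_sriov_to_vm() uses for the SR-IOV backing of the new NIC.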
4709
4710 def get_host_and_sriov_devices(self, content, no_of_vfs):
4711 """
4712         Method to get the details of SRIOV devices on all hosts
4713
4714 Args:
4715             content - vCenter content object
4716 no_of_vfs - number of pci VFs needed on host
4717
4718 Returns:
4719 array of SRIOV devices and host object
4720 """
4721 host_obj = None
4722 sriov_device_objs = None
4723 try:
4724 if content:
4725 container = content.viewManager.CreateContainerView(content.rootFolder,
4726 [vim.HostSystem], True)
4727 for host in container.view:
4728 devices = self.get_sriov_devices(host, no_of_vfs)
4729 if devices:
4730 host_obj = host
4731 sriov_device_objs = devices
4732 break
4733 except Exception as exp:
4734 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4735
4736 return host_obj,sriov_device_objs
4737
4738
4739 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
4740 """
4741 Method to add SRIOV adapter to vm
4742
4743 Args:
4744 host_obj - vSphere host object
4745 vm_obj - vSphere vm object
4746 content - vCenter content object
4747             network_name - name of distributed virtual portgroup
4748 sriov_device - SRIOV device info
4749
4750 Returns:
4751 task object
4752 """
4753 devices = []
4754 vnic_label = "sriov nic"
4755 try:
4756 dvs_portgr = self.get_dvport_group(network_name)
4757 network_name = dvs_portgr.name
4758 nic = vim.vm.device.VirtualDeviceSpec()
4759 # VM device
4760 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4761 nic.device = vim.vm.device.VirtualSriovEthernetCard()
4762 nic.device.addressType = 'assigned'
4763 #nic.device.key = 13016
4764 nic.device.deviceInfo = vim.Description()
4765 nic.device.deviceInfo.label = vnic_label
4766 nic.device.deviceInfo.summary = network_name
4767 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
4768
4769 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
4770 nic.device.backing.deviceName = network_name
4771 nic.device.backing.useAutoDetect = False
4772 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
4773 nic.device.connectable.startConnected = True
4774 nic.device.connectable.allowGuestControl = True
4775
4776 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
4777 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
4778 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
4779
4780 devices.append(nic)
4781 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
4782 task = vm_obj.ReconfigVM_Task(vmconf)
4783 return task
4784 except Exception as exp:
4785 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
4786 return None
4787
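    # Usage sketch (illustrative): the ReconfigVM_Task returned above is awaited by the
    # caller with the vCenter task helper, as add_sriov() does:
    #
    #   task = self.add_sriov_to_vm(content, vm_obj, host_obj, network_name, sriov_device)
    #   if task:
    #       status = self.wait_for_vcenter_task(task, vcenter_conect)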
4788
4789 def create_dvPort_group(self, network_name):
4790 """
4791         Method to create distributed virtual portgroup
4792
4793 Args:
4794 network_name - name of network/portgroup
4795
4796 Returns:
4797 portgroup key
4798 """
4799 try:
4800 new_network_name = [network_name, '-', str(uuid.uuid4())]
4801 network_name=''.join(new_network_name)
4802 vcenter_conect, content = self.get_vcenter_content()
4803
4804 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4805 if dv_switch:
4806 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4807 dv_pg_spec.name = network_name
4808
4809 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4810 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4811 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4812 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4813 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4814 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4815
4816 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4817 self.wait_for_vcenter_task(task, vcenter_conect)
4818
4819 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4820 if dvPort_group:
4821                     self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
4822 return dvPort_group.key
4823 else:
4824                     self.logger.debug("No distributed virtual port group found with name {}".format(network_name))
4825
4826 except Exception as exp:
4827             self.logger.error("Error occurred while creating distributed virtual port group {}"\
4828 " : {}".format(network_name, exp))
4829 return None
4830
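    # Note (descriptive): create_dvPort_group() returns the portgroup *key* rather than
    # its display name, and get_dvport_group() below matches entries on item.key, so the
    # generated "<network_name>-<uuid>" name is only used at creation time while later
    # lookups, reconfiguration and deletion go through the key.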
4831 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4832 """
4833         Method to reconfigure distributed virtual portgroup
4834
4835 Args:
4836             dvPort_group_name - name of distributed virtual portgroup
4837 content - vCenter content object
4838             config_info - distributed virtual portgroup configuration
4839
4840 Returns:
4841 task object
4842 """
4843 try:
4844 dvPort_group = self.get_dvport_group(dvPort_group_name)
4845 if dvPort_group:
4846 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4847 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4848 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4849 if "vlanID" in config_info:
4850 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4851 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4852
4853 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4854 return task
4855 else:
4856 return None
4857 except Exception as exp:
4858             self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
4859 " : {}".format(dvPort_group_name, exp))
4860 return None
4861
4862
4863     def destroy_dvport_group(self, dvPort_group_name):
4864         """
4865         Method to destroy distributed virtual portgroup
4866
4867         Args:
4868             dvPort_group_name - name of distributed virtual portgroup
4869
4870         Returns:
4871             Task status if the portgroup was deleted successfully, else None
4872         """
4873 vcenter_conect, content = self.get_vcenter_content()
4874 try:
4875 status = None
4876 dvPort_group = self.get_dvport_group(dvPort_group_name)
4877 if dvPort_group:
4878 task = dvPort_group.Destroy_Task()
4879 status = self.wait_for_vcenter_task(task, vcenter_conect)
4880 return status
4881 except vmodl.MethodFault as exp:
4882             self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
4883 exp, dvPort_group_name))
4884 return None
4885
4886
4887 def get_dvport_group(self, dvPort_group_name):
4888 """
4889         Method to get distributed virtual portgroup
4890
4891 Args:
4892             dvPort_group_name - name of distributed virtual portgroup
4893
4894 Returns:
4895 portgroup object
4896 """
4897 vcenter_conect, content = self.get_vcenter_content()
4898 dvPort_group = None
4899 try:
4900 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4901 for item in container.view:
4902 if item.key == dvPort_group_name:
4903 dvPort_group = item
4904 break
4905 return dvPort_group
4906 except vmodl.MethodFault as exp:
4907             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
4908 exp, dvPort_group_name))
4909 return None
4910
4911 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4912 """
4913         Method to get distributed virtual portgroup VLAN ID
4914
4915 Args:
4916             dvPort_group_name - name of distributed virtual portgroup
4917
4918 Returns:
4919 vlan ID
4920 """
4921 vlanId = None
4922 try:
4923 dvPort_group = self.get_dvport_group(dvPort_group_name)
4924 if dvPort_group:
4925 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4926 except vmodl.MethodFault as exp:
4927             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
4928 exp, dvPort_group_name))
4929 return vlanId
4930
4931
4932 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4933 """
4934         Method to configure a VLAN ID on a distributed virtual portgroup
4935
4936 Args:
4937             dvPort_group_name - name of distributed virtual portgroup
4938
4939 Returns:
4940 None
4941 """
4942 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4943 if vlanID == 0:
4944 #configure vlanID
4945 vlanID = self.genrate_vlanID(dvPort_group_name)
4946 config = {"vlanID":vlanID}
4947 task = self.reconfig_portgroup(content, dvPort_group_name,
4948 config_info=config)
4949 if task:
4950 status= self.wait_for_vcenter_task(task, vcenter_conect)
4951 if status:
4952 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4953 dvPort_group_name,vlanID))
4954 else:
4955                 self.logger.error("Failed to reconfigure portgroup {} for vlanID {}".format(
4956 dvPort_group_name, vlanID))
4957
4958
4959 def genrate_vlanID(self, network_name):
4960 """
4961 Method to get unused vlanID
4962 Args:
4963 network_name - name of network/portgroup
4964 Returns:
4965 vlanID
4966 """
4967 vlan_id = None
4968 used_ids = []
4969         if self.config.get('vlanID_range') is None:
4970             raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4971                 "in the config before creating an SRIOV network with a VLAN tag")
4972 if "used_vlanIDs" not in self.persistent_info:
4973 self.persistent_info["used_vlanIDs"] = {}
4974 else:
4975 used_ids = self.persistent_info["used_vlanIDs"].values()
4976
4977 for vlanID_range in self.config.get('vlanID_range'):
4978             start_vlanid, end_vlanid = vlanID_range.split("-")
4979             if int(start_vlanid) > int(end_vlanid):
4980 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4981 vlanID_range))
4982
4983 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4984 if id not in used_ids:
4985 vlan_id = id
4986 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4987 return vlan_id
4988 if vlan_id is None:
4989 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4990
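    # Illustrative config sketch (assumed format, consistent with the parsing above where
    # each entry is split on "-"): the VIM account 'config' would carry something like
    #
    #   vlanID_range:
    #       - "3000-3099"
    #       - "4000-4050"
    #
    # Allocated IDs are remembered per network name in self.persistent_info["used_vlanIDs"].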
4991
4992 def get_obj(self, content, vimtype, name):
4993 """
4994 Get the vsphere object associated with a given text name
4995 """
4996 obj = None
4997 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4998 for item in container.view:
4999 if item.name == name:
5000 obj = item
5001 break
5002 return obj
5003
5004
5005 def insert_media_to_vm(self, vapp, image_id):
5006 """
5007 Method to insert media CD-ROM (ISO image) from catalog to vm.
5008 vapp - vapp object to get vm id
5009         image_id - image id of the CD-ROM (ISO) to be inserted into the vm
5010 """
5011 # create connection object
5012 vca = self.connect()
5013 try:
5014 # fetching catalog details
5015 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
5016 response = Http.get(url=rest_url,
5017 headers=vca.vcloud_session.get_vcloud_headers(),
5018 verify=vca.verify,
5019 logger=vca.logger)
5020
5021 if response.status_code != 200:
5022                 self.logger.error("REST call {} failed reason : {} "\
5023                                   "status code : {}".format(rest_url,
5024 response.content,
5025 response.status_code))
5026 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
5027 "catalog details")
5028 # searching iso name and id
5029 iso_name,media_id = self.get_media_details(vca, response.content)
5030
5031 if iso_name and media_id:
5032 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5033 <ns6:MediaInsertOrEjectParams
5034 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
5035 <ns6:Media
5036 type="application/vnd.vmware.vcloud.media+xml"
5037 name="{}.iso"
5038 id="urn:vcloud:media:{}"
5039 href="https://{}/api/media/{}"/>
5040 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
5041 vca.host,media_id)
5042
5043 for vms in vapp._get_vms():
5044 vm_id = (vms.id).split(':')[-1]
5045
5046 headers = vca.vcloud_session.get_vcloud_headers()
5047 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
5048 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
5049
5050 response = Http.post(url=rest_url,
5051 headers=headers,
5052 data=data,
5053 verify=vca.verify,
5054 logger=vca.logger)
5055
5056 if response.status_code != 202:
5057 self.logger.error("Failed to insert CD-ROM to vm")
5058                         raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert "\
5059 "ISO image to vm")
5060 else:
5061 task = taskType.parseString(response.content, True)
5062 if isinstance(task, GenericTask):
5063 vca.block_until_completed(task)
5064                             self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
5065 " image to vm {}".format(vm_id))
5066 except Exception as exp:
5067 self.logger.error("insert_media_to_vm() : exception occurred "\
5068 "while inserting media CD-ROM")
5069 raise vimconn.vimconnException(message=exp)
5070
5071
5072 def get_media_details(self, vca, content):
5073 """
5074 Method to get catalog item details
5075 vca - connection object
5076 content - Catalog details
5077 Return - Media name, media id
5078 """
5079 cataloghref_list = []
5080 try:
5081 if content:
5082 vm_list_xmlroot = XmlElementTree.fromstring(content)
5083 for child in vm_list_xmlroot.iter():
5084 if 'CatalogItem' in child.tag:
5085 cataloghref_list.append(child.attrib.get('href'))
5086 if cataloghref_list is not None:
5087 for href in cataloghref_list:
5088 if href:
5089 response = Http.get(url=href,
5090 headers=vca.vcloud_session.get_vcloud_headers(),
5091 verify=vca.verify,
5092 logger=vca.logger)
5093 if response.status_code != 200:
5094                                 self.logger.error("REST call {} failed reason : {} "\
5095 "status code : {}".format(href,
5096 response.content,
5097 response.status_code))
5098 raise vimconn.vimconnException("get_media_details : Failed to get "\
5099 "catalogitem details")
5100 list_xmlroot = XmlElementTree.fromstring(response.content)
5101 for child in list_xmlroot.iter():
5102 if 'Entity' in child.tag:
5103 if 'media' in child.attrib.get('href'):
5104 name = child.attrib.get('name')
5105 media_id = child.attrib.get('href').split('/').pop()
5106 return name,media_id
5107 else:
5108 self.logger.debug("Media name and id not found")
5109 return False,False
5110 except Exception as exp:
5111 self.logger.error("get_media_details : exception occurred "\
5112 "getting media details")
5113 raise vimconn.vimconnException(message=exp)
5114
5115
5116 def retry_rest(self, method, url, add_headers=None, data=None):
5117 """ Method to get Token & retry respective REST request
5118 Args:
5119             method - REST method, one of 'GET', 'PUT', 'POST' or 'DELETE'
5120 url - request url to be used
5121 add_headers - Additional headers (optional)
5122 data - Request payload data to be passed in request
5123 Returns:
5124 response - Response of request
5125 """
5126 response = None
5127
5128 #Get token
5129 self.get_token()
5130
5131 headers=self.vca.vcloud_session.get_vcloud_headers()
5132
5133 if add_headers:
5134 headers.update(add_headers)
5135
5136 if method == 'GET':
5137 response = Http.get(url=url,
5138 headers=headers,
5139 verify=self.vca.verify,
5140 logger=self.vca.logger)
5141 elif method == 'PUT':
5142 response = Http.put(url=url,
5143 data=data,
5144 headers=headers,
5145 verify=self.vca.verify,
5146 logger=self.logger)
5147 elif method == 'POST':
5148 response = Http.post(url=url,
5149 headers=headers,
5150 data=data,
5151 verify=self.vca.verify,
5152 logger=self.vca.logger)
5153 elif method == 'DELETE':
5154 response = Http.delete(url=url,
5155 headers=headers,
5156 verify=self.vca.verify,
5157 logger=self.vca.logger)
5158 return response
5159
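    # Usage sketch (illustrative, not from the original code): retry_rest() covers the case
    # where a vCloud REST call is rejected because the session token has expired, e.g.:
    #
    #   response = Http.get(url=url, headers=headers, verify=self.vca.verify, logger=self.vca.logger)
    #   if response.status_code == 403:   # assumed "token expired" status; refresh and retry once
    #       response = self.retry_rest('GET', url)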
5160
5161 def get_token(self):
5162 """ Generate a new token if expired
5163
5164 Returns:
5165                 None. The renewed VCA object is stored in self.vca and can later be used to connect to vCloud Director as admin for the VDC
5166 """
5167 vca = None
5168
5169 try:
5170             self.logger.debug("Generating token for vCloud org {} "
5171                               "as user {}.".format(self.org_name,
5172                                                    self.user))
5173 vca = VCA(host=self.url,
5174 username=self.user,
5175 service_type=STANDALONE,
5176 version=VCAVERSION,
5177 verify=False,
5178 log=False)
5179
5180 result = vca.login(password=self.passwd, org=self.org_name)
5181 if result is True:
5182 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
5183 if result is True:
5184 self.logger.info(
5185                         "Successfully generated token for vCloud Director org: {} as user: {}".format(self.org_name, self.user))
5186 #Update vca
5187 self.vca = vca
5188 return
5189
5190         except Exception:
5191 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
5192 "{} as user: {}".format(self.org_name, self.user))
5193
5194 if not vca or not result:
5195             raise vimconn.vimconnConnectionException("self.connect() failed while reconnecting")
5196
5197
5198 def get_vdc_details(self):
5199 """ Get VDC details using pyVcloud Lib
5200
5201 Returns vdc object
5202 """
5203 vdc = self.vca.get_vdc(self.tenant_name)
5204
5205         # Retry once by refreshing the token if the first attempt failed
5206 if vdc is None:
5207 self.get_token()
5208 vdc = self.vca.get_vdc(self.tenant_name)
5209
5210 return vdc
5211
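    # Usage sketch (illustrative): get_vdc_details() is the usual entry point for methods
    # that need the tenant VDC; callers are expected to handle a None result, e.g.:
    #
    #   vdc = self.get_vdc_details()
    #   if vdc is None:
    #       raise vimconn.vimconnConnectionException("Can't retrieve VDC {}".format(self.tenant_name))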
5212