# bug 425 fix SR-IOV PCI-PASSTHROUGH interfaces
# [osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
# global variable for vcd connector type
# ('standalone' is the pyvcloud service_type for a standalone vCloud Director
# installation, as opposed to a vCloud Air subscription)
STANDALONE = 'standalone'

# key for flavor dicts (entries stored in vimconnector.flavorlist)
FLAVOR_RAM_KEY = 'ram'        # memory, in MBytes
FLAVOR_VCPUS_KEY = 'vcpus'    # number of virtual cpus
FLAVOR_DISK_KEY = 'disk'      # disk size
# defaults applied when a network is created without an explicit ip_profile
DEFAULT_IP_PROFILE = {'dhcp_count':50,
                      'dhcp_enabled':True,
                      'ip_version':"IPv4"
                      }
# global variable for wait time
INTERVAL_TIME = 5     # polling interval, seconds
MAX_WAIT_TIME = 1800  # overall timeout, seconds

# API version passed to pyvcloud VCA when logging in to vCloud Director
VCAVERSION = '5.9'

__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
__date__ = "$12-Jan-2017 11:09:29$"
__version__ = '0.1'

# vCloud Director vApp status codes, for reference:
# -1: "Could not be created",
# 0: "Unresolved",
# 1: "Resolved",
# 2: "Deployed",
# 3: "Suspended",
# 4: "Powered on",
# 5: "Waiting for user input",
# 6: "Unknown state",
# 7: "Unrecognized state",
# 8: "Powered off",
# 9: "Inconsistent state",
# 10: "Children do not all have the same status",
# 11: "Upload initiated, OVF descriptor pending",
# 12: "Upload initiated, copying contents",
# 13: "Upload initiated , disk contents pending",
# 14: "Upload has been quarantined",
# 15: "Upload quarantine period has expired"

# mapping vCD status to MANO
vcdStatusCode2manoFormat = {4: 'ACTIVE',
                            7: 'PAUSED',
                            3: 'SUSPENDED',
                            8: 'INACTIVE',
                            12: 'BUILD',
                            -1: 'ERROR',
                            14: 'DELETED'}

# mapping of network status strings (already MANO-like) to MANO states
netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
                        'ERROR': 'ERROR', 'DELETED': 'DELETED'
                        }
120
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
128 Constructor create vmware connector to vCloud director.
129
130 By default construct doesn't validate connection state. So client can create object with None arguments.
131 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
132
133 a) It initialize organization UUID
134 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
135
136 Args:
137 uuid - is organization uuid.
138 name - is organization name that must be presented in vCloud director.
139 tenant_id - is VDC uuid it must be presented in vCloud director
140 tenant_name - is VDC name.
141 url - is hostname or ip address of vCloud director
142 url_admin - same as above.
143 user - is user that administrator for organization. Caller must make sure that
144 username has right privileges.
145
146 password - is password for a user.
147
148 VMware connector also requires PVDC administrative privileges and separate account.
149 This variables must be passed via config argument dict contains keys
150
151 dict['admin_username']
152 dict['admin_password']
153 config - Provide NSX and vCenter information
154
155 Returns:
156 Nothing.
157 """
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
217 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
233 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 if index == 'tenant_id':
243 return self.tenant_id
244 if index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 if index == 'tenant_id':
269 self.tenant_id = value
270 if index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
289 def connect_as_admin(self):
290 """ Method connect as pvdc admin user to vCloud director.
291 There are certain action that can be done only by provider vdc admin user.
292 Organization creation / provider network creation etc.
293
294 Returns:
295 The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
296 """
297
298 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
299
300 vca_admin = VCA(host=self.url,
301 username=self.admin_user,
302 service_type=STANDALONE,
303 version=VCAVERSION,
304 verify=False,
305 log=False)
306 result = vca_admin.login(password=self.admin_password, org='System')
307 if not result:
308 raise vimconn.vimconnConnectionException(
309 "Can't connect to a vCloud director as: {}".format(self.admin_user))
310 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
311 if result is True:
312 self.logger.info(
313 "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
314
315 return vca_admin
316
317 def connect(self):
318 """ Method connect as normal user to vCloud director.
319
320 Returns:
321 The return vca object that letter can be used to connect to vCloud director as admin for VDC
322 """
323
324 try:
325 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
326 self.user,
327 self.org_name))
328 vca = VCA(host=self.url,
329 username=self.user,
330 service_type=STANDALONE,
331 version=VCAVERSION,
332 verify=False,
333 log=False)
334
335 result = vca.login(password=self.passwd, org=self.org_name)
336 if not result:
337 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
338 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
339 if result is True:
340 self.logger.info(
341 "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
342
343 except:
344 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
345 "{} as user: {}".format(self.org_name, self.user))
346
347 return vca
348
    def init_organization(self):
        """ Method initialize organization UUID and VDC parameters.

            At bare minimum client must provide organization name that present in vCloud director and VDC.

            The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
            The Org - UUID will be initialized at the run time if data center present in vCloud director.

            Side effects: sets self.vca, self.org_uuid, and possibly
            self.tenant_id / self.tenant_name. On any failure self.org_uuid
            is reset to None (no exception is propagated - see note below).

            Returns:
                The return vca object that letter can be used to connect to vcloud direct as admin
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")

        self.vca = vca
        try:
            if self.org_uuid is None:
                org_dict = self.get_org_list()
                for org in org_dict:
                    # we set org UUID at the init phase but we can do it only when we have valid credential.
                    if org_dict[org] == self.org_name:
                        self.org_uuid = org
                        self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
                        break
                else:
                    # for/else: loop finished without break, i.e. no org matched
                    raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))

            # if well good we require for org details
            org_details_dict = self.get_org(org_uuid=self.org_uuid)

            # we have two case if we want to initialize VDC ID or VDC name at run time
            # tenant_name provided but no tenant id
            if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
                vdcs_dict = org_details_dict['vdcs']
                for vdc in vdcs_dict:
                    if vdcs_dict[vdc] == self.tenant_name:
                        self.tenant_id = vdc
                        self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
                                                                                                self.org_name))
                        break
                else:
                    # for/else: named VDC not present in the organization
                    raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
            # case two we have tenant_id but we don't have tenant name so we find and set it.
            if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
                vdcs_dict = org_details_dict['vdcs']
                for vdc in vdcs_dict:
                    if vdc == self.tenant_id:
                        self.tenant_name = vdcs_dict[vdc]
                        self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
                                                                                                self.org_name))
                        break
                else:
                    # for/else: given VDC uuid not present in the organization
                    raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
            self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
        except:
            # NOTE(review): bare except deliberately swallows every failure
            # (including the vimconnExceptions raised above) and just leaves
            # org_uuid as None; callers are expected to check org_uuid.
            self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
            self.logger.debug(traceback.format_exc())
            self.org_uuid = None
408
409 def new_tenant(self, tenant_name=None, tenant_description=None):
410 """ Method adds a new tenant to VIM with this name.
411 This action requires access to create VDC action in vCloud director.
412
413 Args:
414 tenant_name is tenant_name to be created.
415 tenant_description not used for this call
416
417 Return:
418 returns the tenant identifier in UUID format.
419 If action is failed method will throw vimconn.vimconnException method
420 """
421 vdc_task = self.create_vdc(vdc_name=tenant_name)
422 if vdc_task is not None:
423 vdc_uuid, value = vdc_task.popitem()
424 self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
425 return vdc_uuid
426 else:
427 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
428
    def delete_tenant(self, tenant_id=None):
        """ Delete a tenant from VIM
             Args:
                tenant_id is tenant_id (org VDC uuid) to be deleted.

             Return:
                returns the tenant identifier in UUID format.
                If action is failed method will throw exception

             Raises:
                vimconnConnectionException - admin login failed
                vimconnNotFoundException   - tenant_id is None or the VDC GET failed
                vimconnException           - the DELETE call was rejected
        """
        # VDC removal needs the provider (System org) admin session
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")

        if tenant_id is not None:
            if vca.vcloud_session and vca.vcloud_session.organization:
                #Get OrgVDC
                # NOTE(review): the URL host comes from self.vca (tenant
                # session) while the headers come from the admin session
                # 'vca' - presumably both point at the same vCD host; confirm.
                url_list = [self.vca.host, '/api/vdc/', tenant_id]
                orgvdc_herf = ''.join(url_list)
                response = Http.get(url=orgvdc_herf,
                                    headers=vca.vcloud_session.get_vcloud_headers(),
                                    verify=vca.verify,
                                    logger=vca.logger)

                if response.status_code != requests.codes.ok:
                    self.logger.debug("delete_tenant():GET REST API call {} failed. "\
                                      "Return status code {}".format(orgvdc_herf,
                                                                     response.status_code))
                    raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))

                # collect the XML namespaces of the response so the Link
                # lookup below can be namespace-qualified
                # (dict.iteritems: this module targets Python 2)
                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
                vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
                # force recursive removal so a non-empty VDC is deleted too
                vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'

                #Delete OrgVDC
                response = Http.delete(url=vdc_remove_href,
                                       headers=vca.vcloud_session.get_vcloud_headers(),
                                       verify=vca.verify,
                                       logger=vca.logger)

                if response.status_code == 202:
                    # 202 Accepted: deletion runs as an asynchronous vCD task;
                    # block until it completes before reporting success
                    delete_vdc_task = taskType.parseString(response.content, True)
                    if type(delete_vdc_task) is GenericTask:
                        self.vca.block_until_completed(delete_vdc_task)
                    self.logger.info("Deleted tenant with ID {}".format(tenant_id))
                    return tenant_id
                else:
                    self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
                                      "Return status code {}".format(vdc_remove_href,
                                                                     response.status_code))
                    raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
        else:
            self.logger.debug("delete_tenant():Incorrect tenant ID  {}".format(tenant_id))
            raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
484
485
486 def get_tenant_list(self, filter_dict={}):
487 """Obtain tenants of VIM
488 filter_dict can contain the following keys:
489 name: filter by tenant name
490 id: filter by tenant uuid/id
491 <other VIM specific>
492 Returns the tenant list of dictionaries:
493 [{'name':'<name>, 'id':'<id>, ...}, ...]
494
495 """
496 org_dict = self.get_org(self.org_uuid)
497 vdcs_dict = org_dict['vdcs']
498
499 vdclist = []
500 try:
501 for k in vdcs_dict:
502 entry = {'name': vdcs_dict[k], 'id': k}
503 # if caller didn't specify dictionary we return all tenants.
504 if filter_dict is not None and filter_dict:
505 filtered_entry = entry.copy()
506 filtered_dict = set(entry.keys()) - set(filter_dict)
507 for unwanted_key in filtered_dict: del entry[unwanted_key]
508 if filter_dict == entry:
509 vdclist.append(filtered_entry)
510 else:
511 vdclist.append(entry)
512 except:
513 self.logger.debug("Error in get_tenant_list()")
514 self.logger.debug(traceback.format_exc())
515 raise vimconn.vimconnException("Incorrect state. {}")
516
517 return vdclist
518
519 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
520 """Adds a tenant network to VIM
521 net_name is the name
522 net_type can be 'bridge','data'.'ptp'.
523 ip_profile is a dict containing the IP parameters of the network
524 shared is a boolean
525 Returns the network identifier"""
526
527 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
528 .format(net_name, net_type, ip_profile, shared))
529
530 isshared = 'false'
531 if shared:
532 isshared = 'true'
533
534 # ############# Stub code for SRIOV #################
535 # if net_type == "data" or net_type == "ptp":
536 # if self.config.get('dv_switch_name') == None:
537 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
538 # network_uuid = self.create_dvPort_group(net_name)
539
540 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
541 ip_profile=ip_profile, isshared=isshared)
542 if network_uuid is not None:
543 return network_uuid
544 else:
545 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
546
547 def get_vcd_network_list(self):
548 """ Method available organization for a logged in tenant
549
550 Returns:
551 The return vca object that letter can be used to connect to vcloud direct as admin
552 """
553
554 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
555
556 if not self.tenant_name:
557 raise vimconn.vimconnConnectionException("Tenant name is empty.")
558
559 vdc = self.get_vdc_details()
560 if vdc is None:
561 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
562
563 vdc_uuid = vdc.get_id().split(":")[3]
564 networks = self.vca.get_networks(vdc.get_name())
565 network_list = []
566 try:
567 for network in networks:
568 filter_dict = {}
569 netid = network.get_id().split(":")
570 if len(netid) != 4:
571 continue
572
573 filter_dict["name"] = network.get_name()
574 filter_dict["id"] = netid[3]
575 filter_dict["shared"] = network.get_IsShared()
576 filter_dict["tenant_id"] = vdc_uuid
577 if network.get_status() == 1:
578 filter_dict["admin_state_up"] = True
579 else:
580 filter_dict["admin_state_up"] = False
581 filter_dict["status"] = "ACTIVE"
582 filter_dict["type"] = "bridge"
583 network_list.append(filter_dict)
584 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
585 except:
586 self.logger.debug("Error in get_vcd_network_list")
587 self.logger.debug(traceback.format_exc())
588 pass
589
590 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
591 return network_list
592
593 def get_network_list(self, filter_dict={}):
594 """Obtain tenant networks of VIM
595 Filter_dict can be:
596 name: network name OR/AND
597 id: network uuid OR/AND
598 shared: boolean OR/AND
599 tenant_id: tenant OR/AND
600 admin_state_up: boolean
601 status: 'ACTIVE'
602
603 [{key : value , key : value}]
604
605 Returns the network list of dictionaries:
606 [{<the fields at Filter_dict plus some VIM specific>}, ...]
607 List can be empty
608 """
609
610 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
611
612 if not self.tenant_name:
613 raise vimconn.vimconnConnectionException("Tenant name is empty.")
614
615 vdc = self.get_vdc_details()
616 if vdc is None:
617 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
618
619 try:
620 vdcid = vdc.get_id().split(":")[3]
621 networks = self.vca.get_networks(vdc.get_name())
622 network_list = []
623
624 for network in networks:
625 filter_entry = {}
626 net_uuid = network.get_id().split(":")
627 if len(net_uuid) != 4:
628 continue
629 else:
630 net_uuid = net_uuid[3]
631 # create dict entry
632 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
633 vdcid,
634 network.get_name()))
635 filter_entry["name"] = network.get_name()
636 filter_entry["id"] = net_uuid
637 filter_entry["shared"] = network.get_IsShared()
638 filter_entry["tenant_id"] = vdcid
639 if network.get_status() == 1:
640 filter_entry["admin_state_up"] = True
641 else:
642 filter_entry["admin_state_up"] = False
643 filter_entry["status"] = "ACTIVE"
644 filter_entry["type"] = "bridge"
645 filtered_entry = filter_entry.copy()
646
647 if filter_dict is not None and filter_dict:
648 # we remove all the key : value we don't care and match only
649 # respected field
650 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
651 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
652 if filter_dict == filter_entry:
653 network_list.append(filtered_entry)
654 else:
655 network_list.append(filtered_entry)
656 except:
657 self.logger.debug("Error in get_vcd_network_list")
658 self.logger.debug(traceback.format_exc())
659
660 self.logger.debug("Returning {}".format(network_list))
661 return network_list
662
663 def get_network(self, net_id):
664 """Method obtains network details of net_id VIM network
665 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
666
667 try:
668 vdc = self.get_vdc_details()
669 vdc_id = vdc.get_id().split(":")[3]
670
671 networks = self.vca.get_networks(vdc.get_name())
672 filter_dict = {}
673
674 for network in networks:
675 vdc_network_id = network.get_id().split(":")
676 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
677 filter_dict["name"] = network.get_name()
678 filter_dict["id"] = vdc_network_id[3]
679 filter_dict["shared"] = network.get_IsShared()
680 filter_dict["tenant_id"] = vdc_id
681 if network.get_status() == 1:
682 filter_dict["admin_state_up"] = True
683 else:
684 filter_dict["admin_state_up"] = False
685 filter_dict["status"] = "ACTIVE"
686 filter_dict["type"] = "bridge"
687 self.logger.debug("Returning {}".format(filter_dict))
688 return filter_dict
689 except:
690 self.logger.debug("Error in get_network")
691 self.logger.debug(traceback.format_exc())
692
693 return filter_dict
694
695 def delete_network(self, net_id):
696 """
697 Method Deletes a tenant network from VIM, provide the network id.
698
699 Returns the network identifier or raise an exception
700 """
701
702 # ############# Stub code for SRIOV #################
703 # dvport_group = self.get_dvport_group(net_id)
704 # if dvport_group:
705 # #delete portgroup
706 # status = self.destroy_dvport_group(net_id)
707 # if status:
708 # # Remove vlanID from persistent info
709 # if net_id in self.persistent_info["used_vlanIDs"]:
710 # del self.persistent_info["used_vlanIDs"][net_id]
711 #
712 # return net_id
713
714 vcd_network = self.get_vcd_network(network_uuid=net_id)
715 if vcd_network is not None and vcd_network:
716 if self.delete_network_action(network_uuid=net_id):
717 return net_id
718 else:
719 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
720
721 def refresh_nets_status(self, net_list):
722 """Get the status of the networks
723 Params: the list of network identifiers
724 Returns a dictionary with:
725 net_id: #VIM id of this network
726 status: #Mandatory. Text with one of:
727 # DELETED (not found at vim)
728 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
729 # OTHER (Vim reported other status not understood)
730 # ERROR (VIM indicates an ERROR status)
731 # ACTIVE, INACTIVE, DOWN (admin down),
732 # BUILD (on building process)
733 #
734 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
735 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
736
737 """
738
739 dict_entry = {}
740 try:
741 for net in net_list:
742 errormsg = ''
743 vcd_network = self.get_vcd_network(network_uuid=net)
744 if vcd_network is not None and vcd_network:
745 if vcd_network['status'] == '1':
746 status = 'ACTIVE'
747 else:
748 status = 'DOWN'
749 else:
750 status = 'DELETED'
751 errormsg = 'Network not found.'
752
753 dict_entry[net] = {'status': status, 'error_msg': errormsg,
754 'vim_info': yaml.safe_dump(vcd_network)}
755 except:
756 self.logger.debug("Error in refresh_nets_status")
757 self.logger.debug(traceback.format_exc())
758
759 return dict_entry
760
761 def get_flavor(self, flavor_id):
762 """Obtain flavor details from the VIM
763 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
764 """
765 if flavor_id not in vimconnector.flavorlist:
766 raise vimconn.vimconnNotFoundException("Flavor not found.")
767 return vimconnector.flavorlist[flavor_id]
768
769 def new_flavor(self, flavor_data):
770 """Adds a tenant flavor to VIM
771 flavor_data contains a dictionary with information, keys:
772 name: flavor name
773 ram: memory (cloud type) in MBytes
774 vpcus: cpus (cloud type)
775 extended: EPA parameters
776 - numas: #items requested in same NUMA
777 memory: number of 1G huge pages memory
778 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
779 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
780 - name: interface name
781 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
782 bandwidth: X Gbps; requested guarantee bandwidth
783 vpci: requested virtual PCI address
784 disk: disk size
785 is_public:
786 #TODO to concrete
787 Returns the flavor identifier"""
788
789 # generate a new uuid put to internal dict and return it.
790 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
791 new_flavor=flavor_data
792 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
793 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
794 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
795
796 if not isinstance(ram, int):
797 raise vimconn.vimconnException("Non-integer value for ram")
798 elif not isinstance(cpu, int):
799 raise vimconn.vimconnException("Non-integer value for cpu")
800 elif not isinstance(disk, int):
801 raise vimconn.vimconnException("Non-integer value for disk")
802
803 extended_flv = flavor_data.get("extended")
804 if extended_flv:
805 numas=extended_flv.get("numas")
806 if numas:
807 for numa in numas:
808 #overwrite ram and vcpus
809 ram = numa['memory']*1024
810 if 'paired-threads' in numa:
811 cpu = numa['paired-threads']*2
812 elif 'cores' in numa:
813 cpu = numa['cores']
814 elif 'threads' in numa:
815 cpu = numa['threads']
816
817 new_flavor[FLAVOR_RAM_KEY] = ram
818 new_flavor[FLAVOR_VCPUS_KEY] = cpu
819 new_flavor[FLAVOR_DISK_KEY] = disk
820 # generate a new uuid put to internal dict and return it.
821 flavor_id = uuid.uuid4()
822 vimconnector.flavorlist[str(flavor_id)] = new_flavor
823 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
824
825 return str(flavor_id)
826
827 def delete_flavor(self, flavor_id):
828 """Deletes a tenant flavor from VIM identify by its id
829
830 Returns the used id or raise an exception
831 """
832 if flavor_id not in vimconnector.flavorlist:
833 raise vimconn.vimconnNotFoundException("Flavor not found.")
834
835 vimconnector.flavorlist.pop(flavor_id, None)
836 return flavor_id
837
838 def new_image(self, image_dict):
839 """
840 Adds a tenant image to VIM
841 Returns:
842 200, image-id if the image is created
843 <0, message if there is an error
844 """
845
846 return self.get_image_id_from_path(image_dict['location'])
847
    def delete_image(self, image_id):
        """
        Deletes a tenant image from VIM
        Args:
            image_id is ID of Image to be deleted
        Return:
            returns the image identifier in UUID format or raises an exception on error

        Flow: every CatalogItem inside the catalog identified by image_id is
        removed first, then the catalog itself is deleted via the admin API.
        """
        # catalog operations require the provider (System org) admin session
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        # Get Catalog details
        # NOTE(review): the URL host comes from self.vca (tenant session)
        # while headers come from the admin session 'vca'; presumably both
        # point at the same vCD host - confirm.
        url_list = [self.vca.host, '/api/catalog/', image_id]
        catalog_herf = ''.join(url_list)
        response = Http.get(url=catalog_herf,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)

        if response.status_code != requests.codes.ok:
            self.logger.debug("delete_image():GET REST API call {} failed. "\
                              "Return status code {}".format(catalog_herf,
                                                             response.status_code))
            raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))

        # collect the XML namespaces of the response for qualified lookups
        # (dict.iteritems: this module targets Python 2)
        lxmlroot_respond = lxmlElementTree.fromstring(response.content)
        namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
        namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

        catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
        catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
        for catalogItem in catalogItems:
            catalogItem_href = catalogItem.attrib['href']

            #GET details of catalogItem
            response = Http.get(url=catalogItem_href,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code != requests.codes.ok:
                self.logger.debug("delete_image():GET REST API call {} failed. "\
                                  "Return status code {}".format(catalog_herf,
                                                                 response.status_code))
                raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
                    catalogItem,
                    image_id))

            # follow the item's rel='remove' link to delete it
            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
            catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']

            #Remove catalogItem
            response = Http.delete(url= catalogitem_remove_href,
                                   headers=vca.vcloud_session.get_vcloud_headers(),
                                   verify=vca.verify,
                                   logger=vca.logger)
            if response.status_code == requests.codes.no_content:
                self.logger.debug("Deleted Catalog item {}".format(catalogItem))
            else:
                raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))

        #Remove catalog
        url_list = [self.vca.host, '/api/admin/catalog/', image_id]
        catalog_remove_herf = ''.join(url_list)
        response = Http.delete(url= catalog_remove_herf,
                               headers=vca.vcloud_session.get_vcloud_headers(),
                               verify=vca.verify,
                               logger=vca.logger)

        if response.status_code == requests.codes.no_content:
            self.logger.debug("Deleted Catalog {}".format(image_id))
            return image_id
        else:
            raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
924
925
926 def catalog_exists(self, catalog_name, catalogs):
927 """
928
929 :param catalog_name:
930 :param catalogs:
931 :return:
932 """
933 for catalog in catalogs:
934 if catalog.name == catalog_name:
935 return True
936 return False
937
938 def create_vimcatalog(self, vca=None, catalog_name=None):
939 """ Create new catalog entry in vCloud director.
940
941 Args
942 vca: vCloud director.
943 catalog_name catalog that client wish to create. Note no validation done for a name.
944 Client must make sure that provide valid string representation.
945
946 Return (bool) True if catalog created.
947
948 """
949 try:
950 task = vca.create_catalog(catalog_name, catalog_name)
951 result = vca.block_until_completed(task)
952 if not result:
953 return False
954 catalogs = vca.get_catalogs()
955 except:
956 return False
957 return self.catalog_exists(catalog_name, catalogs)
958
    # noinspection PyIncorrectDocstring
    def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
                   description='', progress=False, chunk_bytes=128 * 1024):
        """
        Uploads a OVF file to a vCloud catalog

        The flow is: create a vApp template entry in the catalog (POST of
        UploadVAppTemplateParams), PUT the OVF descriptor, then poll the
        template's Files section and upload every remaining file (the VMDKs,
        expected to sit in the same directory as the OVF) in chunks.

        :param chunk_bytes: size of each VMDK upload chunk in bytes
        :param progress: when True, print a console progress bar per VMDK
        :param description: free-text description stored with the template
        :param image_name: unused here; kept for interface compatibility
        :param vca: logged-in vCloud director client
        :param catalog_name: (str): The name of the catalog to upload the media.
        :param media_file_name: (str): The name of the local media file to upload.
        :return: (bool) True if the media file was successfully uploaded, false otherwise.
        :raises vimconn.vimconnException: on any unexpected error during upload
        """
        # NOTE(review): the isfile() result is discarded — a missing file is only
        # caught by the os.stat() below raising; confirm whether that is intended
        os.path.isfile(media_file_name)
        statinfo = os.stat(media_file_name)

        #  find a catalog entry where we upload OVF.
        #  create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
        #  status change.
        #  if VCD can parse OVF we upload VMDK file
        try:
            for catalog in vca.get_catalogs():
                if catalog_name != catalog.name:
                    continue
                # find the single 'add media' link of the matching catalog
                # (python2: filter() returns a list, hence len() below)
                link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
                                           link.get_rel() == 'add', catalog.get_Link())
                assert len(link) == 1
                data = """
                <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
                """ % (escape(catalog_name), escape(description))
                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
                response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
                if response.status_code == requests.codes.created:
                    # vCD answered with the new catalogItem; pull the vAppTemplate
                    # entity out of it to learn where to PUT the OVF descriptor
                    catalogItem = XmlElementTree.fromstring(response.content)
                    entity = [child for child in catalogItem if
                              child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                    href = entity.get('href')
                    template = href
                    response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify, logger=self.logger)

                    if response.status_code == requests.codes.ok:
                        media = mediaType.parseString(response.content, True)
                        # the first File entry is the OVF descriptor itself
                        link = filter(lambda link: link.get_rel() == 'upload:default',
                                      media.get_Files().get_File()[0].get_Link())[0]
                        headers = vca.vcloud_session.get_vcloud_headers()
                        # NOTE(review): header value looks malformed — probably meant
                        # plain 'text/xml'; confirm against the vCD API before changing
                        headers['Content-Type'] = 'Content-Type text/xml'
                        response = Http.put(link.get_href(),
                                            data=open(media_file_name, 'rb'),
                                            headers=headers,
                                            verify=vca.verify, logger=self.logger)
                        if response.status_code != requests.codes.ok:
                            self.logger.debug(
                                "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
                                                                                                      media_file_name))
                            return False

                    # TODO fix this with aync block
                    # give vCD time to parse the OVF before polling the file list
                    time.sleep(5)

                    self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))

                    # uploading VMDK file
                    # check status of OVF upload and upload remaining files.
                    response = Http.get(template,
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=self.logger)

                    if response.status_code == requests.codes.ok:
                        media = mediaType.parseString(response.content, True)
                        number_of_files = len(media.get_Files().get_File())
                        for index in xrange(0, number_of_files):
                            links_list = filter(lambda link: link.get_rel() == 'upload:default',
                                                media.get_Files().get_File()[index].get_Link())
                            for link in links_list:
                                # we skip ovf since it already uploaded.
                                if 'ovf' in link.get_href():
                                    continue
                                # The OVF file and VMDK must be in a same directory
                                head, tail = os.path.split(media_file_name)
                                file_vmdk = head + '/' + link.get_href().split("/")[-1]
                                if not os.path.isfile(file_vmdk):
                                    return False
                                statinfo = os.stat(file_vmdk)
                                if statinfo.st_size == 0:
                                    return False
                                hrefvmdk = link.get_href()

                                if progress:
                                    print("Uploading file: {}".format(file_vmdk))
                                if progress:
                                    widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
                                               FileTransferSpeed()]
                                    progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()

                                # chunked PUT loop: each pass sends one chunk with an
                                # explicit Content-Range header
                                bytes_transferred = 0
                                f = open(file_vmdk, 'rb')
                                while bytes_transferred < statinfo.st_size:
                                    my_bytes = f.read(chunk_bytes)
                                    if len(my_bytes) <= chunk_bytes:
                                        headers = vca.vcloud_session.get_vcloud_headers()
                                        # NOTE(review): per RFC 7233 the end offset should be
                                        # bytes_transferred + len(my_bytes) - 1; as written the
                                        # range is only correct for the first chunk — confirm
                                        headers['Content-Range'] = 'bytes %s-%s/%s' % (
                                            bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
                                        headers['Content-Length'] = str(len(my_bytes))
                                        response = Http.put(hrefvmdk,
                                                            headers=headers,
                                                            data=my_bytes,
                                                            verify=vca.verify,
                                                            logger=None)

                                        if response.status_code == requests.codes.ok:
                                            bytes_transferred += len(my_bytes)
                                            if progress:
                                                progress_bar.update(bytes_transferred)
                                        else:
                                            self.logger.debug(
                                                'file upload failed with error: [%s] %s' % (response.status_code,
                                                                                            response.content))

                                            f.close()
                                            return False
                                f.close()
                                if progress:
                                    progress_bar.finish()
                                # let vCD settle before reporting success
                                time.sleep(10)
                        return True
                    else:
                        self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
                                          format(catalog_name, media_file_name))
                        return False
        except Exception as exp:
            self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
                              .format(catalog_name,media_file_name, exp))
            raise vimconn.vimconnException(
                "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
                .format(catalog_name,media_file_name, exp))

        # reached when no catalog matched catalog_name
        self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
        return False
1102
1103 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1104 """Upload media file"""
1105 # TODO add named parameters for readability
1106
1107 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1108 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1109
1110 def validate_uuid4(self, uuid_string=None):
1111 """ Method validate correct format of UUID.
1112
1113 Return: true if string represent valid uuid
1114 """
1115 try:
1116 val = uuid.UUID(uuid_string, version=4)
1117 except ValueError:
1118 return False
1119 return True
1120
1121 def get_catalogid(self, catalog_name=None, catalogs=None):
1122 """ Method check catalog and return catalog ID in UUID format.
1123
1124 Args
1125 catalog_name: catalog name as string
1126 catalogs: list of catalogs.
1127
1128 Return: catalogs uuid
1129 """
1130
1131 for catalog in catalogs:
1132 if catalog.name == catalog_name:
1133 catalog_id = catalog.get_id().split(":")
1134 return catalog_id[3]
1135 return None
1136
1137 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1138 """ Method check catalog and return catalog name lookup done by catalog UUID.
1139
1140 Args
1141 catalog_name: catalog name as string
1142 catalogs: list of catalogs.
1143
1144 Return: catalogs name or None
1145 """
1146
1147 if not self.validate_uuid4(uuid_string=catalog_uuid):
1148 return None
1149
1150 for catalog in catalogs:
1151 catalog_id = catalog.get_id().split(":")[3]
1152 if catalog_id == catalog_uuid:
1153 return catalog.name
1154 return None
1155
1156 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1157 """ Method check catalog and return catalog name lookup done by catalog UUID.
1158
1159 Args
1160 catalog_name: catalog name as string
1161 catalogs: list of catalogs.
1162
1163 Return: catalogs name or None
1164 """
1165
1166 if not self.validate_uuid4(uuid_string=catalog_uuid):
1167 return None
1168
1169 for catalog in catalogs:
1170 catalog_id = catalog.get_id().split(":")[3]
1171 if catalog_id == catalog_uuid:
1172 return catalog
1173 return None
1174
1175 def get_image_id_from_path(self, path=None, progress=False):
1176 """ Method upload OVF image to vCloud director.
1177
1178 Each OVF image represented as single catalog entry in vcloud director.
1179 The method check for existing catalog entry. The check done by file name without file extension.
1180
1181 if given catalog name already present method will respond with existing catalog uuid otherwise
1182 it will create new catalog entry and upload OVF file to newly created catalog.
1183
1184 If method can't create catalog entry or upload a file it will throw exception.
1185
1186 Method accept boolean flag progress that will output progress bar. It useful method
1187 for standalone upload use case. In case to test large file upload.
1188
1189 Args
1190 path: - valid path to OVF file.
1191 progress - boolean progress bar show progress bar.
1192
1193 Return: if image uploaded correct method will provide image catalog UUID.
1194 """
1195
1196 if not path:
1197 raise vimconn.vimconnException("Image path can't be None.")
1198
1199 if not os.path.isfile(path):
1200 raise vimconn.vimconnException("Can't read file. File not found.")
1201
1202 if not os.access(path, os.R_OK):
1203 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1204
1205 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1206
1207 dirpath, filename = os.path.split(path)
1208 flname, file_extension = os.path.splitext(path)
1209 if file_extension != '.ovf':
1210 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1211 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1212
1213 catalog_name = os.path.splitext(filename)[0]
1214 catalog_md5_name = hashlib.md5(path).hexdigest()
1215 self.logger.debug("File name {} Catalog Name {} file path {} "
1216 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1217
1218 try:
1219 catalogs = self.vca.get_catalogs()
1220 except Exception as exp:
1221 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1222 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1223
1224 if len(catalogs) == 0:
1225 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1226 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1227 if not result:
1228 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1229 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1230 media_name=filename, medial_file_name=path, progress=progress)
1231 if not result:
1232 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1233 return self.get_catalogid(catalog_name, self.vca.get_catalogs())
1234 else:
1235 for catalog in catalogs:
1236 # search for existing catalog if we find same name we return ID
1237 # TODO optimize this
1238 if catalog.name == catalog_md5_name:
1239 self.logger.debug("Found existing catalog entry for {} "
1240 "catalog id {}".format(catalog_name,
1241 self.get_catalogid(catalog_md5_name, catalogs)))
1242 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1243
1244 # if we didn't find existing catalog we create a new one and upload image.
1245 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1246 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1247 if not result:
1248 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1249
1250 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1251 media_name=filename, medial_file_name=path, progress=progress)
1252 if not result:
1253 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1254
1255 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1256
1257 def get_image_list(self, filter_dict={}):
1258 '''Obtain tenant images from VIM
1259 Filter_dict can be:
1260 name: image name
1261 id: image uuid
1262 checksum: image checksum
1263 location: image path
1264 Returns the image list of dictionaries:
1265 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1266 List can be empty
1267 '''
1268
1269 try:
1270 image_list = []
1271 catalogs = self.vca.get_catalogs()
1272 if len(catalogs) == 0:
1273 return image_list
1274 else:
1275 for catalog in catalogs:
1276 catalog_uuid = catalog.get_id().split(":")[3]
1277 name = catalog.name
1278 filtered_dict = {}
1279 if filter_dict.get("name") and filter_dict["name"] != name:
1280 continue
1281 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1282 continue
1283 filtered_dict ["name"] = name
1284 filtered_dict ["id"] = catalog_uuid
1285 image_list.append(filtered_dict)
1286
1287 self.logger.debug("List of already created catalog items: {}".format(image_list))
1288 return image_list
1289 except Exception as exp:
1290 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1291
1292 def get_vappid(self, vdc=None, vapp_name=None):
1293 """ Method takes vdc object and vApp name and returns vapp uuid or None
1294
1295 Args:
1296 vdc: The VDC object.
1297 vapp_name: is application vappp name identifier
1298
1299 Returns:
1300 The return vApp name otherwise None
1301 """
1302 if vdc is None or vapp_name is None:
1303 return None
1304 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1305 try:
1306 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1307 vdc.ResourceEntities.ResourceEntity)
1308 if len(refs) == 1:
1309 return refs[0].href.split("vapp")[1][1:]
1310 except Exception as e:
1311 self.logger.exception(e)
1312 return False
1313 return None
1314
1315 def check_vapp(self, vdc=None, vapp_uuid=None):
1316 """ Method Method returns True or False if vapp deployed in vCloud director
1317
1318 Args:
1319 vca: Connector to VCA
1320 vdc: The VDC object.
1321 vappid: vappid is application identifier
1322
1323 Returns:
1324 The return True if vApp deployed
1325 :param vdc:
1326 :param vapp_uuid:
1327 """
1328 try:
1329 refs = filter(lambda ref:
1330 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1331 vdc.ResourceEntities.ResourceEntity)
1332 for ref in refs:
1333 vappid = ref.href.split("vapp")[1][1:]
1334 # find vapp with respected vapp uuid
1335 if vappid == vapp_uuid:
1336 return True
1337 except Exception as e:
1338 self.logger.exception(e)
1339 return False
1340 return False
1341
    def get_namebyvappid(self, vdc=None, vapp_uuid=None):
        """Method returns vApp name from vCD and lookup done by vapp_id.

        Scans the VDC's resource entities for a vApp whose href contains the
        given UUID, then fetches the vApp via REST and reads its 'name'
        attribute from the returned XML.

        Args:
            vdc: The VDC object.
            vapp_uuid: vappid is application identifier

        Returns:
            The vApp name, or None when not found or on any error.
        """

        try:
            # keep only vApp entities (python2: filter() returns a list; the
            # for-loop below works with the py3 iterator form as well)
            refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
                          vdc.ResourceEntities.ResourceEntity)
            for ref in refs:
                # we care only about UUID the rest doesn't matter
                # (href looks like https://host/api/vApp/vapp-<uuid>)
                vappid = ref.href.split("vapp")[1][1:]
                if vappid == vapp_uuid:
                    response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
                                        logger=self.logger)

                    #Retry login if session expired & retry sending request
                    if response.status_code == 403:
                        response = self.retry_rest('GET', ref.href)

                    # the vApp XML root carries the name as an attribute
                    tree = XmlElementTree.fromstring(response.content)
                    return tree.attrib['name']
        except Exception as e:
            self.logger.exception(e)
            return None
        return None
1374
1375 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1376 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1377 """Adds a VM instance to VIM
1378 Params:
1379 'start': (boolean) indicates if VM must start or created in pause mode.
1380 'image_id','flavor_id': image and flavor VIM id to use for the VM
1381 'net_list': list of interfaces, each one is a dictionary with:
1382 'name': (optional) name for the interface.
1383 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1384 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1385 'model': (optional and only have sense for type==virtual) interface model: virtio, e2000, ...
1386 'mac_address': (optional) mac address to assign to this interface
1387 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1388 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1389 'type': (mandatory) can be one of:
1390 'virtual', in this case always connected to a network of type 'net_type=bridge'
1391 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
1392 can created unconnected
1393 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1394 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1395 are allocated on the same physical NIC
1396 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1397 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1398 or True, it must apply the default VIM behaviour
1399 After execution the method will add the key:
1400 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1401 interface. 'net_list' is modified
1402 'cloud_config': (optional) dictionary with:
1403 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1404 'users': (optional) list of users to be inserted, each item is a dict with:
1405 'name': (mandatory) user name,
1406 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1407 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1408 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1409 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1410 'dest': (mandatory) string with the destination absolute path
1411 'encoding': (optional, by default text). Can be one of:
1412 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1413 'content' (mandatory): string with the content of the file
1414 'permissions': (optional) string with file permissions, typically octal notation '0644'
1415 'owner': (optional) file owner, string with the format 'owner:group'
1416 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1417 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1418 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1419 'size': (mandatory) string with the size of the disk in GB
1420 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1421 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1422 availability_zone_index is None
1423 Returns a tuple with the instance identifier and created_items or raises an exception on error
1424 created_items can be None or a dictionary where this method can include key-values that will be passed to
1425 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1426 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1427 as not present.
1428 """
1429 self.logger.info("Creating new instance for entry {}".format(name))
1430 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
1431 description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
1432
1433 #new vm name = vmname + tenant_id + uuid
1434 new_vm_name = [name, '-', str(uuid.uuid4())]
1435 vmname_andid = ''.join(new_vm_name)
1436
1437 # if vm already deployed we return existing uuid
1438 # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
1439 # if vapp_uuid is not None:
1440 # return vapp_uuid
1441
1442 # we check for presence of VDC, Catalog entry and Flavor.
1443 vdc = self.get_vdc_details()
1444 if vdc is None:
1445 raise vimconn.vimconnNotFoundException(
1446 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1447 catalogs = self.vca.get_catalogs()
1448 if catalogs is None:
1449 #Retry once, if failed by refreshing token
1450 self.get_token()
1451 catalogs = self.vca.get_catalogs()
1452 if catalogs is None:
1453 raise vimconn.vimconnNotFoundException(
1454 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1455
1456 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1457 if catalog_hash_name:
1458 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1459 else:
1460 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1461 "(Failed retrieve catalog information {})".format(name, image_id))
1462
1463
1464 # Set vCPU and Memory based on flavor.
1465 vm_cpus = None
1466 vm_memory = None
1467 vm_disk = None
1468 numas = None
1469
1470 if flavor_id is not None:
1471 if flavor_id not in vimconnector.flavorlist:
1472 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1473 "Failed retrieve flavor information "
1474 "flavor id {}".format(name, flavor_id))
1475 else:
1476 try:
1477 flavor = vimconnector.flavorlist[flavor_id]
1478 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1479 vm_memory = flavor[FLAVOR_RAM_KEY]
1480 vm_disk = flavor[FLAVOR_DISK_KEY]
1481 extended = flavor.get("extended", None)
1482 if extended:
1483 numas=extended.get("numas", None)
1484
1485 except Exception as exp:
1486 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1487
1488 # image upload creates template name as catalog name space Template.
1489 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1490 power_on = 'false'
1491 if start:
1492 power_on = 'true'
1493
1494 # client must provide at least one entry in net_list if not we report error
1495 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1496 #If no mgmt, then the 1st NN in netlist is considered as primary net.
1497 primary_net = None
1498 primary_netname = None
1499 network_mode = 'bridged'
1500 if net_list is not None and len(net_list) > 0:
1501 for net in net_list:
1502 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1503 primary_net = net
1504 if primary_net is None:
1505 primary_net = net_list[0]
1506
1507 try:
1508 primary_net_id = primary_net['net_id']
1509 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1510 if 'name' in network_dict:
1511 primary_netname = network_dict['name']
1512
1513 except KeyError:
1514 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1515 else:
1516 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
1517
1518 # use: 'data', 'bridge', 'mgmt'
1519 # create vApp. Set vcpu and ram based on flavor id.
1520 try:
1521 for retry in (1,2):
1522 vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1523 self.get_catalogbyid(image_id, catalogs),
1524 network_name=None, # None while creating vapp
1525 network_mode=network_mode,
1526 vm_name=vmname_andid,
1527 vm_cpus=vm_cpus, # can be None if flavor is None
1528 vm_memory=vm_memory) # can be None if flavor is None
1529
1530 if not vapptask and retry==1:
1531 self.get_token() # Retry getting token
1532 continue
1533 else:
1534 break
1535
1536 if vapptask is None or vapptask is False:
1537 raise vimconn.vimconnUnexpectedResponse(
1538 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1539 if type(vapptask) is VappTask:
1540 self.vca.block_until_completed(vapptask)
1541
1542 except Exception as exp:
1543 raise vimconn.vimconnUnexpectedResponse(
1544 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1545
1546 # we should have now vapp in undeployed state.
1547 try:
1548 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1549
1550 except Exception as exp:
1551 raise vimconn.vimconnUnexpectedResponse(
1552 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1553 .format(vmname_andid, exp))
1554
1555 if vapp_uuid is None:
1556 raise vimconn.vimconnUnexpectedResponse(
1557 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1558 vmname_andid))
1559
1560 #Add PCI passthrough/SRIOV configrations
1561 vm_obj = None
1562 pci_devices_info = []
1563 sriov_net_info = []
1564 reserve_memory = False
1565
1566 for net in net_list:
1567 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1568 pci_devices_info.append(net)
1569 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
1570 sriov_net_info.append(net)
1571
1572 #Add PCI
1573 if len(pci_devices_info) > 0:
1574 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1575 vmname_andid ))
1576 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1577 pci_devices_info,
1578 vmname_andid)
1579 if PCI_devices_status:
1580 self.logger.info("Added PCI devives {} to VM {}".format(
1581 pci_devices_info,
1582 vmname_andid)
1583 )
1584 reserve_memory = True
1585 else:
1586 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1587 pci_devices_info,
1588 vmname_andid)
1589 )
1590
1591 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1592 # Modify vm disk
1593 if vm_disk:
1594 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1595 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1596 if result :
1597 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1598
1599 #Add new or existing disks to vApp
1600 if disk_list:
1601 added_existing_disk = False
1602 for disk in disk_list:
1603 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1604 image_id = disk['image_id']
1605 # Adding CD-ROM to VM
1606 # will revisit code once specification ready to support this feature
1607 self.insert_media_to_vm(vapp, image_id)
1608 elif "image_id" in disk and disk["image_id"] is not None:
1609 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1610 disk["image_id"] , vapp_uuid))
1611 self.add_existing_disk(catalogs=catalogs,
1612 image_id=disk["image_id"],
1613 size = disk["size"],
1614 template_name=templateName,
1615 vapp_uuid=vapp_uuid
1616 )
1617 added_existing_disk = True
1618 else:
1619 #Wait till added existing disk gets reflected into vCD database/API
1620 if added_existing_disk:
1621 time.sleep(5)
1622 added_existing_disk = False
1623 self.add_new_disk(vapp_uuid, disk['size'])
1624
1625 if numas:
1626 # Assigning numa affinity setting
1627 for numa in numas:
1628 if 'paired-threads-id' in numa:
1629 paired_threads_id = numa['paired-threads-id']
1630 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1631
1632 # add NICs & connect to networks in netlist
1633 try:
1634 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1635 nicIndex = 0
1636 primary_nic_index = 0
1637 for net in net_list:
1638 # openmano uses network id in UUID format.
1639 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
1640 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1641 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1642
1643 if 'net_id' not in net:
1644 continue
1645
1646 #Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
1647 #Same will be returned in refresh_vms_status() as vim_interface_id
1648 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1649
1650 interface_net_id = net['net_id']
1651 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1652 interface_network_mode = net['use']
1653
1654 if interface_network_mode == 'mgmt':
1655 primary_nic_index = nicIndex
1656
1657 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1658 - DHCP (The IP address is obtained from a DHCP service.)
1659 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1660 - NONE (No IP addressing mode specified.)"""
1661
1662 if primary_netname is not None:
1663 nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
1664 if len(nets) == 1:
1665 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
1666
1667 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1668 task = vapp.connect_to_network(nets[0].name, nets[0].href)
1669 if type(task) is GenericTask:
1670 self.vca.block_until_completed(task)
1671 # connect network to VM - with all DHCP by default
1672
1673 type_list = ('PF', 'PCI-PASSTHROUGH', 'VF', 'SR-IOV', 'VFnotShared')
1674 if 'type' in net and net['type'] not in type_list:
1675 # fetching nic type from vnf
1676 if 'model' in net:
1677 nic_type = net['model']
1678 self.logger.info("new_vminstance(): adding network adapter "\
1679 "to a network {}".format(nets[0].name))
1680 self.add_network_adapter_to_vms(vapp, nets[0].name,
1681 primary_nic_index,
1682 nicIndex,
1683 net,
1684 nic_type=nic_type)
1685 else:
1686 self.logger.info("new_vminstance(): adding network adapter "\
1687 "to a network {}".format(nets[0].name))
1688 self.add_network_adapter_to_vms(vapp, nets[0].name,
1689 primary_nic_index,
1690 nicIndex,
1691 net)
1692 nicIndex += 1
1693
1694 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1695 # cloud-init for ssh-key injection
1696 if cloud_config:
1697 self.cloud_init(vapp,cloud_config)
1698
1699 # deploy and power on vm
1700 self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
1701 deploytask = vapp.deploy(powerOn=False)
1702 if type(deploytask) is GenericTask:
1703 self.vca.block_until_completed(deploytask)
1704
1705 # ############# Stub code for SRIOV #################
1706 #Add SRIOV
1707 # if len(sriov_net_info) > 0:
1708 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1709 # vmname_andid ))
1710 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1711 # sriov_net_info,
1712 # vmname_andid)
1713 # if sriov_status:
1714 # self.logger.info("Added SRIOV {} to VM {}".format(
1715 # sriov_net_info,
1716 # vmname_andid)
1717 # )
1718 # reserve_memory = True
1719 # else:
1720 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1721 # sriov_net_info,
1722 # vmname_andid)
1723 # )
1724
1725 # If VM has PCI devices or SRIOV reserve memory for VM
1726 if reserve_memory:
1727 memReserve = vm_obj.config.hardware.memoryMB
1728 spec = vim.vm.ConfigSpec()
1729 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1730 task = vm_obj.ReconfigVM_Task(spec=spec)
1731 if task:
1732 result = self.wait_for_vcenter_task(task, vcenter_conect)
1733 self.logger.info("Reserved memory {} MB for "
1734 "VM VM status: {}".format(str(memReserve), result))
1735 else:
1736 self.logger.info("Fail to reserved memory {} to VM {}".format(
1737 str(memReserve), str(vm_obj)))
1738
1739 self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
1740
1741 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1742 poweron_task = vapp.poweron()
1743 if type(poweron_task) is GenericTask:
1744 self.vca.block_until_completed(poweron_task)
1745
1746 except Exception as exp :
1747 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
1748 self.logger.debug("new_vminstance(): Failed create new vm instance {} with exception {}"
1749 .format(name, exp))
1750 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
1751 .format(name, exp))
1752
1753 # check if vApp deployed and if that the case return vApp UUID otherwise -1
1754 wait_time = 0
1755 vapp_uuid = None
1756 while wait_time <= MAX_WAIT_TIME:
1757 try:
1758 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1759 except Exception as exp:
1760 raise vimconn.vimconnUnexpectedResponse(
1761 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1762 .format(vmname_andid, exp))
1763
1764 if vapp and vapp.me.deployed:
1765 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1766 break
1767 else:
1768 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1769 time.sleep(INTERVAL_TIME)
1770
1771 wait_time +=INTERVAL_TIME
1772
1773 if vapp_uuid is not None:
1774 return vapp_uuid, None
1775 else:
1776 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
1777
1778 ##
1779 ##
1780 ## based on current discussion
1781 ##
1782 ##
1783 ## server:
1784 # created: '2016-09-08T11:51:58'
1785 # description: simple-instance.linux1.1
1786 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1787 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1788 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1789 # status: ACTIVE
1790 # error_msg:
1791 # interfaces: …
1792 #
1793 def get_vminstance(self, vim_vm_uuid=None):
1794 """Returns the VM instance information from VIM"""
1795
1796 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1797
1798 vdc = self.get_vdc_details()
1799 if vdc is None:
1800 raise vimconn.vimconnConnectionException(
1801 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1802
1803 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1804 if not vm_info_dict:
1805 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1806 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1807
1808 status_key = vm_info_dict['status']
1809 error = ''
1810 try:
1811 vm_dict = {'created': vm_info_dict['created'],
1812 'description': vm_info_dict['name'],
1813 'status': vcdStatusCode2manoFormat[int(status_key)],
1814 'hostId': vm_info_dict['vmuuid'],
1815 'error_msg': error,
1816 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1817
1818 if 'interfaces' in vm_info_dict:
1819 vm_dict['interfaces'] = vm_info_dict['interfaces']
1820 else:
1821 vm_dict['interfaces'] = []
1822 except KeyError:
1823 vm_dict = {'created': '',
1824 'description': '',
1825 'status': vcdStatusCode2manoFormat[int(-1)],
1826 'hostId': vm_info_dict['vmuuid'],
1827 'error_msg': "Inconsistency state",
1828 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1829
1830 return vm_dict
1831
1832 def delete_vminstance(self, vm__vim_uuid, created_items=None):
1833 """Method poweroff and remove VM instance from vcloud director network.
1834
1835 Args:
1836 vm__vim_uuid: VM UUID
1837
1838 Returns:
1839 Returns the instance identifier
1840 """
1841
1842 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1843
1844 vdc = self.get_vdc_details()
1845 if vdc is None:
1846 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1847 self.tenant_name))
1848 raise vimconn.vimconnException(
1849 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1850
1851 try:
1852 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1853 if vapp_name is None:
1854 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1855 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1856 else:
1857 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1858
1859 # Delete vApp and wait for status change if task executed and vApp is None.
1860 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1861
1862 if vapp:
1863 if vapp.me.deployed:
1864 self.logger.info("Powering off vApp {}".format(vapp_name))
1865 #Power off vApp
1866 powered_off = False
1867 wait_time = 0
1868 while wait_time <= MAX_WAIT_TIME:
1869 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1870 if not vapp:
1871 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1872 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1873
1874 power_off_task = vapp.poweroff()
1875 if type(power_off_task) is GenericTask:
1876 result = self.vca.block_until_completed(power_off_task)
1877 if result:
1878 powered_off = True
1879 break
1880 else:
1881 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1882 time.sleep(INTERVAL_TIME)
1883
1884 wait_time +=INTERVAL_TIME
1885 if not powered_off:
1886 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1887 else:
1888 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1889
1890 #Undeploy vApp
1891 self.logger.info("Undeploy vApp {}".format(vapp_name))
1892 wait_time = 0
1893 undeployed = False
1894 while wait_time <= MAX_WAIT_TIME:
1895 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1896 if not vapp:
1897 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1898 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1899 undeploy_task = vapp.undeploy(action='powerOff')
1900
1901 if type(undeploy_task) is GenericTask:
1902 result = self.vca.block_until_completed(undeploy_task)
1903 if result:
1904 undeployed = True
1905 break
1906 else:
1907 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1908 time.sleep(INTERVAL_TIME)
1909
1910 wait_time +=INTERVAL_TIME
1911
1912 if not undeployed:
1913 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1914
1915 # delete vapp
1916 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1917 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1918
1919 if vapp is not None:
1920 wait_time = 0
1921 result = False
1922
1923 while wait_time <= MAX_WAIT_TIME:
1924 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1925 if not vapp:
1926 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1927 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1928
1929 delete_task = vapp.delete()
1930
1931 if type(delete_task) is GenericTask:
1932 self.vca.block_until_completed(delete_task)
1933 result = self.vca.block_until_completed(delete_task)
1934 if result:
1935 break
1936 else:
1937 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1938 time.sleep(INTERVAL_TIME)
1939
1940 wait_time +=INTERVAL_TIME
1941
1942 if not result:
1943 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1944
1945 except:
1946 self.logger.debug(traceback.format_exc())
1947 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1948
1949 if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
1950 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
1951 return vm__vim_uuid
1952 else:
1953 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1954
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            # DELETED (not found at vim)
                            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            # OTHER (Vim reported other status not understood)
                            # ERROR (VIM indicates an ERROR status)
                            # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            # CREATING (on building process), ERROR
                            # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
        """

        self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))

        vdc = self.get_vdc_details()
        if vdc is None:
            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))

        vms_dict = {}
        # NSX edge id list is fetched lazily, only when a VM interface lacks an IP
        nsx_edge_list = []
        for vmuuid in vm_list:
            vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
            if vmname is not None:

                try:
                    # PCI passthrough/SR-IOV details are merged into the vim_info dump
                    vm_pci_details = self.get_vm_pci_details(vmuuid)
                    the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
                    vm_info = the_vapp.get_vms_details()
                    # NOTE(review): vm_status is fetched but never used below
                    vm_status = vm_info[0]['status']
                    vm_info[0].update(vm_pci_details)

                    # NOTE(review): 'error_msg' is filled with the translated status
                    # text rather than an actual error message — confirm intended
                    vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                               'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                               'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}

                    # get networks
                    vm_app_networks = the_vapp.get_vms_network_info()
                    for vapp_network in vm_app_networks:
                        for vm_network in vapp_network:
                            if vm_network['name'] == vmname:
                                #Assign IP Address based on MAC Address in NSX DHCP lease info
                                if vm_network['ip'] is None:
                                    if not nsx_edge_list:
                                        nsx_edge_list = self.get_edge_details()
                                        if nsx_edge_list is None:
                                            raise vimconn.vimconnException("refresh_vms_status:"\
                                                                           "Failed to get edge details from NSX Manager")
                                    if vm_network['mac'] is not None:
                                        vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])

                                # the org-network uuid doubles as the interface id
                                vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
                                interface = {"mac_address": vm_network['mac'],
                                             "vim_net_id": vm_net_id,
                                             "vim_interface_id": vm_net_id,
                                             'ip_address': vm_network['ip']}
                                # interface['vim_info'] = yaml.safe_dump(vm_network)
                                vm_dict["interfaces"].append(interface)
                    # add a vm to vm dict
                    vms_dict.setdefault(vmuuid, vm_dict)
                except Exception as exp:
                    # best effort per VM: log and continue with the remaining VMs
                    self.logger.debug("Error in response {}".format(exp))
                    self.logger.debug(traceback.format_exc())

        return vms_dict
2031
2032
    def get_edge_details(self):
        """Get the NSX edge list from NSX Manager
        Returns list of NSX edge ids; raises vimconnException when no edge
        is found, returns None on a non-OK HTTP response.
        """
        edge_list = []
        rheaders = {'Content-Type': 'application/xml'}
        nsx_api_url = '/api/4.0/edges'

        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))

        try:
            resp = requests.get(self.nsx_manager + nsx_api_url,
                                auth = (self.nsx_user, self.nsx_password),
                                verify = False, headers = rheaders)
            if resp.status_code == requests.codes.ok:
                paged_Edge_List = XmlElementTree.fromstring(resp.text)
                for edge_pages in paged_Edge_List:
                    if edge_pages.tag == 'edgePage':
                        for edge_summary in edge_pages:
                            # a totalCount of 0 means the page is empty -> no edges at all
                            if edge_summary.tag == 'pagingInfo':
                                for element in edge_summary:
                                    if element.tag == 'totalCount' and element.text == '0':
                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
                                                                       .format(self.nsx_manager))

                            # collect the id of every edge summary entry
                            if edge_summary.tag == 'edgeSummary':
                                for element in edge_summary:
                                    if element.tag == 'id':
                                        edge_list.append(element.text)
                    else:
                        # any top-level element other than edgePage is unexpected
                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
                                                       .format(self.nsx_manager))

                if not edge_list:
                    raise vimconn.vimconnException("get_edge_details: "\
                                                   "No NSX edge details found: {}"
                                                   .format(self.nsx_manager))
                else:
                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
                    return edge_list
            else:
                # non-OK HTTP status: log the body and signal failure with None
                self.logger.debug("get_edge_details: "
                                  "Failed to get NSX edge details from NSX Manager: {}"
                                  .format(resp.content))
                return None

        except Exception as exp:
            self.logger.debug("get_edge_details: "\
                              "Failed to get NSX edge details from NSX Manager: {}"
                              .format(exp))
            raise vimconn.vimconnException("get_edge_details: "\
                                           "Failed to get NSX edge details from NSX Manager: {}"
                                           .format(exp))
2086
2087
2088 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2089 """Get IP address details from NSX edges, using the MAC address
2090 PARAMS: nsx_edges : List of NSX edges
2091 mac_address : Find IP address corresponding to this MAC address
2092 Returns: IP address corrresponding to the provided MAC address
2093 """
2094
2095 ip_addr = None
2096 rheaders = {'Content-Type': 'application/xml'}
2097
2098 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2099
2100 try:
2101 for edge in nsx_edges:
2102 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2103
2104 resp = requests.get(self.nsx_manager + nsx_api_url,
2105 auth = (self.nsx_user, self.nsx_password),
2106 verify = False, headers = rheaders)
2107
2108 if resp.status_code == requests.codes.ok:
2109 dhcp_leases = XmlElementTree.fromstring(resp.text)
2110 for child in dhcp_leases:
2111 if child.tag == 'dhcpLeaseInfo':
2112 dhcpLeaseInfo = child
2113 for leaseInfo in dhcpLeaseInfo:
2114 for elem in leaseInfo:
2115 if (elem.tag)=='macAddress':
2116 edge_mac_addr = elem.text
2117 if (elem.tag)=='ipAddress':
2118 ip_addr = elem.text
2119 if edge_mac_addr is not None:
2120 if edge_mac_addr == mac_address:
2121 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2122 .format(ip_addr, mac_address,edge))
2123 return ip_addr
2124 else:
2125 self.logger.debug("get_ipaddr_from_NSXedge: "\
2126 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2127 .format(resp.content))
2128
2129 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2130 return None
2131
2132 except XmlElementTree.ParseError as Err:
2133 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2134
2135
2136 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
2137 """Send and action over a VM instance from VIM
2138 Returns the vm_id if the action was successfully sent to the VIM"""
2139
2140 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2141 if vm__vim_uuid is None or action_dict is None:
2142 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2143
2144 vdc = self.get_vdc_details()
2145 if vdc is None:
2146 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2147
2148 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
2149 if vapp_name is None:
2150 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2151 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2152 else:
2153 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2154
2155 try:
2156 the_vapp = self.vca.get_vapp(vdc, vapp_name)
2157 # TODO fix all status
2158 if "start" in action_dict:
2159 vm_info = the_vapp.get_vms_details()
2160 vm_status = vm_info[0]['status']
2161 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2162 if vm_status == "Suspended" or vm_status == "Powered off":
2163 power_on_task = the_vapp.poweron()
2164 result = self.vca.block_until_completed(power_on_task)
2165 self.instance_actions_result("start", result, vapp_name)
2166 elif "rebuild" in action_dict:
2167 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2168 rebuild_task = the_vapp.deploy(powerOn=True)
2169 result = self.vca.block_until_completed(rebuild_task)
2170 self.instance_actions_result("rebuild", result, vapp_name)
2171 elif "pause" in action_dict:
2172 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2173 pause_task = the_vapp.undeploy(action='suspend')
2174 result = self.vca.block_until_completed(pause_task)
2175 self.instance_actions_result("pause", result, vapp_name)
2176 elif "resume" in action_dict:
2177 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2178 power_task = the_vapp.poweron()
2179 result = self.vca.block_until_completed(power_task)
2180 self.instance_actions_result("resume", result, vapp_name)
2181 elif "shutoff" in action_dict or "shutdown" in action_dict:
2182 action_name , value = action_dict.items()[0]
2183 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2184 power_off_task = the_vapp.undeploy(action='powerOff')
2185 result = self.vca.block_until_completed(power_off_task)
2186 if action_name == "shutdown":
2187 self.instance_actions_result("shutdown", result, vapp_name)
2188 else:
2189 self.instance_actions_result("shutoff", result, vapp_name)
2190 elif "forceOff" in action_dict:
2191 result = the_vapp.undeploy(action='force')
2192 self.instance_actions_result("forceOff", result, vapp_name)
2193 elif "reboot" in action_dict:
2194 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2195 reboot_task = the_vapp.reboot()
2196 else:
2197 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2198 return None
2199 except Exception as exp :
2200 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2201 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2202
2203 def instance_actions_result(self, action, result, vapp_name):
2204 if result:
2205 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2206 else:
2207 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2208
2209 def get_vminstance_console(self, vm_id, console_type="vnc"):
2210 """
2211 Get a console for the virtual machine
2212 Params:
2213 vm_id: uuid of the VM
2214 console_type, can be:
2215 "novnc" (by default), "xvpvnc" for VNC types,
2216 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2217 Returns dict with the console parameters:
2218 protocol: ssh, ftp, http, https, ...
2219 server: usually ip address
2220 port: the http, ssh, ... port
2221 suffix: extra text, e.g. the http path and query string
2222 """
2223 raise vimconn.vimconnNotImplemented("Should have implemented this")
2224
2225 # NOT USED METHODS in current version
2226
2227 def host_vim2gui(self, host, server_dict):
2228 """Transform host dictionary from VIM format to GUI format,
2229 and append to the server_dict
2230 """
2231 raise vimconn.vimconnNotImplemented("Should have implemented this")
2232
2233 def get_hosts_info(self):
2234 """Get the information of deployed hosts
2235 Returns the hosts content"""
2236 raise vimconn.vimconnNotImplemented("Should have implemented this")
2237
2238 def get_hosts(self, vim_tenant):
2239 """Get the hosts and deployed instances
2240 Returns the hosts content"""
2241 raise vimconn.vimconnNotImplemented("Should have implemented this")
2242
2243 def get_processor_rankings(self):
2244 """Get the processor rankings in the VIM database"""
2245 raise vimconn.vimconnNotImplemented("Should have implemented this")
2246
2247 def new_host(self, host_data):
2248 """Adds a new host to VIM"""
2249 '''Returns status code of the VIM response'''
2250 raise vimconn.vimconnNotImplemented("Should have implemented this")
2251
2252 def new_external_port(self, port_data):
2253 """Adds a external port to VIM"""
2254 '''Returns the port identifier'''
2255 raise vimconn.vimconnNotImplemented("Should have implemented this")
2256
2257 def new_external_network(self, net_name, net_type):
2258 """Adds a external network to VIM (shared)"""
2259 '''Returns the network identifier'''
2260 raise vimconn.vimconnNotImplemented("Should have implemented this")
2261
2262 def connect_port_network(self, port_id, network_id, admin=False):
2263 """Connects a external port to a network"""
2264 '''Returns status code of the VIM response'''
2265 raise vimconn.vimconnNotImplemented("Should have implemented this")
2266
2267 def new_vminstancefromJSON(self, vm_data):
2268 """Adds a VM instance to VIM"""
2269 '''Returns the instance identifier'''
2270 raise vimconn.vimconnNotImplemented("Should have implemented this")
2271
2272 def get_network_name_by_id(self, network_uuid=None):
2273 """Method gets vcloud director network named based on supplied uuid.
2274
2275 Args:
2276 network_uuid: network_id
2277
2278 Returns:
2279 The return network name.
2280 """
2281
2282 if not network_uuid:
2283 return None
2284
2285 try:
2286 org_dict = self.get_org(self.org_uuid)
2287 if 'networks' in org_dict:
2288 org_network_dict = org_dict['networks']
2289 for net_uuid in org_network_dict:
2290 if net_uuid == network_uuid:
2291 return org_network_dict[net_uuid]
2292 except:
2293 self.logger.debug("Exception in get_network_name_by_id")
2294 self.logger.debug(traceback.format_exc())
2295
2296 return None
2297
2298 def get_network_id_by_name(self, network_name=None):
2299 """Method gets vcloud director network uuid based on supplied name.
2300
2301 Args:
2302 network_name: network_name
2303 Returns:
2304 The return network uuid.
2305 network_uuid: network_id
2306 """
2307
2308 if not network_name:
2309 self.logger.debug("get_network_id_by_name() : Network name is empty")
2310 return None
2311
2312 try:
2313 org_dict = self.get_org(self.org_uuid)
2314 if org_dict and 'networks' in org_dict:
2315 org_network_dict = org_dict['networks']
2316 for net_uuid,net_name in org_network_dict.iteritems():
2317 if net_name == network_name:
2318 return net_uuid
2319
2320 except KeyError as exp:
2321 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2322
2323 return None
2324
2325 def list_org_action(self):
2326 """
2327 Method leverages vCloud director and query for available organization for particular user
2328
2329 Args:
2330 vca - is active VCA connection.
2331 vdc_name - is a vdc name that will be used to query vms action
2332
2333 Returns:
2334 The return XML respond
2335 """
2336
2337 url_list = [self.vca.host, '/api/org']
2338 vm_list_rest_call = ''.join(url_list)
2339
2340 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2341 response = Http.get(url=vm_list_rest_call,
2342 headers=self.vca.vcloud_session.get_vcloud_headers(),
2343 verify=self.vca.verify,
2344 logger=self.vca.logger)
2345
2346 if response.status_code == 403:
2347 response = self.retry_rest('GET', vm_list_rest_call)
2348
2349 if response.status_code == requests.codes.ok:
2350 return response.content
2351
2352 return None
2353
2354 def get_org_action(self, org_uuid=None):
2355 """
2356 Method leverages vCloud director and retrieve available object fdr organization.
2357
2358 Args:
2359 vca - is active VCA connection.
2360 vdc_name - is a vdc name that will be used to query vms action
2361
2362 Returns:
2363 The return XML respond
2364 """
2365
2366 if org_uuid is None:
2367 return None
2368
2369 url_list = [self.vca.host, '/api/org/', org_uuid]
2370 vm_list_rest_call = ''.join(url_list)
2371
2372 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2373 response = Http.get(url=vm_list_rest_call,
2374 headers=self.vca.vcloud_session.get_vcloud_headers(),
2375 verify=self.vca.verify,
2376 logger=self.vca.logger)
2377
2378 #Retry login if session expired & retry sending request
2379 if response.status_code == 403:
2380 response = self.retry_rest('GET', vm_list_rest_call)
2381
2382 if response.status_code == requests.codes.ok:
2383 return response.content
2384
2385 return None
2386
2387 def get_org(self, org_uuid=None):
2388 """
2389 Method retrieves available organization in vCloud Director
2390
2391 Args:
2392 org_uuid - is a organization uuid.
2393
2394 Returns:
2395 The return dictionary with following key
2396 "network" - for network list under the org
2397 "catalogs" - for network list under the org
2398 "vdcs" - for vdc list under org
2399 """
2400
2401 org_dict = {}
2402
2403 if org_uuid is None:
2404 return org_dict
2405
2406 content = self.get_org_action(org_uuid=org_uuid)
2407 try:
2408 vdc_list = {}
2409 network_list = {}
2410 catalog_list = {}
2411 vm_list_xmlroot = XmlElementTree.fromstring(content)
2412 for child in vm_list_xmlroot:
2413 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2414 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2415 org_dict['vdcs'] = vdc_list
2416 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2417 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2418 org_dict['networks'] = network_list
2419 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2420 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2421 org_dict['catalogs'] = catalog_list
2422 except:
2423 pass
2424
2425 return org_dict
2426
2427 def get_org_list(self):
2428 """
2429 Method retrieves available organization in vCloud Director
2430
2431 Args:
2432 vca - is active VCA connection.
2433
2434 Returns:
2435 The return dictionary and key for each entry VDC UUID
2436 """
2437
2438 org_dict = {}
2439
2440 content = self.list_org_action()
2441 try:
2442 vm_list_xmlroot = XmlElementTree.fromstring(content)
2443 for vm_xml in vm_list_xmlroot:
2444 if vm_xml.tag.split("}")[1] == 'Org':
2445 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2446 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2447 except:
2448 pass
2449
2450 return org_dict
2451
2452 def vms_view_action(self, vdc_name=None):
2453 """ Method leverages vCloud director vms query call
2454
2455 Args:
2456 vca - is active VCA connection.
2457 vdc_name - is a vdc name that will be used to query vms action
2458
2459 Returns:
2460 The return XML respond
2461 """
2462 vca = self.connect()
2463 if vdc_name is None:
2464 return None
2465
2466 url_list = [vca.host, '/api/vms/query']
2467 vm_list_rest_call = ''.join(url_list)
2468
2469 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2470 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2471 vca.vcloud_session.organization.Link)
2472 if len(refs) == 1:
2473 response = Http.get(url=vm_list_rest_call,
2474 headers=vca.vcloud_session.get_vcloud_headers(),
2475 verify=vca.verify,
2476 logger=vca.logger)
2477 if response.status_code == requests.codes.ok:
2478 return response.content
2479
2480 return None
2481
2482 def get_vapp_list(self, vdc_name=None):
2483 """
2484 Method retrieves vApp list deployed vCloud director and returns a dictionary
2485 contains a list of all vapp deployed for queried VDC.
2486 The key for a dictionary is vApp UUID
2487
2488
2489 Args:
2490 vca - is active VCA connection.
2491 vdc_name - is a vdc name that will be used to query vms action
2492
2493 Returns:
2494 The return dictionary and key for each entry vapp UUID
2495 """
2496
2497 vapp_dict = {}
2498 if vdc_name is None:
2499 return vapp_dict
2500
2501 content = self.vms_view_action(vdc_name=vdc_name)
2502 try:
2503 vm_list_xmlroot = XmlElementTree.fromstring(content)
2504 for vm_xml in vm_list_xmlroot:
2505 if vm_xml.tag.split("}")[1] == 'VMRecord':
2506 if vm_xml.attrib['isVAppTemplate'] == 'true':
2507 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2508 if 'vappTemplate-' in rawuuid[0]:
2509 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2510 # vm and use raw UUID as key
2511 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2512 except:
2513 pass
2514
2515 return vapp_dict
2516
2517 def get_vm_list(self, vdc_name=None):
2518 """
2519 Method retrieves VM's list deployed vCloud director. It returns a dictionary
2520 contains a list of all VM's deployed for queried VDC.
2521 The key for a dictionary is VM UUID
2522
2523
2524 Args:
2525 vca - is active VCA connection.
2526 vdc_name - is a vdc name that will be used to query vms action
2527
2528 Returns:
2529 The return dictionary and key for each entry vapp UUID
2530 """
2531 vm_dict = {}
2532
2533 if vdc_name is None:
2534 return vm_dict
2535
2536 content = self.vms_view_action(vdc_name=vdc_name)
2537 try:
2538 vm_list_xmlroot = XmlElementTree.fromstring(content)
2539 for vm_xml in vm_list_xmlroot:
2540 if vm_xml.tag.split("}")[1] == 'VMRecord':
2541 if vm_xml.attrib['isVAppTemplate'] == 'false':
2542 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2543 if 'vm-' in rawuuid[0]:
2544 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2545 # vm and use raw UUID as key
2546 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2547 except:
2548 pass
2549
2550 return vm_dict
2551
def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
    """Look up a single deployed VM in the given VDC by name or by UUID.

    Args:
        vdc_name: vdc name used to query the vms view.
        vapp_name: VM name, or vApp UUID when isuuid is True.
        isuuid: when True, vapp_name is matched (substring) against the VM
            container href instead of the VM name.

    Returns:
        dict with at most one entry mapping the VM UUID to its VMRecord
        attributes; empty dict when nothing matches or on parse failure.

    Raises:
        vimconn.vimconnConnectionException: when the vCD login fails.
    """
    vm_dict = {}
    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed")

    if vdc_name is None:
        return vm_dict

    content = self.vms_view_action(vdc_name=vdc_name)
    try:
        vm_list_xmlroot = XmlElementTree.fromstring(content)
        for vm_xml in vm_list_xmlroot:
            if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
                # choose the attribute to match on: container href (UUID
                # lookup) vs VM name; both branches shared the same body
                match_target = vm_xml.attrib['container'] if isuuid else vm_xml.attrib['name']
                if vapp_name in match_target:
                    rawuuid = vm_xml.attrib['href'].split('/')[-1]
                    if rawuuid.startswith('vm-'):
                        vm_dict[rawuuid[3:]] = vm_xml.attrib
                        break
    except Exception as exp:
        # preserve best-effort behavior but record the failure
        self.logger.debug("get_vapp: failed to parse respond {}".format(exp))

    return vm_dict
2597
def get_network_action(self, network_uuid=None):
    """Fetch a network definition from vCloud Director by its UUID.

    Args:
        network_uuid: uuid of the network to query.

    Returns:
        Raw XML body of the REST response, or None when the uuid is
        missing, there is no active session, or the call fails.
    """
    if network_uuid is None:
        return None

    rest_url = "{}{}{}".format(self.vca.host, '/api/network/', network_uuid)

    session = self.vca.vcloud_session
    if session and session.organization:
        response = Http.get(url=rest_url,
                            headers=session.get_vcloud_headers(),
                            verify=self.vca.verify,
                            logger=self.vca.logger)

        # a 403 normally means the vCD session expired: log in again and retry
        if response.status_code == 403:
            response = self.retry_rest('GET', rest_url)

        if response.status_code == requests.codes.ok:
            return response.content

    return None
2630
def get_vcd_network(self, network_uuid=None):
    """Retrieve a network configuration from vCloud Director.

    The XML response is flattened into a dictionary: the network 'status',
    'name' and 'uuid' attributes plus every element found under the
    <Configuration> section (e.g. Gateway, Netmask, Dns1, DnsSuffix,
    IsEnabled, StartAddress, EndAddress, FenceMode), so callers can do
    network_configuration['Gateway'] etc.

    Args:
        network_uuid: VCD network UUID.

    Returns:
        dict with the network configuration, or None when no uuid is given.

    Raises:
        vimconn.vimconnException: when the network cannot be retrieved or
            the response cannot be parsed.
    """
    network_configuration = {}
    if network_uuid is None:
        return network_uuid

    try:
        content = self.get_network_action(network_uuid=network_uuid)
        vm_list_xmlroot = XmlElementTree.fromstring(content)

        network_configuration['status'] = vm_list_xmlroot.get("status")
        network_configuration['name'] = vm_list_xmlroot.get("name")
        # id has the form urn:vcloud:network:<uuid>
        network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]

        for child in vm_list_xmlroot:
            if child.tag.split("}")[1] == 'IsShared':
                network_configuration['isShared'] = child.text.strip()
            if child.tag.split("}")[1] == 'Configuration':
                for configuration in child.iter():
                    tagKey = configuration.tag.split("}")[1].strip()
                    # container elements (IpScopes, IpRange, ...) may carry
                    # no text at all; skip them instead of crashing on
                    # None.strip()
                    if tagKey != "" and configuration.text is not None:
                        network_configuration[tagKey] = configuration.text.strip()
        return network_configuration
    except Exception as exp:
        self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
        raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2691
def delete_network_action(self, network_uuid=None):
    """Delete the given org VDC network from vCloud Director.

    Args:
        network_uuid: uuid of the network to delete.

    Returns:
        True when vCD accepted the delete request (HTTP 202),
        False otherwise.

    Raises:
        vimconn.vimconnConnectionException: if the admin login fails.
    """
    vca = self.connect_as_admin()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed")
    if network_uuid is None:
        return False

    # network deletion goes through the admin API endpoint
    rest_url = "{}{}{}".format(vca.host, '/api/admin/network/', network_uuid)

    session = vca.vcloud_session
    if session and session.organization:
        response = Http.delete(url=rest_url,
                               headers=session.get_vcloud_headers(),
                               verify=vca.verify,
                               logger=vca.logger)
        # 202 Accepted: vCD queued the deletion task
        if response.status_code == 202:
            return True

    return False
2722
def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
                   ip_profile=None, isshared='true'):
    """Create a network in vCloud Director.

    A random UUID suffix is appended to network_name so repeated creations
    of the same logical network do not collide in vCD.

    Args:
        network_name: base name of the network to be created.
        net_type: one of 'bridge', 'data', 'ptp', 'mgmt'.
        ip_profile: dict containing the IP parameters of the network.
        isshared: 'true'/'false' string; whether the network is shared.
        parent_network_uuid: optional parent provider vdc network used for
            mapping; when omitted the first available one is used.

    Returns:
        The new network uuid, or None on failure.
    """
    new_network_name = [network_name, '-', str(uuid.uuid4())]
    content = self.create_network_rest(network_name=''.join(new_network_name),
                                       ip_profile=ip_profile,
                                       net_type=net_type,
                                       parent_network_uuid=parent_network_uuid,
                                       isshared=isshared)
    if content is None:
        self.logger.debug("Failed create network {}.".format(network_name))
        return None

    try:
        vm_list_xmlroot = XmlElementTree.fromstring(content)
        # id has the form urn:vcloud:network:<uuid>
        vcd_uuid = vm_list_xmlroot.get('id').split(":")
        if len(vcd_uuid) == 4:
            self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
            return vcd_uuid[3]
    except Exception as exp:
        # keep the original best-effort contract but record the parse error
        self.logger.debug("Failed create network {}, exception {}".format(network_name, exp))
        return None
2759
def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
                        ip_profile=None, isshared='true'):
    """
    Method create network in vCloud director

    Builds an OrgVdcNetwork XML payload from ip_profile (filling any
    missing field from DEFAULT_IP_PROFILE), resolves the parent/provider
    network to attach to, POSTs the payload to the tenant vdc admin API
    and waits for the creation task to complete.

    Args:
        network_name - is network name to be created.
        net_type - can be 'bridge','data','ptp','mgmt'.
        ip_profile is a dict containing the IP parameters of the network
        isshared - is a boolean
        parent_network_uuid - is parent provider vdc network that will be used for mapping.
        It optional attribute. by default if no parent network indicate the first available will be used.

    Returns:
        The return network uuid or return None
    """

    vca = self.connect_as_admin()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed.")
    if network_name is None:
        return None

    # GET the tenant vdc admin view: it carries both the provider vdc
    # reference and the 'add orgVdcNetwork' link used for the POST below
    url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
    vm_list_rest_call = ''.join(url_list)
    if not (not vca.vcloud_session or not vca.vcloud_session.organization):
        response = Http.get(url=vm_list_rest_call,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)

        provider_network = None      # href of the provider vdc
        available_networks = None    # href of the first available provider network
        add_vdc_rest_url = None      # href where the new network is POSTed

        if response.status_code != requests.codes.ok:
            self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                      response.status_code))
            return None
        else:
            try:
                vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                for child in vm_list_xmlroot:
                    if child.tag.split("}")[1] == 'ProviderVdcReference':
                        provider_network = child.attrib.get('href')
                        # application/vnd.vmware.admin.providervdc+xml
                    if child.tag.split("}")[1] == 'Link':
                        if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
                                and child.attrib.get('rel') == 'add':
                            add_vdc_rest_url = child.attrib.get('href')
            except:
                self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                self.logger.debug("Respond body {}".format(response.content))
                return None

        # find pvdc provided available network
        response = Http.get(url=provider_network,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)
        if response.status_code != requests.codes.ok:
            self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                      response.status_code))
            return None

        # available_networks.split("/")[-1]

        if parent_network_uuid is None:
            # no parent requested: pick the first available provider network
            try:
                vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                for child in vm_list_xmlroot.iter():
                    if child.tag.split("}")[1] == 'AvailableNetworks':
                        for networks in child.iter():
                            # application/vnd.vmware.admin.network+xml
                            if networks.attrib.get('href') is not None:
                                available_networks = networks.attrib.get('href')
                                break
            except:
                return None

        try:
            #Configure IP profile of the network
            ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE

            if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
                # pick a random 192.168.x.0/24 subnet when none was requested
                subnet_rand = random.randint(0, 255)
                ip_base = "192.168.{}.".format(subnet_rand)
                ip_profile['subnet_address'] = ip_base + "0/24"
            else:
                ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'

            # fill every missing ip_profile field with a default:
            # .1 gateway, .2 dns, .3 first dhcp address
            if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
                ip_profile['gateway_address']=ip_base + "1"
            if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
                ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
            if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
                ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
            if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
                ip_profile['dhcp_start_address']=ip_base + "3"
            if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
                ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
            if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
                ip_profile['dns_address']=ip_base + "2"

            gateway_address=ip_profile['gateway_address']
            dhcp_count=int(ip_profile['dhcp_count'])
            subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])

            if ip_profile['dhcp_enabled']==True:
                dhcp_enabled='true'
            else:
                dhcp_enabled='false'
            dhcp_start_address=ip_profile['dhcp_start_address']

            #derive dhcp_end_address from dhcp_start_address & dhcp_count
            end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
            end_ip_int += dhcp_count - 1
            dhcp_end_address = str(netaddr.IPAddress(end_ip_int))

            # NOTE(review): ip_version is read here but never used in the
            # payload below - confirm whether IPv6 support was intended
            ip_version=ip_profile['ip_version']
            dns_address=ip_profile['dns_address']
        except KeyError as exp:
            self.logger.debug("Create Network REST: Key error {}".format(exp))
            raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))

        # either use client provided UUID or search for a first available
        #  if both are not defined we return none
        if parent_network_uuid is not None:
            # NOTE(review): this overwrites the POST target with the parent
            # network href - verify this is the intended vCD endpoint
            url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
            add_vdc_rest_url = ''.join(url_list)

        #Creating all networks as Direct Org VDC type networks.
        #Unused in case of Underlay (data/ptp) network interface.
        fence_mode="bridged"
        is_inherited='false'
        dns_list = dns_address.split(";")
        dns1 = dns_list[0]
        dns2_text = ""
        if len(dns_list) >= 2:
            dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
        data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                <Description>Openmano created</Description>
                        <Configuration>
                            <IpScopes>
                                <IpScope>
                                    <IsInherited>{1:s}</IsInherited>
                                    <Gateway>{2:s}</Gateway>
                                    <Netmask>{3:s}</Netmask>
                                    <Dns1>{4:s}</Dns1>{5:s}
                                    <IsEnabled>{6:s}</IsEnabled>
                                    <IpRanges>
                                        <IpRange>
                                            <StartAddress>{7:s}</StartAddress>
                                            <EndAddress>{8:s}</EndAddress>
                                        </IpRange>
                                    </IpRanges>
                                </IpScope>
                            </IpScopes>
                            <ParentNetwork href="{9:s}"/>
                            <FenceMode>{10:s}</FenceMode>
                        </Configuration>
                        <IsShared>{11:s}</IsShared>
            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
                                        subnet_address, dns1, dns2_text, dhcp_enabled,
                                        dhcp_start_address, dhcp_end_address, available_networks,
                                        fence_mode, isshared)

        headers = vca.vcloud_session.get_vcloud_headers()
        headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
        try:
            response = Http.post(url=add_vdc_rest_url,
                                 headers=headers,
                                 data=data,
                                 verify=vca.verify,
                                 logger=vca.logger)

            if response.status_code != 201:
                self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
                                  .format(response.status_code,response.content))
            else:
                network = networkType.parseString(response.content, True)
                create_nw_task = network.get_Tasks().get_Task()[0]

                # if we all ok we respond with content after network creation completes
                # otherwise by default return None
                if create_nw_task is not None:
                    self.logger.debug("Create Network REST : Waiting for Network creation complete")
                    status = vca.block_until_completed(create_nw_task)
                    if status:
                        return response.content
                    else:
                        self.logger.debug("create_network_rest task failed. Network Create response : {}"
                                          .format(response.content))
        except Exception as exp:
            self.logger.debug("create_network_rest : Exception : {} ".format(exp))

    return None
2957
def convert_cidr_to_netmask(self, cidr_ip=None):
    """Convert a CIDR address into dotted-quad netmask form.

    Args:
        cidr_ip: address in CIDR notation (e.g. '10.0.0.0/24'); a value
            without a '/' is assumed to already be a netmask.

    Returns:
        The netmask string (e.g. '255.255.255.0'), the input unchanged
        when it carries no prefix length, or None for None input.
    """
    if cidr_ip is None:
        return None

    if '/' not in cidr_ip:
        # already a plain netmask - nothing to convert
        return cidr_ip

    _, _, net_bits = cidr_ip.partition('/')
    # build the 32-bit mask and render it as a dotted quad
    mask_int = (0xffffffff << (32 - int(net_bits))) & 0xffffffff
    return socket.inet_ntoa(struct.pack(">I", mask_int))
2974
def get_provider_rest(self, vca=None):
    """Fetch the vCloud Director admin view (provider vdc references).

    Args:
        vca: active admin VCA connection.

    Returns:
        XML body of the /api/admin response, or None when the call fails.
    """
    admin_url = ''.join([vca.host, '/api/admin'])
    response = Http.get(url=admin_url,
                        headers=vca.vcloud_session.get_vcloud_headers(),
                        verify=vca.verify,
                        logger=vca.logger)

    if response.status_code != requests.codes.ok:
        return None
    return response.content
2997
def create_vdc(self, vdc_name=None):
    """Create a new VDC from the pre-defined vdc template.

    Args:
        vdc_name: name for the new vdc.

    Returns:
        dict mapping the new vdc uuid to the creation-task href, or None
        when instantiation or response parsing fails.
    """
    vdc_dict = {}

    xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
    if xml_content is not None:
        try:
            task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
            for child in task_resp_xmlroot:
                if child.tag.split("}")[1] == 'Owner':
                    # Owner href ends with .../vdc/<uuid>
                    vdc_id = child.attrib.get('href').split("/")[-1]
                    vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
            return vdc_dict
        except Exception as exp:
            # log instead of silently swallowing a malformed task response
            self.logger.debug("create_vdc: failed to parse respond {}".format(exp))
            self.logger.debug("Respond body {}".format(xml_content))

    return None
3015
def create_vdc_from_tmpl_rest(self, vdc_name=None):
    """
    Method create vdc in vCloud director based on VDC template.
    it uses pre-defined template that must be named openmano

    Lists the available vdc templates, picks the last one of media type
    'application/vnd.vmware.admin.vdcTemplate+xml', then POSTs an
    InstantiateVdcTemplateParams request against the organization and
    waits for the resulting task.

    Args:
        vdc_name - name of a new vdc.

    Returns:
        The return xml content of respond or None
    """

    self.logger.info("Creating new vdc {}".format(vdc_name))
    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed")
    if vdc_name is None:
        return None

    url_list = [vca.host, '/api/vdcTemplates']
    vm_list_rest_call = ''.join(url_list)
    response = Http.get(url=vm_list_rest_call,
                        headers=vca.vcloud_session.get_vcloud_headers(),
                        verify=vca.verify,
                        logger=vca.logger)

    # container url to a template
    vdc_template_ref = None
    try:
        vm_list_xmlroot = XmlElementTree.fromstring(response.content)
        for child in vm_list_xmlroot:
            # application/vnd.vmware.admin.providervdc+xml
            # we need find a template from witch we instantiate VDC
            if child.tag.split("}")[1] == 'VdcTemplate':
                if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
                    # NOTE: no break - when several templates match, the
                    # last one in the listing wins
                    vdc_template_ref = child.attrib.get('href')
    except:
        self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
        self.logger.debug("Respond body {}".format(response.content))
        return None

    # if we didn't found required pre defined template we return None
    if vdc_template_ref is None:
        return None

    try:
        # instantiate vdc
        url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
        vm_list_rest_call = ''.join(url_list)
        data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                                        <Source href="{1:s}"></Source>
                                        <Description>opnemano</Description>
                                        </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
        headers = vca.vcloud_session.get_vcloud_headers()
        headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
        response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
                             logger=vca.logger)

        vdc_task = taskType.parseString(response.content, True)
        if type(vdc_task) is GenericTask:
            # wait for the vdc instantiation task before reporting success
            self.vca.block_until_completed(vdc_task)

        # if we all ok we respond with content otherwise by default None
        if response.status_code >= 200 and response.status_code < 300:
            return response.content
        return None
    except:
        self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
        self.logger.debug("Respond body {}".format(response.content))

    return None
3087
def create_vdc_rest(self, vdc_name=None):
    """
    Method create network in vCloud director

    Creates a vdc directly (without a template): reads the org admin view
    to find the 'add createVdcParams' link, resolves a provider vdc
    reference through get_provider_rest(), then POSTs a CreateVdcParams
    payload with fixed CPU/memory/storage limits.

    Args:
        vdc_name - vdc name to be created.

    Returns:
        The return response content (the created vdc XML) or None
    """

    self.logger.info("Creating new vdc {}".format(vdc_name))

    vca = self.connect_as_admin()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed")
    if vdc_name is None:
        return None

    url_list = [vca.host, '/api/admin/org/', self.org_uuid]
    vm_list_rest_call = ''.join(url_list)
    if not (not vca.vcloud_session or not vca.vcloud_session.organization):
        response = Http.get(url=vm_list_rest_call,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)

        provider_vdc_ref = None    # href of a provider vdc to back the new vdc
        add_vdc_rest_url = None    # href where CreateVdcParams is POSTed
        available_networks = None  # unused here; kept for parity with sibling methods

        if response.status_code != requests.codes.ok:
            self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                      response.status_code))
            return None
        else:
            try:
                vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                for child in vm_list_xmlroot:
                    # application/vnd.vmware.admin.providervdc+xml
                    if child.tag.split("}")[1] == 'Link':
                        if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
                                and child.attrib.get('rel') == 'add':
                            add_vdc_rest_url = child.attrib.get('href')
            except:
                self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                self.logger.debug("Respond body {}".format(response.content))
                return None

        response = self.get_provider_rest(vca=vca)
        try:
            vm_list_xmlroot = XmlElementTree.fromstring(response)
            for child in vm_list_xmlroot:
                if child.tag.split("}")[1] == 'ProviderVdcReferences':
                    for sub_child in child:
                        # last reference in the list wins
                        provider_vdc_ref = sub_child.attrib.get('href')
        except:
            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
            self.logger.debug("Respond body {}".format(response))
            return None

        if add_vdc_rest_url is not None and provider_vdc_ref is not None:
            data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
                    <AllocationModel>ReservationPool</AllocationModel>
                    <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
                    <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
                    </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
                    <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
                    <ProviderVdcReference
                    name="Main Provider"
                    href="{2:s}" />
                    <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
                                                                                                  escape(vdc_name),
                                                                                                  provider_vdc_ref)

            headers = vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
            response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
                                 logger=vca.logger)

            # if we all ok we respond with content otherwise by default None
            if response.status_code == 201:
                return response.content
    return None
3174
def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
    """
    Method retrieve vapp detail from vCloud director

    GETs /api/vApp/vapp-<uuid> and flattens the interesting parts of the
    response into a dictionary: creation date, network configuration and
    IP scope values, the first child VM's attributes (name, status,
    deployed, vmuuid, ...), its NIC list ('interfaces'), console ticket
    links, vCenter MoRef ('vm_vcenter_info') and the hard-disk size and
    edit href ('vm_virtual_hardware').

    Args:
        vapp_uuid - is vapp identifier.
        need_admin_access - when True an admin session is used and the
            403-retry path is skipped.

    Returns:
        dict with the parsed vApp details (empty on REST failure),
        or None when no vapp_uuid is given.
    """

    parsed_respond = {}
    vca = None

    if need_admin_access:
        vca = self.connect_as_admin()
    else:
        vca = self.vca

    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed")
    if vapp_uuid is None:
        return None

    url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
    get_vapp_restcall = ''.join(url_list)

    if vca.vcloud_session and vca.vcloud_session.organization:
        response = Http.get(url=get_vapp_restcall,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)

        if response.status_code == 403:
            if need_admin_access == False:
                # session expired: re-login and retry (non-admin session only)
                response = self.retry_rest('GET', get_vapp_restcall)

        if response.status_code != requests.codes.ok:
            self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
                                                                                      response.status_code))
            return parsed_respond

        try:
            xmlroot_respond = XmlElementTree.fromstring(response.content)
            parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']

            # namespace map used by all find() calls below
            namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                          'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                          'vmw': 'http://www.vmware.com/schema/ovf',
                          'vm': 'http://www.vmware.com/vcloud/v1.5',
                          'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                          "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
                          "xmlns":"http://www.vmware.com/vcloud/v1.5"
                          }

            created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
            if created_section is not None:
                parsed_respond['created'] = created_section.text

            network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
            if network_section is not None and 'networkName' in network_section.attrib:
                parsed_respond['networkname'] = network_section.attrib['networkName']

            ipscopes_section = \
                xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
                                     namespaces)
            if ipscopes_section is not None:
                # flatten every IpScope leaf (Gateway, Netmask, ...) and the
                # Start/EndAddress of each IpRange into parsed_respond
                for ipscope in ipscopes_section:
                    for scope in ipscope:
                        tag_key = scope.tag.split("}")[1]
                        if tag_key == 'IpRanges':
                            ip_ranges = scope.getchildren()
                            for ipblock in ip_ranges:
                                for block in ipblock:
                                    parsed_respond[block.tag.split("}")[1]] = block.text
                        else:
                            parsed_respond[tag_key] = scope.text

            # parse children section for other attrib
            children_section = xmlroot_respond.find('vm:Children/', namespaces)
            if children_section is not None:
                parsed_respond['name'] = children_section.attrib['name']
                parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
                    if "nestedHypervisorEnabled" in children_section.attrib else None
                parsed_respond['deployed'] = children_section.attrib['deployed']
                parsed_respond['status'] = children_section.attrib['status']
                # id has the form urn:vcloud:vm:<uuid>
                parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
                network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
                nic_list = []
                for adapters in network_adapter:
                    adapter_key = adapters.tag.split("}")[1]
                    if adapter_key == 'PrimaryNetworkConnectionIndex':
                        parsed_respond['primarynetwork'] = adapters.text
                    if adapter_key == 'NetworkConnection':
                        vnic = {}
                        if 'network' in adapters.attrib:
                            vnic['network'] = adapters.attrib['network']
                        for adapter in adapters:
                            setting_key = adapter.tag.split("}")[1]
                            vnic[setting_key] = adapter.text
                        nic_list.append(vnic)

                # console ticket links for remote-console access
                for link in children_section:
                    if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                        if link.attrib['rel'] == 'screen:acquireTicket':
                            parsed_respond['acquireTicket'] = link.attrib
                        if link.attrib['rel'] == 'screen:acquireMksTicket':
                            parsed_respond['acquireMksTicket'] = link.attrib

                parsed_respond['interfaces'] = nic_list
                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                if vCloud_extension_section is not None:
                    # vCenter managed-object reference of the backing VM
                    vm_vcenter_info = {}
                    vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                    vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                    if vmext is not None:
                        vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                    parsed_respond["vm_vcenter_info"]= vm_vcenter_info

                virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
                vm_virtual_hardware_info = {}
                if virtual_hardware_section is not None:
                    # first 'Hard disk' item gives the disk capacity in MB
                    for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
                        if item.find("rasd:Description",namespaces).text == "Hard disk":
                            disk_size = item.find("rasd:HostResource" ,namespaces
                                                  ).attrib["{"+namespaces['vm']+"}capacity"]

                            vm_virtual_hardware_info["disk_size"]= disk_size
                            break

                    # the edit link ending in /disks is used for disk resize
                    for link in virtual_hardware_section:
                        if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                            if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
                                vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
                                break

                parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
        except Exception as exp :
            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
    return parsed_respond
3315
def acuire_console(self, vm_uuid=None):
    """Acquire a remote-console ticket for the given VM.

    (Method name misspelling is kept for backward compatibility with
    existing callers.)

    Args:
        vm_uuid: uuid of the VM.

    Returns:
        XML body of the acquireTicket POST response, or None when no uuid
        is given, there is no active session, or the call fails.
    """
    if vm_uuid is None:
        return None

    if self.vca.vcloud_session and self.vca.vcloud_session.organization:
        # bug fix: get_vapp_details_rest is a bound method, so the explicit
        # extra 'self' argument was passed as the vapp_uuid positional and
        # raised TypeError ("got multiple values for argument 'vapp_uuid'")
        vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
        console_dict = vm_dict['acquireTicket']
        console_rest_call = console_dict['href']

        response = Http.post(url=console_rest_call,
                             headers=self.vca.vcloud_session.get_vcloud_headers(),
                             verify=self.vca.verify,
                             logger=self.vca.logger)
        # retry once with a fresh login if the session expired
        if response.status_code == 403:
            response = self.retry_rest('POST', console_rest_call)

        if response.status_code == requests.codes.ok:
            return response.content

    return None
3337
def modify_vm_disk(self, vapp_uuid, flavor_disk):
    """Grow the VM disk when the flavor requests more space than deployed.

    Args:
        vapp_uuid - is vapp identifier.
        flavor_disk - disk size (GB) as specified in VNFD (flavor)

    Returns:
        True when the current disk already satisfies the flavor, the
        resize-task status when a resize was issued, None on error.
    """
    result = None
    try:
        # the flavor expresses the disk in GB while vCD reports MB
        requested_mb = int(flavor_disk) * 1024
        details = self.get_vapp_details_rest(vapp_uuid)
        if details:
            name = details["name"]
            self.logger.info("VM: {} flavor_disk :{}".format(name , requested_mb))

        if details and "vm_virtual_hardware" in details:
            current_mb = int(details["vm_virtual_hardware"]["disk_size"])
            edit_href = details["vm_virtual_hardware"]["disk_edit_href"]

            self.logger.info("VM: {} VM_disk :{}".format(name , current_mb))

            if requested_mb > current_mb:
                # issue the resize through the disk edit endpoint
                result = self.modify_vm_disk_rest(edit_href, requested_mb)
                self.logger.info("Modify disk of VM {} from {} to {} MB".format(name,
                                                                current_mb, requested_mb))
            else:
                result = True
                self.logger.info("No need to modify disk of VM {}".format(name))

        return result
    except Exception as exp:
        self.logger.info("Error occurred while modifing disk size {}".format(exp))
3375
3376
def modify_vm_disk_rest(self, disk_href , disk_size):
    """
    Method retrieve modify vm disk size

    GETs the current RASD disk item list from disk_href, rewrites the
    'Hard disk' item's capacity attribute to disk_size and PUTs the
    modified document back, then waits for the resulting vCD task.

    Args:
        disk_href - vCD API URL to GET and PUT disk data
        disk_size - disk size (MB) as specified in VNFD (flavor)

    Returns:
        The resize-task completion status, or None on any failure.
    """
    if disk_href is None or disk_size is None:
        return None

    if self.vca.vcloud_session and self.vca.vcloud_session.organization:
        response = Http.get(url=disk_href,
                            headers=self.vca.vcloud_session.get_vcloud_headers(),
                            verify=self.vca.verify,
                            logger=self.vca.logger)

        # session may have expired: re-login and retry the GET
        if response.status_code == 403:
            response = self.retry_rest('GET', disk_href)

        if response.status_code != requests.codes.ok:
            self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
                                                                                          response.status_code))
            return None
        try:
            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
            # collect the document's own namespace prefixes and add the
            # default vCloud one under the 'xmlns' key
            # (iteritems: this module targets Python 2)
            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

            # locate the 'Hard disk' RASD item and patch its capacity in place
            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                if item.find("rasd:Description",namespaces).text == "Hard disk":
                    disk_item = item.find("rasd:HostResource" ,namespaces )
                    if disk_item is not None:
                        disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
                        break

            data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
                                            xml_declaration=True)

            #Send PUT request to modify disk size
            headers = self.vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

            response = Http.put(url=disk_href,
                                data=data,
                                headers=headers,
                                verify=self.vca.verify, logger=self.logger)

            if response.status_code == 403:
                add_headers = {'Content-Type': headers['Content-Type']}
                response = self.retry_rest('PUT', disk_href, add_headers, data)

            if response.status_code != 202:
                self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
                                                                                              response.status_code))
            else:
                # 202 Accepted: wait for the resize task to complete
                modify_disk_task = taskType.parseString(response.content, True)
                if type(modify_disk_task) is GenericTask:
                    status = self.vca.block_until_completed(modify_disk_task)
                    return status

            return None

        except Exception as exp :
            self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
            return None
3446
3447 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3448 """
3449 Method to attach pci devices to VM
3450
3451 Args:
3452 vapp_uuid - uuid of vApp/VM
3453 pci_devices - pci devices infromation as specified in VNFD (flavor)
3454
3455 Returns:
3456 The status of add pci device task , vm object and
3457 vcenter_conect object
3458 """
3459 vm_obj = None
3460 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3461 vcenter_conect, content = self.get_vcenter_content()
3462 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3463
3464 if vm_moref_id:
3465 try:
3466 no_of_pci_devices = len(pci_devices)
3467 if no_of_pci_devices > 0:
3468 #Get VM and its host
3469 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3470 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3471 if host_obj and vm_obj:
3472 #get PCI devies from host on which vapp is currently installed
3473 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3474
3475 if avilable_pci_devices is None:
3476 #find other hosts with active pci devices
3477 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3478 content,
3479 no_of_pci_devices
3480 )
3481
3482 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3483 #Migrate vm to the host where PCI devices are availble
3484 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3485 task = self.relocate_vm(new_host_obj, vm_obj)
3486 if task is not None:
3487 result = self.wait_for_vcenter_task(task, vcenter_conect)
3488 self.logger.info("Migrate VM status: {}".format(result))
3489 host_obj = new_host_obj
3490 else:
3491 self.logger.info("Fail to migrate VM : {}".format(result))
3492 raise vimconn.vimconnNotFoundException(
3493 "Fail to migrate VM : {} to host {}".format(
3494 vmname_andid,
3495 new_host_obj)
3496 )
3497
3498 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3499 #Add PCI devices one by one
3500 for pci_device in avilable_pci_devices:
3501 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3502 if task:
3503 status= self.wait_for_vcenter_task(task, vcenter_conect)
3504 if status:
3505 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3506 else:
3507 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3508 return True, vm_obj, vcenter_conect
3509 else:
3510 self.logger.error("Currently there is no host with"\
3511 " {} number of avaialble PCI devices required for VM {}".format(
3512 no_of_pci_devices,
3513 vmname_andid)
3514 )
3515 raise vimconn.vimconnNotFoundException(
3516 "Currently there is no host with {} "\
3517 "number of avaialble PCI devices required for VM {}".format(
3518 no_of_pci_devices,
3519 vmname_andid))
3520 else:
3521 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
3522
3523 except vmodl.MethodFault as error:
3524 self.logger.error("Error occurred while adding PCI devices {} ",error)
3525 return None, vm_obj, vcenter_conect
3526
3527 def get_vm_obj(self, content, mob_id):
3528 """
3529 Method to get the vsphere VM object associated with a given morf ID
3530 Args:
3531 vapp_uuid - uuid of vApp/VM
3532 content - vCenter content object
3533 mob_id - mob_id of VM
3534
3535 Returns:
3536 VM and host object
3537 """
3538 vm_obj = None
3539 host_obj = None
3540 try :
3541 container = content.viewManager.CreateContainerView(content.rootFolder,
3542 [vim.VirtualMachine], True
3543 )
3544 for vm in container.view:
3545 mobID = vm._GetMoId()
3546 if mobID == mob_id:
3547 vm_obj = vm
3548 host_obj = vm_obj.runtime.host
3549 break
3550 except Exception as exp:
3551 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3552 return host_obj, vm_obj
3553
3554 def get_pci_devices(self, host, need_devices):
3555 """
3556 Method to get the details of pci devices on given host
3557 Args:
3558 host - vSphere host object
3559 need_devices - number of pci devices needed on host
3560
3561 Returns:
3562 array of pci devices
3563 """
3564 all_devices = []
3565 all_device_ids = []
3566 used_devices_ids = []
3567
3568 try:
3569 if host:
3570 pciPassthruInfo = host.config.pciPassthruInfo
3571 pciDevies = host.hardware.pciDevice
3572
3573 for pci_status in pciPassthruInfo:
3574 if pci_status.passthruActive:
3575 for device in pciDevies:
3576 if device.id == pci_status.id:
3577 all_device_ids.append(device.id)
3578 all_devices.append(device)
3579
3580 #check if devices are in use
3581 avalible_devices = all_devices
3582 for vm in host.vm:
3583 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3584 vm_devices = vm.config.hardware.device
3585 for device in vm_devices:
3586 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3587 if device.backing.id in all_device_ids:
3588 for use_device in avalible_devices:
3589 if use_device.id == device.backing.id:
3590 avalible_devices.remove(use_device)
3591 used_devices_ids.append(device.backing.id)
3592 self.logger.debug("Device {} from devices {}"\
3593 "is in use".format(device.backing.id,
3594 device)
3595 )
3596 if len(avalible_devices) < need_devices:
3597 self.logger.debug("Host {} don't have {} number of active devices".format(host,
3598 need_devices))
3599 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
3600 avalible_devices))
3601 return None
3602 else:
3603 required_devices = avalible_devices[:need_devices]
3604 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
3605 len(avalible_devices),
3606 host,
3607 need_devices))
3608 self.logger.info("Retruning {} devices as {}".format(need_devices,
3609 required_devices ))
3610 return required_devices
3611
3612 except Exception as exp:
3613 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3614
3615 return None
3616
3617 def get_host_and_PCIdevices(self, content, need_devices):
3618 """
3619 Method to get the details of pci devices infromation on all hosts
3620
3621 Args:
3622 content - vSphere host object
3623 need_devices - number of pci devices needed on host
3624
3625 Returns:
3626 array of pci devices and host object
3627 """
3628 host_obj = None
3629 pci_device_objs = None
3630 try:
3631 if content:
3632 container = content.viewManager.CreateContainerView(content.rootFolder,
3633 [vim.HostSystem], True)
3634 for host in container.view:
3635 devices = self.get_pci_devices(host, need_devices)
3636 if devices:
3637 host_obj = host
3638 pci_device_objs = devices
3639 break
3640 except Exception as exp:
3641 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3642
3643 return host_obj,pci_device_objs
3644
3645 def relocate_vm(self, dest_host, vm) :
3646 """
3647 Method to get the relocate VM to new host
3648
3649 Args:
3650 dest_host - vSphere host object
3651 vm - vSphere VM object
3652
3653 Returns:
3654 task object
3655 """
3656 task = None
3657 try:
3658 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3659 task = vm.Relocate(relocate_spec)
3660 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3661 except Exception as exp:
3662 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
3663 dest_host, vm, exp))
3664 return task
3665
3666 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3667 """
3668 Waits and provides updates on a vSphere task
3669 """
3670 while task.info.state == vim.TaskInfo.State.running:
3671 time.sleep(2)
3672
3673 if task.info.state == vim.TaskInfo.State.success:
3674 if task.info.result is not None and not hideResult:
3675 self.logger.info('{} completed successfully, result: {}'.format(
3676 actionName,
3677 task.info.result))
3678 else:
3679 self.logger.info('Task {} completed successfully.'.format(actionName))
3680 else:
3681 self.logger.error('{} did not complete successfully: {} '.format(
3682 actionName,
3683 task.info.error)
3684 )
3685
3686 return task.info.result
3687
3688 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3689 """
3690 Method to add pci device in given VM
3691
3692 Args:
3693 host_object - vSphere host object
3694 vm_object - vSphere VM object
3695 host_pci_dev - host_pci_dev must be one of the devices from the
3696 host_object.hardware.pciDevice list
3697 which is configured as a PCI passthrough device
3698
3699 Returns:
3700 task object
3701 """
3702 task = None
3703 if vm_object and host_object and host_pci_dev:
3704 try :
3705 #Add PCI device to VM
3706 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3707 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3708
3709 if host_pci_dev.id not in systemid_by_pciid:
3710 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3711 return None
3712
3713 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3714 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3715 id=host_pci_dev.id,
3716 systemId=systemid_by_pciid[host_pci_dev.id],
3717 vendorId=host_pci_dev.vendorId,
3718 deviceName=host_pci_dev.deviceName)
3719
3720 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3721
3722 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3723 new_device_config.operation = "add"
3724 vmConfigSpec = vim.vm.ConfigSpec()
3725 vmConfigSpec.deviceChange = [new_device_config]
3726
3727 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3728 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3729 host_pci_dev, vm_object, host_object)
3730 )
3731 except Exception as exp:
3732 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
3733 host_pci_dev,
3734 vm_object,
3735 exp))
3736 return task
3737
3738 def get_vm_vcenter_info(self):
3739 """
3740 Method to get details of vCenter and vm
3741
3742 Args:
3743 vapp_uuid - uuid of vApp or VM
3744
3745 Returns:
3746 Moref Id of VM and deails of vCenter
3747 """
3748 vm_vcenter_info = {}
3749
3750 if self.vcenter_ip is not None:
3751 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3752 else:
3753 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3754 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3755 if self.vcenter_port is not None:
3756 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3757 else:
3758 raise vimconn.vimconnException(message="vCenter port is not provided."\
3759 " Please provide vCenter port while attaching datacenter to tenant in --config")
3760 if self.vcenter_user is not None:
3761 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3762 else:
3763 raise vimconn.vimconnException(message="vCenter user is not provided."\
3764 " Please provide vCenter user while attaching datacenter to tenant in --config")
3765
3766 if self.vcenter_password is not None:
3767 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3768 else:
3769 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3770 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3771
3772 return vm_vcenter_info
3773
3774
3775 def get_vm_pci_details(self, vmuuid):
3776 """
3777 Method to get VM PCI device details from vCenter
3778
3779 Args:
3780 vm_obj - vSphere VM object
3781
3782 Returns:
3783 dict of PCI devives attached to VM
3784
3785 """
3786 vm_pci_devices_info = {}
3787 try:
3788 vcenter_conect, content = self.get_vcenter_content()
3789 vm_moref_id = self.get_vm_moref_id(vmuuid)
3790 if vm_moref_id:
3791 #Get VM and its host
3792 if content:
3793 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3794 if host_obj and vm_obj:
3795 vm_pci_devices_info["host_name"]= host_obj.name
3796 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3797 for device in vm_obj.config.hardware.device:
3798 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3799 device_details={'devide_id':device.backing.id,
3800 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3801 }
3802 vm_pci_devices_info[device.deviceInfo.label] = device_details
3803 else:
3804 self.logger.error("Can not connect to vCenter while getting "\
3805 "PCI devices infromationn")
3806 return vm_pci_devices_info
3807 except Exception as exp:
3808 self.logger.error("Error occurred while getting VM infromationn"\
3809 " for VM : {}".format(exp))
3810 raise vimconn.vimconnException(message=exp)
3811
    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
        """
        Add a NIC connected to network_name to every VM of the vApp.

        For each VM the method GETs its networkConnectionSection through the
        vCD REST API, splices a new <NetworkConnection> XML fragment into the
        raw document text (string replace, not XML parsing) and PUTs it back.
        Two nearly identical branches exist: without nic_type (vCD chooses
        the adapter model) and with nic_type (an explicit
        <NetworkAdapterType> element is included).

        Args :
            vapp - vApp object whose VMs get the new adapter
            network_name - name of network
            primary_nic_index - int value for primary nic index
            nicIndex - int value for nic index
            net - interface dict; optional 'floating_ip' and 'ip_address'
                  keys select the IP allocation mode
            nic_type - specify model name to which add to vm
        Returns:
            None
        Raises:
            vimconn.vimconnException when any REST call fails
        """

        try:
            ip_address = None
            floating_ip = False
            if 'floating_ip' in net: floating_ip = net['floating_ip']

            # Stub for ip_address feature
            if 'ip_address' in net: ip_address = net['ip_address']

            # floating ip -> POOL, fixed ip -> MANUAL, otherwise DHCP
            if floating_ip:
                allocation_mode = "POOL"
            elif ip_address:
                allocation_mode = "MANUAL"
            else:
                allocation_mode = "DHCP"

            if not nic_type:
                for vms in vapp._get_vms():
                    # vCD VM id is the last token of the urn-style id
                    vm_id = (vms.id).split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)

                    response = Http.get(url=url_rest_call,
                                        headers=self.vca.vcloud_session.get_vcloud_headers(),
                                        verify=self.vca.verify,
                                        logger=self.vca.logger)

                    # 403 -> expired session; retry_rest re-authenticates
                    if response.status_code == 403:
                        response = self.retry_rest('GET', url_rest_call)

                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                       "network connection section")

                    data = response.content
                    # First NIC of the VM: also set the primary index element
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                                                         allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                    else:
                        # Subsequent NICs: append after the last NetworkConnection
                        new_item = """<NetworkConnection network="{}">
                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                    <IsConnected>true</IsConnected>
                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                    </NetworkConnection>""".format(network_name, nicIndex,
                                                                allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))

                    headers = self.vca.vcloud_session.get_vcloud_headers()
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                    response = Http.put(url=url_rest_call, headers=headers, data=data,
                                                                   verify=self.vca.verify,
                                                                   logger=self.vca.logger)

                    if response.status_code == 403:
                        add_headers = {'Content-Type': headers['Content-Type']}
                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)

                    # 202 Accepted carries an async Task to wait for
                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {} ".format(url_rest_call,
                                                                     response.content,
                                                                     response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                       "network connection section")
                    else:
                        nic_task = taskType.parseString(response.content, True)
                        if isinstance(nic_task, GenericTask):
                            self.vca.block_until_completed(nic_task)
                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
                                             "default NIC type".format(vm_id))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
                                              "connect NIC type".format(vm_id))
            else:
                # Same flow as above, but the XML fragment carries an explicit
                # <NetworkAdapterType> for the requested adapter model
                for vms in vapp._get_vms():
                    vm_id = (vms.id).split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)

                    response = Http.get(url=url_rest_call,
                                        headers=self.vca.vcloud_session.get_vcloud_headers(),
                                        verify=self.vca.verify,
                                        logger=self.vca.logger)

                    if response.status_code == 403:
                        response = self.retry_rest('GET', url_rest_call)

                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                       "network connection section")
                    data = response.content
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                    <NetworkConnection network="{}">
                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                    <IsConnected>true</IsConnected>
                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                    <NetworkAdapterType>{}</NetworkAdapterType>
                                    </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                                   allocation_mode, nic_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                    else:
                        new_item = """<NetworkConnection network="{}">
                                        <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                        <IsConnected>true</IsConnected>
                                        <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                        <NetworkAdapterType>{}</NetworkAdapterType>
                                        </NetworkConnection>""".format(network_name, nicIndex,
                                                                       allocation_mode, nic_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))

                    headers = self.vca.vcloud_session.get_vcloud_headers()
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                    response = Http.put(url=url_rest_call, headers=headers, data=data,
                                                                   verify=self.vca.verify,
                                                                   logger=self.vca.logger)

                    if response.status_code == 403:
                        add_headers = {'Content-Type': headers['Content-Type']}
                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)

                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                       "network connection section")
                    else:
                        nic_task = taskType.parseString(response.content, True)
                        if isinstance(nic_task, GenericTask):
                            self.vca.block_until_completed(nic_task)
                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
                                             "conneced to NIC type {}".format(vm_id, nic_type))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
                                              "failed to connect NIC type {}".format(vm_id, nic_type))
        except Exception as exp:
            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
                              "while adding Network adapter")
            raise vimconn.vimconnException(message=exp)
3998
3999
4000 def set_numa_affinity(self, vmuuid, paired_threads_id):
4001 """
4002 Method to assign numa affinity in vm configuration parammeters
4003 Args :
4004 vmuuid - vm uuid
4005 paired_threads_id - one or more virtual processor
4006 numbers
4007 Returns:
4008 return if True
4009 """
4010 try:
4011 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
4012 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
4013 context = None
4014 if hasattr(ssl, '_create_unverified_context'):
4015 context = ssl._create_unverified_context()
4016 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
4017 pwd=self.passwd, port=int(vm_vcenter_port),
4018 sslContext=context)
4019 atexit.register(Disconnect, vcenter_conect)
4020 content = vcenter_conect.RetrieveContent()
4021
4022 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
4023 if vm_obj:
4024 config_spec = vim.vm.ConfigSpec()
4025 config_spec.extraConfig = []
4026 opt = vim.option.OptionValue()
4027 opt.key = 'numa.nodeAffinity'
4028 opt.value = str(paired_threads_id)
4029 config_spec.extraConfig.append(opt)
4030 task = vm_obj.ReconfigVM_Task(config_spec)
4031 if task:
4032 result = self.wait_for_vcenter_task(task, vcenter_conect)
4033 extra_config = vm_obj.config.extraConfig
4034 flag = False
4035 for opts in extra_config:
4036 if 'numa.nodeAffinity' in opts.key:
4037 flag = True
4038 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
4039 "value {} for vm {}".format(opt.value, vm_obj))
4040 if flag:
4041 return
4042 else:
4043 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
4044 except Exception as exp:
4045 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
4046 "for VM {} : {}".format(vm_obj, vm_moref_id))
4047 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
4048 "affinity".format(exp))
4049
4050
4051 def cloud_init(self, vapp, cloud_config):
4052 """
4053 Method to inject ssh-key
4054 vapp - vapp object
4055 cloud_config a dictionary with:
4056 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
4057 'users': (optional) list of users to be inserted, each item is a dict with:
4058 'name': (mandatory) user name,
4059 'key-pairs': (optional) list of strings with the public key to be inserted to the user
4060 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
4061 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
4062 'config-files': (optional). List of files to be transferred. Each item is a dict with:
4063 'dest': (mandatory) string with the destination absolute path
4064 'encoding': (optional, by default text). Can be one of:
4065 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
4066 'content' (mandatory): string with the content of the file
4067 'permissions': (optional) string with file permissions, typically octal notation '0644'
4068 'owner': (optional) file owner, string with the format 'owner:group'
4069 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
4070 """
4071 try:
4072 if not isinstance(cloud_config, dict):
4073 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
4074 else:
4075 key_pairs = []
4076 userdata = []
4077 if "key-pairs" in cloud_config:
4078 key_pairs = cloud_config["key-pairs"]
4079
4080 if "users" in cloud_config:
4081 userdata = cloud_config["users"]
4082
4083 self.logger.debug("cloud_init : Guest os customization started..")
4084 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
4085 self.guest_customization(vapp, customize_script)
4086
4087 except Exception as exp:
4088 self.logger.error("cloud_init : exception occurred while injecting "\
4089 "ssh-key")
4090 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
4091 "ssh-key".format(exp))
4092
4093 def format_script(self, key_pairs=[], users_list=[]):
4094 bash_script = """
4095 #!/bin/bash
4096 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4097 if [ "$1" = "precustomization" ];then
4098 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4099 """
4100
4101 keys = "\n".join(key_pairs)
4102 if keys:
4103 keys_data = """
4104 if [ ! -d /root/.ssh ];then
4105 mkdir /root/.ssh
4106 chown root:root /root/.ssh
4107 chmod 700 /root/.ssh
4108 touch /root/.ssh/authorized_keys
4109 chown root:root /root/.ssh/authorized_keys
4110 chmod 600 /root/.ssh/authorized_keys
4111 # make centos with selinux happy
4112 which restorecon && restorecon -Rv /root/.ssh
4113 else
4114 touch /root/.ssh/authorized_keys
4115 chown root:root /root/.ssh/authorized_keys
4116 chmod 600 /root/.ssh/authorized_keys
4117 fi
4118 echo '{key}' >> /root/.ssh/authorized_keys
4119 """.format(key=keys)
4120
4121 bash_script+= keys_data
4122
4123 for user in users_list:
4124 if 'name' in user: user_name = user['name']
4125 if 'key-pairs' in user:
4126 user_keys = "\n".join(user['key-pairs'])
4127 else:
4128 user_keys = None
4129
4130 add_user_name = """
4131 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
4132 """.format(user_name=user_name)
4133
4134 bash_script+= add_user_name
4135
4136 if user_keys:
4137 user_keys_data = """
4138 mkdir /home/{user_name}/.ssh
4139 chown {user_name}:{user_name} /home/{user_name}/.ssh
4140 chmod 700 /home/{user_name}/.ssh
4141 touch /home/{user_name}/.ssh/authorized_keys
4142 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
4143 chmod 600 /home/{user_name}/.ssh/authorized_keys
4144 # make centos with selinux happy
4145 which restorecon && restorecon -Rv /home/{user_name}/.ssh
4146 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
4147 """.format(user_name=user_name,user_key=user_keys)
4148
4149 bash_script+= user_keys_data
4150
4151 return bash_script+"\n\tfi"
4152
4153 def guest_customization(self, vapp, customize_script):
4154 """
4155 Method to customize guest os
4156 vapp - Vapp object
4157 customize_script - Customize script to be run at first boot of VM.
4158 """
4159 for vm in vapp._get_vms():
4160 vm_name = vm.name
4161 task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
4162 if isinstance(task, GenericTask):
4163 self.vca.block_until_completed(task)
4164 self.logger.info("guest_customization : customized guest os task "\
4165 "completed for VM {}".format(vm_name))
4166 else:
4167 self.logger.error("guest_customization : task for customized guest os"\
4168 "failed for VM {}".format(vm_name))
4169 raise vimconn.vimconnException("guest_customization : failed to perform"\
4170 "guest os customization on VM {}".format(vm_name))
4171
4172 def add_new_disk(self, vapp_uuid, disk_size):
4173 """
4174 Method to create an empty vm disk
4175
4176 Args:
4177 vapp_uuid - is vapp identifier.
4178 disk_size - size of disk to be created in GB
4179
4180 Returns:
4181 None
4182 """
4183 status = False
4184 vm_details = None
4185 try:
4186 #Disk size in GB, convert it into MB
4187 if disk_size is not None:
4188 disk_size_mb = int(disk_size) * 1024
4189 vm_details = self.get_vapp_details_rest(vapp_uuid)
4190
4191 if vm_details and "vm_virtual_hardware" in vm_details:
4192 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4193 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4194 status = self.add_new_disk_rest(disk_href, disk_size_mb)
4195
4196 except Exception as exp:
4197 msg = "Error occurred while creating new disk {}.".format(exp)
4198 self.rollback_newvm(vapp_uuid, msg)
4199
4200 if status:
4201 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4202 else:
4203 #If failed to add disk, delete VM
4204 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4205 self.rollback_newvm(vapp_uuid, msg)
4206
4207
    def add_new_disk_rest(self, disk_href, disk_size_mb):
        """
        Retrives vApp Disks section & add new empty disk

        GETs the RASD items list, copies the bus type/sub-type from the
        existing hard disk, appends a new disk Item with the next free
        InstanceID and PUTs the section back, then waits on the vCD task.

        Args:
            disk_href: Disk section href to addd disk
            disk_size_mb: Disk size in MB

        Returns: Status of add new disk task (False on any failure)
        """
        status = False
        # Only proceed with an authenticated vCloud session
        if self.vca.vcloud_session and self.vca.vcloud_session.organization:
            response = Http.get(url=disk_href,
                                headers=self.vca.vcloud_session.get_vcloud_headers(),
                                verify=self.vca.verify,
                                logger=self.vca.logger)

            # 403 means the session token expired; retry_rest re-logins and repeats
            if response.status_code == 403:
                response = self.retry_rest('GET', disk_href)

            if response.status_code != requests.codes.ok:
                self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
                                  .format(disk_href, response.status_code))
                return status
            try:
                #Find but type & max of instance IDs assigned to disks
                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                # Build prefix->uri map; lxml keys the default namespace with
                # None (unusable in iterfind), so register it as "xmlns"
                # (Python 2 code: dict.iteritems)
                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
                instance_id = 0
                # Track the highest InstanceID used by existing hard disks and
                # reuse the bus type/sub-type of the (last) existing disk
                # NOTE(review): assumes at least one "Hard disk" Item exists;
                # otherwise bus_subtype/bus_type below would be unbound — confirm
                for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                    if item.find("rasd:Description",namespaces).text == "Hard disk":
                        inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
                        if inst_id > instance_id:
                            instance_id = inst_id
                            disk_item = item.find("rasd:HostResource" ,namespaces)
                            bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
                            bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]

                # Next free InstanceID for the new disk item
                instance_id = instance_id + 1
                new_item = """<Item>
                                <rasd:Description>Hard disk</rasd:Description>
                                <rasd:ElementName>New disk</rasd:ElementName>
                                <rasd:HostResource
                                            xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
                                            vcloud:capacity="{}"
                                            vcloud:busSubType="{}"
                                            vcloud:busType="{}"></rasd:HostResource>
                                <rasd:InstanceID>{}</rasd:InstanceID>
                                <rasd:ResourceType>17</rasd:ResourceType>
                            </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)

                new_data = response.content
                #Add new item at the bottom
                new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))

                # Send PUT request to modify virtual hardware section with new disk
                headers = self.vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

                response = Http.put(url=disk_href,
                                    data=new_data,
                                    headers=headers,
                                    verify=self.vca.verify, logger=self.logger)

                if response.status_code == 403:
                    add_headers = {'Content-Type': headers['Content-Type']}
                    response = self.retry_rest('PUT', disk_href, add_headers, new_data)

                # vCD answers 202 Accepted with an asynchronous Task to wait on
                if response.status_code != 202:
                    self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
                                      .format(disk_href, response.status_code, response.content))
                else:
                    add_disk_task = taskType.parseString(response.content, True)
                    if type(add_disk_task) is GenericTask:
                        status = self.vca.block_until_completed(add_disk_task)
                        if not status:
                            self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))

            except Exception as exp:
                self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))

        return status
4291
4292
    def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
        """
        Method to add existing disk to vm

        Looks up the catalog image's backing VM in vCenter, extracts its first
        flat-backed disk and attaches that disk to the VM of the given vApp.

        Args :
            catalogs - List of VDC catalogs
            image_id - Catalog ID whose template VM holds the source disk
            size - Requested disk size in GB (optional; add_disk uses the
                   larger of this and the existing disk capacity)
            template_name - Name of template in catalog (unused here; the name
                            is re-resolved from image_id internally)
            vapp_uuid - UUID of target vApp
        Returns:
            None
        Raises (indirectly, via rollback_newvm which also deletes the vApp):
            vimconnNotFoundException when image, backing VM or disk is missing
        """
        disk_info = None
        vcenter_conect, content = self.get_vcenter_content()
        #find moref-id of vm in image
        catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
                                                         image_id=image_id,
                                                         )

        if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
            if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
                catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
                if catalog_vm_moref_id:
                    self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
                    host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
                    if catalog_vm_obj:
                        #find existing disk
                        disk_info = self.find_disk(catalog_vm_obj)
                    else:
                        exp_msg = "No VM with image id {} found".format(image_id)
                        self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
        else:
            exp_msg = "No Image found with image ID {} ".format(image_id)
            self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")

        if disk_info:
            self.logger.info("Existing disk_info : {}".format(disk_info))
            #get VM
            vm_moref_id = self.get_vm_moref_id(vapp_uuid)
            host, vm_obj = self.get_vm_obj(content, vm_moref_id)
            if vm_obj:
                status = self.add_disk(vcenter_conect=vcenter_conect,
                                       vm=vm_obj,
                                       disk_info=disk_info,
                                       size=size,
                                       vapp_uuid=vapp_uuid
                                       )
                if status:
                    self.logger.info("Disk from image id {} added to {}".format(image_id,
                                                                                vm_obj.config.name)
                                     )
        else:
            # NOTE(review): this branch formats vm_obj, which is only bound in
            # the "if disk_info" branch above — it would raise NameError if
            # reached; confirm intended behavior before relying on the message
            msg = "No disk found with image id {} to add in VM {}".format(
                                                            image_id,
                                                            vm_obj.config.name)
            self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4348
4349
4350 def find_disk(self, vm_obj):
4351 """
4352 Method to find details of existing disk in VM
4353 Args :
4354 vm_obj - vCenter object of VM
4355 image_id - Catalog ID
4356 Returns:
4357 disk_info : dict of disk details
4358 """
4359 disk_info = {}
4360 if vm_obj:
4361 try:
4362 devices = vm_obj.config.hardware.device
4363 for device in devices:
4364 if type(device) is vim.vm.device.VirtualDisk:
4365 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4366 disk_info["full_path"] = device.backing.fileName
4367 disk_info["datastore"] = device.backing.datastore
4368 disk_info["capacityKB"] = device.capacityInKB
4369 break
4370 except Exception as exp:
4371 self.logger.error("find_disk() : exception occurred while "\
4372 "getting existing disk details :{}".format(exp))
4373 return disk_info
4374
4375
4376 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4377 """
4378 Method to add existing disk in VM
4379 Args :
4380 vcenter_conect - vCenter content object
4381 vm - vCenter vm object
4382 disk_info : dict of disk details
4383 Returns:
4384 status : status of add disk task
4385 """
4386 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4387 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4388 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4389 if size is not None:
4390 #Convert size from GB to KB
4391 sizeKB = int(size) * 1024 * 1024
4392 #compare size of existing disk and user given size.Assign whicherver is greater
4393 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4394 sizeKB, capacityKB))
4395 if sizeKB > capacityKB:
4396 capacityKB = sizeKB
4397
4398 if datastore and fullpath and capacityKB:
4399 try:
4400 spec = vim.vm.ConfigSpec()
4401 # get all disks on a VM, set unit_number to the next available
4402 unit_number = 0
4403 for dev in vm.config.hardware.device:
4404 if hasattr(dev.backing, 'fileName'):
4405 unit_number = int(dev.unitNumber) + 1
4406 # unit_number 7 reserved for scsi controller
4407 if unit_number == 7:
4408 unit_number += 1
4409 if isinstance(dev, vim.vm.device.VirtualDisk):
4410 #vim.vm.device.VirtualSCSIController
4411 controller_key = dev.controllerKey
4412
4413 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4414 unit_number, controller_key))
4415 # add disk here
4416 dev_changes = []
4417 disk_spec = vim.vm.device.VirtualDeviceSpec()
4418 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4419 disk_spec.device = vim.vm.device.VirtualDisk()
4420 disk_spec.device.backing = \
4421 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4422 disk_spec.device.backing.thinProvisioned = True
4423 disk_spec.device.backing.diskMode = 'persistent'
4424 disk_spec.device.backing.datastore = datastore
4425 disk_spec.device.backing.fileName = fullpath
4426
4427 disk_spec.device.unitNumber = unit_number
4428 disk_spec.device.capacityInKB = capacityKB
4429 disk_spec.device.controllerKey = controller_key
4430 dev_changes.append(disk_spec)
4431 spec.deviceChange = dev_changes
4432 task = vm.ReconfigVM_Task(spec=spec)
4433 status = self.wait_for_vcenter_task(task, vcenter_conect)
4434 return status
4435 except Exception as exp:
4436 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4437 "{} to vm {}".format(exp,
4438 fullpath,
4439 vm.config.name)
4440 self.rollback_newvm(vapp_uuid, exp_msg)
4441 else:
4442 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4443 self.rollback_newvm(vapp_uuid, msg)
4444
4445
4446 def get_vcenter_content(self):
4447 """
4448 Get the vsphere content object
4449 """
4450 try:
4451 vm_vcenter_info = self.get_vm_vcenter_info()
4452 except Exception as exp:
4453 self.logger.error("Error occurred while getting vCenter infromationn"\
4454 " for VM : {}".format(exp))
4455 raise vimconn.vimconnException(message=exp)
4456
4457 context = None
4458 if hasattr(ssl, '_create_unverified_context'):
4459 context = ssl._create_unverified_context()
4460
4461 vcenter_conect = SmartConnect(
4462 host=vm_vcenter_info["vm_vcenter_ip"],
4463 user=vm_vcenter_info["vm_vcenter_user"],
4464 pwd=vm_vcenter_info["vm_vcenter_password"],
4465 port=int(vm_vcenter_info["vm_vcenter_port"]),
4466 sslContext=context
4467 )
4468 atexit.register(Disconnect, vcenter_conect)
4469 content = vcenter_conect.RetrieveContent()
4470 return vcenter_conect, content
4471
4472
4473 def get_vm_moref_id(self, vapp_uuid):
4474 """
4475 Get the moref_id of given VM
4476 """
4477 try:
4478 if vapp_uuid:
4479 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4480 if vm_details and "vm_vcenter_info" in vm_details:
4481 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4482
4483 return vm_moref_id
4484
4485 except Exception as exp:
4486 self.logger.error("Error occurred while getting VM moref ID "\
4487 " for VM : {}".format(exp))
4488 return None
4489
4490
    def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
        """
        Method to get vApp template details

        Resolves the catalog item for image_id, fetches the vApp template it
        points at (admin session is required to see the VCloudExtension
        section) and extracts the vCenter moref id of the template's VM.

        Args :
            catalogs - list of VDC catalogs
            image_id - Catalog ID to find
            template_name : template name in catalog (ignored; re-resolved
                            from image_id below)
        Returns:
            parsed_respond : dict; on success contains
            "vm_vcenter_info" -> {"vm_moref_id": ...}, otherwise empty
        Raises:
            vimconnConnectionException when an admin connection cannot be made
        """
        parsed_response = {}

        # admin connection: the vmext extension data is admin-only
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")

        try:
            catalog = self.get_catalog_obj(image_id, catalogs)
            if catalog:
                template_name = self.get_catalogbyid(image_id, catalogs)
                # NOTE: relies on Python 2 filter() returning a list (len() below)
                catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
                if len(catalog_items) == 1:
                    response = Http.get(catalog_items[0].get_href(),
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                    catalogItem = XmlElementTree.fromstring(response.content)
                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                    vapp_tempalte_href = entity.get("href")
                    #get vapp details and parse moref id

                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                                  'vmw': 'http://www.vmware.com/schema/ovf',
                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
                                  }

                    if vca.vcloud_session and vca.vcloud_session.organization:
                        response = Http.get(url=vapp_tempalte_href,
                                            headers=vca.vcloud_session.get_vcloud_headers(),
                                            verify=vca.verify,
                                            logger=vca.logger
                                            )

                        if response.status_code != requests.codes.ok:
                            self.logger.debug("REST API call {} failed. Return status code {}".format(
                                                vapp_tempalte_href, response.status_code))

                        else:
                            # drill down: Children -> VCloudExtension -> VmVimInfo
                            # -> VmVimObjectRef -> MoRef (the vCenter vm id)
                            xmlroot_respond = XmlElementTree.fromstring(response.content)
                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
                            if children_section is not None:
                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                                if vCloud_extension_section is not None:
                                    vm_vcenter_info = {}
                                    vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                                    vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                                    if vmext is not None:
                                        vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                                    parsed_response["vm_vcenter_info"]= vm_vcenter_info

        except Exception as exp :
            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))

        return parsed_response
4559
4560
4561 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
4562 """
4563 Method to delete vApp
4564 Args :
4565 vapp_uuid - vApp UUID
4566 msg - Error message to be logged
4567 exp_type : Exception type
4568 Returns:
4569 None
4570 """
4571 if vapp_uuid:
4572 status = self.delete_vminstance(vapp_uuid)
4573 else:
4574 msg = "No vApp ID"
4575 self.logger.error(msg)
4576 if exp_type == "Genric":
4577 raise vimconn.vimconnException(msg)
4578 elif exp_type == "NotFound":
4579 raise vimconn.vimconnNotFoundException(message=msg)
4580
4581 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4582 """
4583 Method to attach SRIOV adapters to VM
4584
4585 Args:
4586 vapp_uuid - uuid of vApp/VM
4587 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
4588 vmname_andid - vmname
4589
4590 Returns:
4591 The status of add SRIOV adapter task , vm object and
4592 vcenter_conect object
4593 """
4594 vm_obj = None
4595 vcenter_conect, content = self.get_vcenter_content()
4596 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4597
4598 if vm_moref_id:
4599 try:
4600 no_of_sriov_devices = len(sriov_nets)
4601 if no_of_sriov_devices > 0:
4602 #Get VM and its host
4603 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4604 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4605 if host_obj and vm_obj:
4606 #get SRIOV devies from host on which vapp is currently installed
4607 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4608 no_of_sriov_devices,
4609 )
4610
4611 if len(avilable_sriov_devices) == 0:
4612 #find other hosts with active pci devices
4613 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4614 content,
4615 no_of_sriov_devices,
4616 )
4617
4618 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4619 #Migrate vm to the host where SRIOV devices are available
4620 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4621 new_host_obj))
4622 task = self.relocate_vm(new_host_obj, vm_obj)
4623 if task is not None:
4624 result = self.wait_for_vcenter_task(task, vcenter_conect)
4625 self.logger.info("Migrate VM status: {}".format(result))
4626 host_obj = new_host_obj
4627 else:
4628 self.logger.info("Fail to migrate VM : {}".format(result))
4629 raise vimconn.vimconnNotFoundException(
4630 "Fail to migrate VM : {} to host {}".format(
4631 vmname_andid,
4632 new_host_obj)
4633 )
4634
4635 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4636 #Add SRIOV devices one by one
4637 for sriov_net in sriov_nets:
4638 network_name = sriov_net.get('net_id')
4639 dvs_portgr_name = self.create_dvPort_group(network_name)
4640 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
4641 #add vlan ID ,Modify portgroup for vlan ID
4642 self.configure_vlanID(content, vcenter_conect, network_name)
4643
4644 task = self.add_sriov_to_vm(content,
4645 vm_obj,
4646 host_obj,
4647 network_name,
4648 avilable_sriov_devices[0]
4649 )
4650 if task:
4651 status= self.wait_for_vcenter_task(task, vcenter_conect)
4652 if status:
4653 self.logger.info("Added SRIOV {} to VM {}".format(
4654 no_of_sriov_devices,
4655 str(vm_obj)))
4656 else:
4657 self.logger.error("Fail to add SRIOV {} to VM {}".format(
4658 no_of_sriov_devices,
4659 str(vm_obj)))
4660 raise vimconn.vimconnUnexpectedResponse(
4661 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
4662 )
4663 return True, vm_obj, vcenter_conect
4664 else:
4665 self.logger.error("Currently there is no host with"\
4666 " {} number of avaialble SRIOV "\
4667 "VFs required for VM {}".format(
4668 no_of_sriov_devices,
4669 vmname_andid)
4670 )
4671 raise vimconn.vimconnNotFoundException(
4672 "Currently there is no host with {} "\
4673 "number of avaialble SRIOV devices required for VM {}".format(
4674 no_of_sriov_devices,
4675 vmname_andid))
4676 else:
4677 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
4678
4679 except vmodl.MethodFault as error:
4680 self.logger.error("Error occurred while adding SRIOV {} ",error)
4681 return None, vm_obj, vcenter_conect
4682
4683
4684 def get_sriov_devices(self,host, no_of_vfs):
4685 """
4686 Method to get the details of SRIOV devices on given host
4687 Args:
4688 host - vSphere host object
4689 no_of_vfs - number of VFs needed on host
4690
4691 Returns:
4692 array of SRIOV devices
4693 """
4694 sriovInfo=[]
4695 if host:
4696 for device in host.config.pciPassthruInfo:
4697 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4698 if device.numVirtualFunction >= no_of_vfs:
4699 sriovInfo.append(device)
4700 break
4701 return sriovInfo
4702
4703
4704 def get_host_and_sriov_devices(self, content, no_of_vfs):
4705 """
4706 Method to get the details of SRIOV devices infromation on all hosts
4707
4708 Args:
4709 content - vSphere host object
4710 no_of_vfs - number of pci VFs needed on host
4711
4712 Returns:
4713 array of SRIOV devices and host object
4714 """
4715 host_obj = None
4716 sriov_device_objs = None
4717 try:
4718 if content:
4719 container = content.viewManager.CreateContainerView(content.rootFolder,
4720 [vim.HostSystem], True)
4721 for host in container.view:
4722 devices = self.get_sriov_devices(host, no_of_vfs)
4723 if devices:
4724 host_obj = host
4725 sriov_device_objs = devices
4726 break
4727 except Exception as exp:
4728 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4729
4730 return host_obj,sriov_device_objs
4731
4732
4733 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
4734 """
4735 Method to add SRIOV adapter to vm
4736
4737 Args:
4738 host_obj - vSphere host object
4739 vm_obj - vSphere vm object
4740 content - vCenter content object
4741 network_name - name of distributed virtaul portgroup
4742 sriov_device - SRIOV device info
4743
4744 Returns:
4745 task object
4746 """
4747 devices = []
4748 vnic_label = "sriov nic"
4749 try:
4750 dvs_portgr = self.get_dvport_group(network_name)
4751 network_name = dvs_portgr.name
4752 nic = vim.vm.device.VirtualDeviceSpec()
4753 # VM device
4754 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4755 nic.device = vim.vm.device.VirtualSriovEthernetCard()
4756 nic.device.addressType = 'assigned'
4757 #nic.device.key = 13016
4758 nic.device.deviceInfo = vim.Description()
4759 nic.device.deviceInfo.label = vnic_label
4760 nic.device.deviceInfo.summary = network_name
4761 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
4762
4763 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
4764 nic.device.backing.deviceName = network_name
4765 nic.device.backing.useAutoDetect = False
4766 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
4767 nic.device.connectable.startConnected = True
4768 nic.device.connectable.allowGuestControl = True
4769
4770 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
4771 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
4772 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
4773
4774 devices.append(nic)
4775 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
4776 task = vm_obj.ReconfigVM_Task(vmconf)
4777 return task
4778 except Exception as exp:
4779 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
4780 return None
4781
4782
4783 def create_dvPort_group(self, network_name):
4784 """
4785 Method to create disributed virtual portgroup
4786
4787 Args:
4788 network_name - name of network/portgroup
4789
4790 Returns:
4791 portgroup key
4792 """
4793 try:
4794 new_network_name = [network_name, '-', str(uuid.uuid4())]
4795 network_name=''.join(new_network_name)
4796 vcenter_conect, content = self.get_vcenter_content()
4797
4798 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4799 if dv_switch:
4800 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4801 dv_pg_spec.name = network_name
4802
4803 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4804 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4805 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4806 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4807 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4808 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4809
4810 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4811 self.wait_for_vcenter_task(task, vcenter_conect)
4812
4813 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4814 if dvPort_group:
4815 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
4816 return dvPort_group.key
4817 else:
4818 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
4819
4820 except Exception as exp:
4821 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
4822 " : {}".format(network_name, exp))
4823 return None
4824
4825 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4826 """
4827 Method to reconfigure disributed virtual portgroup
4828
4829 Args:
4830 dvPort_group_name - name of disributed virtual portgroup
4831 content - vCenter content object
4832 config_info - disributed virtual portgroup configuration
4833
4834 Returns:
4835 task object
4836 """
4837 try:
4838 dvPort_group = self.get_dvport_group(dvPort_group_name)
4839 if dvPort_group:
4840 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4841 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4842 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4843 if "vlanID" in config_info:
4844 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4845 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4846
4847 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4848 return task
4849 else:
4850 return None
4851 except Exception as exp:
4852 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
4853 " : {}".format(dvPort_group_name, exp))
4854 return None
4855
4856
4857 def destroy_dvport_group(self , dvPort_group_name):
4858 """
4859 Method to destroy disributed virtual portgroup
4860
4861 Args:
4862 network_name - name of network/portgroup
4863
4864 Returns:
4865 True if portgroup successfully got deleted else false
4866 """
4867 vcenter_conect, content = self.get_vcenter_content()
4868 try:
4869 status = None
4870 dvPort_group = self.get_dvport_group(dvPort_group_name)
4871 if dvPort_group:
4872 task = dvPort_group.Destroy_Task()
4873 status = self.wait_for_vcenter_task(task, vcenter_conect)
4874 return status
4875 except vmodl.MethodFault as exp:
4876 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
4877 exp, dvPort_group_name))
4878 return None
4879
4880
4881 def get_dvport_group(self, dvPort_group_name):
4882 """
4883 Method to get disributed virtual portgroup
4884
4885 Args:
4886 network_name - name of network/portgroup
4887
4888 Returns:
4889 portgroup object
4890 """
4891 vcenter_conect, content = self.get_vcenter_content()
4892 dvPort_group = None
4893 try:
4894 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4895 for item in container.view:
4896 if item.key == dvPort_group_name:
4897 dvPort_group = item
4898 break
4899 return dvPort_group
4900 except vmodl.MethodFault as exp:
4901 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4902 exp, dvPort_group_name))
4903 return None
4904
4905 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4906 """
4907 Method to get disributed virtual portgroup vlanID
4908
4909 Args:
4910 network_name - name of network/portgroup
4911
4912 Returns:
4913 vlan ID
4914 """
4915 vlanId = None
4916 try:
4917 dvPort_group = self.get_dvport_group(dvPort_group_name)
4918 if dvPort_group:
4919 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4920 except vmodl.MethodFault as exp:
4921 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4922 exp, dvPort_group_name))
4923 return vlanId
4924
4925
4926 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4927 """
4928 Method to configure vlanID in disributed virtual portgroup vlanID
4929
4930 Args:
4931 network_name - name of network/portgroup
4932
4933 Returns:
4934 None
4935 """
4936 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4937 if vlanID == 0:
4938 #configure vlanID
4939 vlanID = self.genrate_vlanID(dvPort_group_name)
4940 config = {"vlanID":vlanID}
4941 task = self.reconfig_portgroup(content, dvPort_group_name,
4942 config_info=config)
4943 if task:
4944 status= self.wait_for_vcenter_task(task, vcenter_conect)
4945 if status:
4946 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4947 dvPort_group_name,vlanID))
4948 else:
4949 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
4950 dvPort_group_name, vlanID))
4951
4952
4953 def genrate_vlanID(self, network_name):
4954 """
4955 Method to get unused vlanID
4956 Args:
4957 network_name - name of network/portgroup
4958 Returns:
4959 vlanID
4960 """
4961 vlan_id = None
4962 used_ids = []
4963 if self.config.get('vlanID_range') == None:
4964 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4965 "at config value before creating sriov network with vlan tag")
4966 if "used_vlanIDs" not in self.persistent_info:
4967 self.persistent_info["used_vlanIDs"] = {}
4968 else:
4969 used_ids = self.persistent_info["used_vlanIDs"].values()
4970
4971 for vlanID_range in self.config.get('vlanID_range'):
4972 start_vlanid , end_vlanid = vlanID_range.split("-")
4973 if start_vlanid > end_vlanid:
4974 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4975 vlanID_range))
4976
4977 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4978 if id not in used_ids:
4979 vlan_id = id
4980 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4981 return vlan_id
4982 if vlan_id is None:
4983 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4984
4985
4986 def get_obj(self, content, vimtype, name):
4987 """
4988 Get the vsphere object associated with a given text name
4989 """
4990 obj = None
4991 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4992 for item in container.view:
4993 if item.name == name:
4994 obj = item
4995 break
4996 return obj
4997
4998
4999 def insert_media_to_vm(self, vapp, image_id):
5000 """
5001 Method to insert media CD-ROM (ISO image) from catalog to vm.
5002 vapp - vapp object to get vm id
5003 Image_id - image id for cdrom to be inerted to vm
5004 """
5005 # create connection object
5006 vca = self.connect()
5007 try:
5008 # fetching catalog details
5009 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
5010 response = Http.get(url=rest_url,
5011 headers=vca.vcloud_session.get_vcloud_headers(),
5012 verify=vca.verify,
5013 logger=vca.logger)
5014
5015 if response.status_code != 200:
5016 self.logger.error("REST call {} failed reason : {}"\
5017 "status code : {}".format(url_rest_call,
5018 response.content,
5019 response.status_code))
5020 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
5021 "catalog details")
5022 # searching iso name and id
5023 iso_name,media_id = self.get_media_details(vca, response.content)
5024
5025 if iso_name and media_id:
5026 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5027 <ns6:MediaInsertOrEjectParams
5028 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
5029 <ns6:Media
5030 type="application/vnd.vmware.vcloud.media+xml"
5031 name="{}.iso"
5032 id="urn:vcloud:media:{}"
5033 href="https://{}/api/media/{}"/>
5034 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
5035 vca.host,media_id)
5036
5037 for vms in vapp._get_vms():
5038 vm_id = (vms.id).split(':')[-1]
5039
5040 headers = vca.vcloud_session.get_vcloud_headers()
5041 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
5042 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
5043
5044 response = Http.post(url=rest_url,
5045 headers=headers,
5046 data=data,
5047 verify=vca.verify,
5048 logger=vca.logger)
5049
5050 if response.status_code != 202:
5051 self.logger.error("Failed to insert CD-ROM to vm")
5052 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
5053 "ISO image to vm")
5054 else:
5055 task = taskType.parseString(response.content, True)
5056 if isinstance(task, GenericTask):
5057 vca.block_until_completed(task)
5058 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
5059 " image to vm {}".format(vm_id))
5060 except Exception as exp:
5061 self.logger.error("insert_media_to_vm() : exception occurred "\
5062 "while inserting media CD-ROM")
5063 raise vimconn.vimconnException(message=exp)
5064
5065
    def get_media_details(self, vca, content):
        """
        Method to get catalog item details

        Walks every CatalogItem href in the given catalog XML, fetches each
        item and returns the first entity whose href contains 'media'.

        Args:
            vca - connection object
            content - Catalog details (XML string/bytes)
        Return:
            (media name, media id) of the first media entity found;
            (False, False) is only produced by the dead else branch below
            (note: returns None implicitly when items exist but none is media)
        Raises:
            vimconnException when a catalog item cannot be fetched or
            parsing fails
        """
        cataloghref_list = []
        try:
            if content:
                vm_list_xmlroot = XmlElementTree.fromstring(content)
                for child in vm_list_xmlroot.iter():
                    if 'CatalogItem' in child.tag:
                        cataloghref_list.append(child.attrib.get('href'))
            # NOTE(review): a list is never None, so this is always True and
            # the else branch is unreachable; presumably non-emptiness
            # ("if cataloghref_list:") was intended — confirm before changing
            if cataloghref_list is not None:
                for href in cataloghref_list:
                    if href:
                        response = Http.get(url=href,
                                            headers=vca.vcloud_session.get_vcloud_headers(),
                                            verify=vca.verify,
                                            logger=vca.logger)
                        if response.status_code != 200:
                            self.logger.error("REST call {} failed reason : {}"\
                                              "status code : {}".format(href,
                                                                        response.content,
                                                                        response.status_code))
                            raise vimconn.vimconnException("get_media_details : Failed to get "\
                                                           "catalogitem details")
                        list_xmlroot = XmlElementTree.fromstring(response.content)
                        for child in list_xmlroot.iter():
                            if 'Entity' in child.tag:
                                if 'media' in child.attrib.get('href'):
                                    name = child.attrib.get('name')
                                    media_id = child.attrib.get('href').split('/').pop()
                                    return name,media_id
            else:
                self.logger.debug("Media name and id not found")
                return False,False
        except Exception as exp:
            self.logger.error("get_media_details : exception occurred "\
                              "getting media details")
            raise vimconn.vimconnException(message=exp)
5108
5109
5110 def retry_rest(self, method, url, add_headers=None, data=None):
5111 """ Method to get Token & retry respective REST request
5112 Args:
5113 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
5114 url - request url to be used
5115 add_headers - Additional headers (optional)
5116 data - Request payload data to be passed in request
5117 Returns:
5118 response - Response of request
5119 """
5120 response = None
5121
5122 #Get token
5123 self.get_token()
5124
5125 headers=self.vca.vcloud_session.get_vcloud_headers()
5126
5127 if add_headers:
5128 headers.update(add_headers)
5129
5130 if method == 'GET':
5131 response = Http.get(url=url,
5132 headers=headers,
5133 verify=self.vca.verify,
5134 logger=self.vca.logger)
5135 elif method == 'PUT':
5136 response = Http.put(url=url,
5137 data=data,
5138 headers=headers,
5139 verify=self.vca.verify,
5140 logger=self.logger)
5141 elif method == 'POST':
5142 response = Http.post(url=url,
5143 headers=headers,
5144 data=data,
5145 verify=self.vca.verify,
5146 logger=self.vca.logger)
5147 elif method == 'DELETE':
5148 response = Http.delete(url=url,
5149 headers=headers,
5150 verify=self.vca.verify,
5151 logger=self.vca.logger)
5152 return response
5153
5154
5155 def get_token(self):
5156 """ Generate a new token if expired
5157
5158 Returns:
5159 The return vca object that letter can be used to connect to vCloud director as admin for VDC
5160 """
5161 vca = None
5162
5163 try:
5164 self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
5165 self.user,
5166 self.org_name))
5167 vca = VCA(host=self.url,
5168 username=self.user,
5169 service_type=STANDALONE,
5170 version=VCAVERSION,
5171 verify=False,
5172 log=False)
5173
5174 result = vca.login(password=self.passwd, org=self.org_name)
5175 if result is True:
5176 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
5177 if result is True:
5178 self.logger.info(
5179 "Successfully generated token for vcloud direct org: {} as user: {}".format(self.org_name, self.user))
5180 #Update vca
5181 self.vca = vca
5182 return
5183
5184 except:
5185 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
5186 "{} as user: {}".format(self.org_name, self.user))
5187
5188 if not vca or not result:
5189 raise vimconn.vimconnConnectionException("self.connect() is failed while reconnecting")
5190
5191
5192 def get_vdc_details(self):
5193 """ Get VDC details using pyVcloud Lib
5194
5195 Returns vdc object
5196 """
5197 vdc = self.vca.get_vdc(self.tenant_name)
5198
5199 #Retry once, if failed by refreshing token
5200 if vdc is None:
5201 self.get_token()
5202 vdc = self.vca.get_vdc(self.tenant_name)
5203
5204 return vdc
5205
5206