More Py3 conversion work
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45
46 #from pyvcloud import Http
47 import http.client as Http
48
49 from pyvcloud.vcloudair import VCA
50 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
51 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
52 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
53 from xml.sax.saxutils import escape
54
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
56 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
57 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
58 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
59
60 import logging
61 import json
62 import time
63 import uuid
64 import httplib
65 import hashlib
66 import socket
67 import struct
68 import netaddr
69 import random
70
71 # global variable for vcd connector type
72 STANDALONE = 'standalone'
73
74 # key for flavor dicts
75 FLAVOR_RAM_KEY = 'ram'
76 FLAVOR_VCPUS_KEY = 'vcpus'
77 FLAVOR_DISK_KEY = 'disk'
78 DEFAULT_IP_PROFILE = {'dhcp_count':50,
79 'dhcp_enabled':True,
80 'ip_version':"IPv4"
81 }
82 # global variable for wait time
83 INTERVAL_TIME = 5
84 MAX_WAIT_TIME = 1800
85
86 VCAVERSION = '5.9'
87
88 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
89 __date__ = "$12-Jan-2017 11:09:29$"
90 __version__ = '0.1'
91
92 # -1: "Could not be created",
93 # 0: "Unresolved",
94 # 1: "Resolved",
95 # 2: "Deployed",
96 # 3: "Suspended",
97 # 4: "Powered on",
98 # 5: "Waiting for user input",
99 # 6: "Unknown state",
100 # 7: "Unrecognized state",
101 # 8: "Powered off",
102 # 9: "Inconsistent state",
103 # 10: "Children do not all have the same status",
104 # 11: "Upload initiated, OVF descriptor pending",
105 # 12: "Upload initiated, copying contents",
106 # 13: "Upload initiated , disk contents pending",
107 # 14: "Upload has been quarantined",
108 # 15: "Upload quarantine period has expired"
109
110 # mapping vCD status to MANO
111 vcdStatusCode2manoFormat = {4: 'ACTIVE',
112 7: 'PAUSED',
113 3: 'SUSPENDED',
114 8: 'INACTIVE',
115 12: 'BUILD',
116 -1: 'ERROR',
117 14: 'DELETED'}
118
119 #
120 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
121 'ERROR': 'ERROR', 'DELETED': 'DELETED'
122 }
123
124 class vimconnector(vimconn.vimconnector):
125 # dict used to store flavor in memory
126 flavorlist = {}
127
128 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
129 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
130 """
131 Constructor create vmware connector to vCloud director.
132
133 By default construct doesn't validate connection state. So client can create object with None arguments.
134 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
135
136 a) It initialize organization UUID
137 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
138
139 Args:
140 uuid - is organization uuid.
141 name - is organization name that must be presented in vCloud director.
142 tenant_id - is VDC uuid it must be presented in vCloud director
143 tenant_name - is VDC name.
144 url - is hostname or ip address of vCloud director
145 url_admin - same as above.
146 user - is user that administrator for organization. Caller must make sure that
147 username has right privileges.
148
149 password - is password for a user.
150
151 VMware connector also requires PVDC administrative privileges and separate account.
152 This variables must be passed via config argument dict contains keys
153
154 dict['admin_username']
155 dict['admin_password']
156 config - Provide NSX and vCenter information
157
158 Returns:
159 Nothing.
160 """
161
162 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
163 url_admin, user, passwd, log_level, config)
164
165 self.logger = logging.getLogger('openmano.vim.vmware')
166 self.logger.setLevel(10)
167 self.persistent_info = persistent_info
168
169 self.name = name
170 self.id = uuid
171 self.url = url
172 self.url_admin = url_admin
173 self.tenant_id = tenant_id
174 self.tenant_name = tenant_name
175 self.user = user
176 self.passwd = passwd
177 self.config = config
178 self.admin_password = None
179 self.admin_user = None
180 self.org_name = ""
181 self.nsx_manager = None
182 self.nsx_user = None
183 self.nsx_password = None
184
185 if tenant_name is not None:
186 orgnameandtenant = tenant_name.split(":")
187 if len(orgnameandtenant) == 2:
188 self.tenant_name = orgnameandtenant[1]
189 self.org_name = orgnameandtenant[0]
190 else:
191 self.tenant_name = tenant_name
192 if "orgname" in config:
193 self.org_name = config['orgname']
194
195 if log_level:
196 self.logger.setLevel(getattr(logging, log_level))
197
198 try:
199 self.admin_user = config['admin_username']
200 self.admin_password = config['admin_password']
201 except KeyError:
202 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
203
204 try:
205 self.nsx_manager = config['nsx_manager']
206 self.nsx_user = config['nsx_user']
207 self.nsx_password = config['nsx_password']
208 except KeyError:
209 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
210
211 self.vcenter_ip = config.get("vcenter_ip", None)
212 self.vcenter_port = config.get("vcenter_port", None)
213 self.vcenter_user = config.get("vcenter_user", None)
214 self.vcenter_password = config.get("vcenter_password", None)
215
216 # ############# Stub code for SRIOV #################
217 # try:
218 # self.dvs_name = config['dv_switch_name']
219 # except KeyError:
220 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
221 #
222 # self.vlanID_range = config.get("vlanID_range", None)
223
224 self.org_uuid = None
225 self.vca = None
226
227 if not url:
228 raise vimconn.vimconnException('url param can not be NoneType')
229
230 if not self.url_admin: # try to use normal url
231 self.url_admin = self.url
232
233 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
234 self.tenant_id, self.tenant_name))
235 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
236 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
237
238 # initialize organization
239 if self.user is not None and self.passwd is not None and self.url:
240 self.init_organization()
241
242 def __getitem__(self, index):
243 if index == 'name':
244 return self.name
245 if index == 'tenant_id':
246 return self.tenant_id
247 if index == 'tenant_name':
248 return self.tenant_name
249 elif index == 'id':
250 return self.id
251 elif index == 'org_name':
252 return self.org_name
253 elif index == 'org_uuid':
254 return self.org_uuid
255 elif index == 'user':
256 return self.user
257 elif index == 'passwd':
258 return self.passwd
259 elif index == 'url':
260 return self.url
261 elif index == 'url_admin':
262 return self.url_admin
263 elif index == "config":
264 return self.config
265 else:
266 raise KeyError("Invalid key '%s'" % str(index))
267
268 def __setitem__(self, index, value):
269 if index == 'name':
270 self.name = value
271 if index == 'tenant_id':
272 self.tenant_id = value
273 if index == 'tenant_name':
274 self.tenant_name = value
275 elif index == 'id':
276 self.id = value
277 elif index == 'org_name':
278 self.org_name = value
279 elif index == 'org_uuid':
280 self.org_uuid = value
281 elif index == 'user':
282 self.user = value
283 elif index == 'passwd':
284 self.passwd = value
285 elif index == 'url':
286 self.url = value
287 elif index == 'url_admin':
288 self.url_admin = value
289 else:
290 raise KeyError("Invalid key '%s'" % str(index))
291
292 def connect_as_admin(self):
293 """ Method connect as pvdc admin user to vCloud director.
294 There are certain action that can be done only by provider vdc admin user.
295 Organization creation / provider network creation etc.
296
297 Returns:
298 The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
299 """
300
301 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
302
303 vca_admin = VCA(host=self.url,
304 username=self.admin_user,
305 service_type=STANDALONE,
306 version=VCAVERSION,
307 verify=False,
308 log=False)
309 result = vca_admin.login(password=self.admin_password, org='System')
310 if not result:
311 raise vimconn.vimconnConnectionException(
312 "Can't connect to a vCloud director as: {}".format(self.admin_user))
313 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
314 if result is True:
315 self.logger.info(
316 "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
317
318 return vca_admin
319
320 def connect(self):
321 """ Method connect as normal user to vCloud director.
322
323 Returns:
324 The return vca object that letter can be used to connect to vCloud director as admin for VDC
325 """
326
327 try:
328 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
329 self.user,
330 self.org_name))
331 vca = VCA(host=self.url,
332 username=self.user,
333 service_type=STANDALONE,
334 version=VCAVERSION,
335 verify=False,
336 log=False)
337
338 result = vca.login(password=self.passwd, org=self.org_name)
339 if not result:
340 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
341 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
342 if result is True:
343 self.logger.info(
344 "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
345
346 except:
347 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
348 "{} as user: {}".format(self.org_name, self.user))
349
350 return vca
351
352 def init_organization(self):
353 """ Method initialize organization UUID and VDC parameters.
354
355 At bare minimum client must provide organization name that present in vCloud director and VDC.
356
357 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
358 The Org - UUID will be initialized at the run time if data center present in vCloud director.
359
360 Returns:
361 The return vca object that letter can be used to connect to vcloud direct as admin
362 """
363 vca = self.connect()
364 if not vca:
365 raise vimconn.vimconnConnectionException("self.connect() is failed.")
366
367 self.vca = vca
368 try:
369 if self.org_uuid is None:
370 org_dict = self.get_org_list()
371 for org in org_dict:
372 # we set org UUID at the init phase but we can do it only when we have valid credential.
373 if org_dict[org] == self.org_name:
374 self.org_uuid = org
375 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
376 break
377 else:
378 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
379
380 # if well good we require for org details
381 org_details_dict = self.get_org(org_uuid=self.org_uuid)
382
383 # we have two case if we want to initialize VDC ID or VDC name at run time
384 # tenant_name provided but no tenant id
385 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
386 vdcs_dict = org_details_dict['vdcs']
387 for vdc in vdcs_dict:
388 if vdcs_dict[vdc] == self.tenant_name:
389 self.tenant_id = vdc
390 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
391 self.org_name))
392 break
393 else:
394 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
395 # case two we have tenant_id but we don't have tenant name so we find and set it.
396 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
397 vdcs_dict = org_details_dict['vdcs']
398 for vdc in vdcs_dict:
399 if vdc == self.tenant_id:
400 self.tenant_name = vdcs_dict[vdc]
401 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
402 self.org_name))
403 break
404 else:
405 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
406 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
407 except:
408 self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
409 self.logger.debug(traceback.format_exc())
410 self.org_uuid = None
411
412 def new_tenant(self, tenant_name=None, tenant_description=None):
413 """ Method adds a new tenant to VIM with this name.
414 This action requires access to create VDC action in vCloud director.
415
416 Args:
417 tenant_name is tenant_name to be created.
418 tenant_description not used for this call
419
420 Return:
421 returns the tenant identifier in UUID format.
422 If action is failed method will throw vimconn.vimconnException method
423 """
424 vdc_task = self.create_vdc(vdc_name=tenant_name)
425 if vdc_task is not None:
426 vdc_uuid, value = vdc_task.popitem()
427 self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
428 return vdc_uuid
429 else:
430 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
431
432 def delete_tenant(self, tenant_id=None):
433 """ Delete a tenant from VIM
434 Args:
435 tenant_id is tenant_id to be deleted.
436
437 Return:
438 returns the tenant identifier in UUID format.
439 If action is failed method will throw exception
440 """
441 vca = self.connect_as_admin()
442 if not vca:
443 raise vimconn.vimconnConnectionException("self.connect() is failed")
444
445 if tenant_id is not None:
446 if vca.vcloud_session and vca.vcloud_session.organization:
447 #Get OrgVDC
448 url_list = [self.vca.host, '/api/vdc/', tenant_id]
449 orgvdc_herf = ''.join(url_list)
450 response = Http.get(url=orgvdc_herf,
451 headers=vca.vcloud_session.get_vcloud_headers(),
452 verify=vca.verify,
453 logger=vca.logger)
454
455 if response.status_code != requests.codes.ok:
456 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
457 "Return status code {}".format(orgvdc_herf,
458 response.status_code))
459 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
460
461 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
462 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
463 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
464 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
465 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
466
467 #Delete OrgVDC
468 response = Http.delete(url=vdc_remove_href,
469 headers=vca.vcloud_session.get_vcloud_headers(),
470 verify=vca.verify,
471 logger=vca.logger)
472
473 if response.status_code == 202:
474 delete_vdc_task = taskType.parseString(response.content, True)
475 if type(delete_vdc_task) is GenericTask:
476 self.vca.block_until_completed(delete_vdc_task)
477 self.logger.info("Deleted tenant with ID {}".format(tenant_id))
478 return tenant_id
479 else:
480 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
481 "Return status code {}".format(vdc_remove_href,
482 response.status_code))
483 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
484 else:
485 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
486 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
487
488
489 def get_tenant_list(self, filter_dict={}):
490 """Obtain tenants of VIM
491 filter_dict can contain the following keys:
492 name: filter by tenant name
493 id: filter by tenant uuid/id
494 <other VIM specific>
495 Returns the tenant list of dictionaries:
496 [{'name':'<name>, 'id':'<id>, ...}, ...]
497
498 """
499 org_dict = self.get_org(self.org_uuid)
500 vdcs_dict = org_dict['vdcs']
501
502 vdclist = []
503 try:
504 for k in vdcs_dict:
505 entry = {'name': vdcs_dict[k], 'id': k}
506 # if caller didn't specify dictionary we return all tenants.
507 if filter_dict is not None and filter_dict:
508 filtered_entry = entry.copy()
509 filtered_dict = set(entry.keys()) - set(filter_dict)
510 for unwanted_key in filtered_dict: del entry[unwanted_key]
511 if filter_dict == entry:
512 vdclist.append(filtered_entry)
513 else:
514 vdclist.append(entry)
515 except:
516 self.logger.debug("Error in get_tenant_list()")
517 self.logger.debug(traceback.format_exc())
518 raise vimconn.vimconnException("Incorrect state. {}")
519
520 return vdclist
521
522 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
523 """Adds a tenant network to VIM
524 net_name is the name
525 net_type can be 'bridge','data'.'ptp'.
526 ip_profile is a dict containing the IP parameters of the network
527 shared is a boolean
528 Returns the network identifier"""
529
530 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
531 .format(net_name, net_type, ip_profile, shared))
532
533 isshared = 'false'
534 if shared:
535 isshared = 'true'
536
537 # ############# Stub code for SRIOV #################
538 # if net_type == "data" or net_type == "ptp":
539 # if self.config.get('dv_switch_name') == None:
540 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
541 # network_uuid = self.create_dvPort_group(net_name)
542
543 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
544 ip_profile=ip_profile, isshared=isshared)
545 if network_uuid is not None:
546 return network_uuid
547 else:
548 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
549
550 def get_vcd_network_list(self):
551 """ Method available organization for a logged in tenant
552
553 Returns:
554 The return vca object that letter can be used to connect to vcloud direct as admin
555 """
556
557 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
558
559 if not self.tenant_name:
560 raise vimconn.vimconnConnectionException("Tenant name is empty.")
561
562 vdc = self.get_vdc_details()
563 if vdc is None:
564 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
565
566 vdc_uuid = vdc.get_id().split(":")[3]
567 networks = self.vca.get_networks(vdc.get_name())
568 network_list = []
569 try:
570 for network in networks:
571 filter_dict = {}
572 netid = network.get_id().split(":")
573 if len(netid) != 4:
574 continue
575
576 filter_dict["name"] = network.get_name()
577 filter_dict["id"] = netid[3]
578 filter_dict["shared"] = network.get_IsShared()
579 filter_dict["tenant_id"] = vdc_uuid
580 if network.get_status() == 1:
581 filter_dict["admin_state_up"] = True
582 else:
583 filter_dict["admin_state_up"] = False
584 filter_dict["status"] = "ACTIVE"
585 filter_dict["type"] = "bridge"
586 network_list.append(filter_dict)
587 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
588 except:
589 self.logger.debug("Error in get_vcd_network_list")
590 self.logger.debug(traceback.format_exc())
591 pass
592
593 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
594 return network_list
595
596 def get_network_list(self, filter_dict={}):
597 """Obtain tenant networks of VIM
598 Filter_dict can be:
599 name: network name OR/AND
600 id: network uuid OR/AND
601 shared: boolean OR/AND
602 tenant_id: tenant OR/AND
603 admin_state_up: boolean
604 status: 'ACTIVE'
605
606 [{key : value , key : value}]
607
608 Returns the network list of dictionaries:
609 [{<the fields at Filter_dict plus some VIM specific>}, ...]
610 List can be empty
611 """
612
613 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
614
615 if not self.tenant_name:
616 raise vimconn.vimconnConnectionException("Tenant name is empty.")
617
618 vdc = self.get_vdc_details()
619 if vdc is None:
620 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
621
622 try:
623 vdcid = vdc.get_id().split(":")[3]
624 networks = self.vca.get_networks(vdc.get_name())
625 network_list = []
626
627 for network in networks:
628 filter_entry = {}
629 net_uuid = network.get_id().split(":")
630 if len(net_uuid) != 4:
631 continue
632 else:
633 net_uuid = net_uuid[3]
634 # create dict entry
635 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
636 vdcid,
637 network.get_name()))
638 filter_entry["name"] = network.get_name()
639 filter_entry["id"] = net_uuid
640 filter_entry["shared"] = network.get_IsShared()
641 filter_entry["tenant_id"] = vdcid
642 if network.get_status() == 1:
643 filter_entry["admin_state_up"] = True
644 else:
645 filter_entry["admin_state_up"] = False
646 filter_entry["status"] = "ACTIVE"
647 filter_entry["type"] = "bridge"
648 filtered_entry = filter_entry.copy()
649
650 if filter_dict is not None and filter_dict:
651 # we remove all the key : value we don't care and match only
652 # respected field
653 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
654 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
655 if filter_dict == filter_entry:
656 network_list.append(filtered_entry)
657 else:
658 network_list.append(filtered_entry)
659 except:
660 self.logger.debug("Error in get_vcd_network_list")
661 self.logger.debug(traceback.format_exc())
662
663 self.logger.debug("Returning {}".format(network_list))
664 return network_list
665
666 def get_network(self, net_id):
667 """Method obtains network details of net_id VIM network
668 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
669
670 try:
671 vdc = self.get_vdc_details()
672 vdc_id = vdc.get_id().split(":")[3]
673
674 networks = self.vca.get_networks(vdc.get_name())
675 filter_dict = {}
676
677 for network in networks:
678 vdc_network_id = network.get_id().split(":")
679 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
680 filter_dict["name"] = network.get_name()
681 filter_dict["id"] = vdc_network_id[3]
682 filter_dict["shared"] = network.get_IsShared()
683 filter_dict["tenant_id"] = vdc_id
684 if network.get_status() == 1:
685 filter_dict["admin_state_up"] = True
686 else:
687 filter_dict["admin_state_up"] = False
688 filter_dict["status"] = "ACTIVE"
689 filter_dict["type"] = "bridge"
690 self.logger.debug("Returning {}".format(filter_dict))
691 return filter_dict
692 except:
693 self.logger.debug("Error in get_network")
694 self.logger.debug(traceback.format_exc())
695
696 return filter_dict
697
698 def delete_network(self, net_id):
699 """
700 Method Deletes a tenant network from VIM, provide the network id.
701
702 Returns the network identifier or raise an exception
703 """
704
705 # ############# Stub code for SRIOV #################
706 # dvport_group = self.get_dvport_group(net_id)
707 # if dvport_group:
708 # #delete portgroup
709 # status = self.destroy_dvport_group(net_id)
710 # if status:
711 # # Remove vlanID from persistent info
712 # if net_id in self.persistent_info["used_vlanIDs"]:
713 # del self.persistent_info["used_vlanIDs"][net_id]
714 #
715 # return net_id
716
717 vcd_network = self.get_vcd_network(network_uuid=net_id)
718 if vcd_network is not None and vcd_network:
719 if self.delete_network_action(network_uuid=net_id):
720 return net_id
721 else:
722 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
723
724 def refresh_nets_status(self, net_list):
725 """Get the status of the networks
726 Params: the list of network identifiers
727 Returns a dictionary with:
728 net_id: #VIM id of this network
729 status: #Mandatory. Text with one of:
730 # DELETED (not found at vim)
731 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
732 # OTHER (Vim reported other status not understood)
733 # ERROR (VIM indicates an ERROR status)
734 # ACTIVE, INACTIVE, DOWN (admin down),
735 # BUILD (on building process)
736 #
737 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
738 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
739
740 """
741
742 dict_entry = {}
743 try:
744 for net in net_list:
745 errormsg = ''
746 vcd_network = self.get_vcd_network(network_uuid=net)
747 if vcd_network is not None and vcd_network:
748 if vcd_network['status'] == '1':
749 status = 'ACTIVE'
750 else:
751 status = 'DOWN'
752 else:
753 status = 'DELETED'
754 errormsg = 'Network not found.'
755
756 dict_entry[net] = {'status': status, 'error_msg': errormsg,
757 'vim_info': yaml.safe_dump(vcd_network)}
758 except:
759 self.logger.debug("Error in refresh_nets_status")
760 self.logger.debug(traceback.format_exc())
761
762 return dict_entry
763
764 def get_flavor(self, flavor_id):
765 """Obtain flavor details from the VIM
766 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
767 """
768 if flavor_id not in vimconnector.flavorlist:
769 raise vimconn.vimconnNotFoundException("Flavor not found.")
770 return vimconnector.flavorlist[flavor_id]
771
772 def new_flavor(self, flavor_data):
773 """Adds a tenant flavor to VIM
774 flavor_data contains a dictionary with information, keys:
775 name: flavor name
776 ram: memory (cloud type) in MBytes
777 vpcus: cpus (cloud type)
778 extended: EPA parameters
779 - numas: #items requested in same NUMA
780 memory: number of 1G huge pages memory
781 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
782 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
783 - name: interface name
784 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
785 bandwidth: X Gbps; requested guarantee bandwidth
786 vpci: requested virtual PCI address
787 disk: disk size
788 is_public:
789 #TODO to concrete
790 Returns the flavor identifier"""
791
792 # generate a new uuid put to internal dict and return it.
793 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
794 new_flavor=flavor_data
795 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
796 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
797 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
798
799 if not isinstance(ram, int):
800 raise vimconn.vimconnException("Non-integer value for ram")
801 elif not isinstance(cpu, int):
802 raise vimconn.vimconnException("Non-integer value for cpu")
803 elif not isinstance(disk, int):
804 raise vimconn.vimconnException("Non-integer value for disk")
805
806 extended_flv = flavor_data.get("extended")
807 if extended_flv:
808 numas=extended_flv.get("numas")
809 if numas:
810 for numa in numas:
811 #overwrite ram and vcpus
812 ram = numa['memory']*1024
813 if 'paired-threads' in numa:
814 cpu = numa['paired-threads']*2
815 elif 'cores' in numa:
816 cpu = numa['cores']
817 elif 'threads' in numa:
818 cpu = numa['threads']
819
820 new_flavor[FLAVOR_RAM_KEY] = ram
821 new_flavor[FLAVOR_VCPUS_KEY] = cpu
822 new_flavor[FLAVOR_DISK_KEY] = disk
823 # generate a new uuid put to internal dict and return it.
824 flavor_id = uuid.uuid4()
825 vimconnector.flavorlist[str(flavor_id)] = new_flavor
826 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
827
828 return str(flavor_id)
829
830 def delete_flavor(self, flavor_id):
831 """Deletes a tenant flavor from VIM identify by its id
832
833 Returns the used id or raise an exception
834 """
835 if flavor_id not in vimconnector.flavorlist:
836 raise vimconn.vimconnNotFoundException("Flavor not found.")
837
838 vimconnector.flavorlist.pop(flavor_id, None)
839 return flavor_id
840
841 def new_image(self, image_dict):
842 """
843 Adds a tenant image to VIM
844 Returns:
845 200, image-id if the image is created
846 <0, message if there is an error
847 """
848
849 return self.get_image_id_from_path(image_dict['location'])
850
851 def delete_image(self, image_id):
852 """
853 Deletes a tenant image from VIM
854 Args:
855 image_id is ID of Image to be deleted
856 Return:
857 returns the image identifier in UUID format or raises an exception on error
858 """
859 vca = self.connect_as_admin()
860 if not vca:
861 raise vimconn.vimconnConnectionException("self.connect() is failed")
862 # Get Catalog details
863 url_list = [self.vca.host, '/api/catalog/', image_id]
864 catalog_herf = ''.join(url_list)
865 response = Http.get(url=catalog_herf,
866 headers=vca.vcloud_session.get_vcloud_headers(),
867 verify=vca.verify,
868 logger=vca.logger)
869
870 if response.status_code != requests.codes.ok:
871 self.logger.debug("delete_image():GET REST API call {} failed. "\
872 "Return status code {}".format(catalog_herf,
873 response.status_code))
874 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
875
876 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
877 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
878 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
879
880 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
881 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
882 for catalogItem in catalogItems:
883 catalogItem_href = catalogItem.attrib['href']
884
885 #GET details of catalogItem
886 response = Http.get(url=catalogItem_href,
887 headers=vca.vcloud_session.get_vcloud_headers(),
888 verify=vca.verify,
889 logger=vca.logger)
890
891 if response.status_code != requests.codes.ok:
892 self.logger.debug("delete_image():GET REST API call {} failed. "\
893 "Return status code {}".format(catalog_herf,
894 response.status_code))
895 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
896 catalogItem,
897 image_id))
898
899 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
900 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
901 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
902 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
903
904 #Remove catalogItem
905 response = Http.delete(url= catalogitem_remove_href,
906 headers=vca.vcloud_session.get_vcloud_headers(),
907 verify=vca.verify,
908 logger=vca.logger)
909 if response.status_code == requests.codes.no_content:
910 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
911 else:
912 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
913
914 #Remove catalog
915 url_list = [self.vca.host, '/api/admin/catalog/', image_id]
916 catalog_remove_herf = ''.join(url_list)
917 response = Http.delete(url= catalog_remove_herf,
918 headers=vca.vcloud_session.get_vcloud_headers(),
919 verify=vca.verify,
920 logger=vca.logger)
921
922 if response.status_code == requests.codes.no_content:
923 self.logger.debug("Deleted Catalog {}".format(image_id))
924 return image_id
925 else:
926 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
927
928
929 def catalog_exists(self, catalog_name, catalogs):
930 """
931
932 :param catalog_name:
933 :param catalogs:
934 :return:
935 """
936 for catalog in catalogs:
937 if catalog.name == catalog_name:
938 return True
939 return False
940
941 def create_vimcatalog(self, vca=None, catalog_name=None):
942 """ Create new catalog entry in vCloud director.
943
944 Args
945 vca: vCloud director.
946 catalog_name catalog that client wish to create. Note no validation done for a name.
947 Client must make sure that provide valid string representation.
948
949 Return (bool) True if catalog created.
950
951 """
952 try:
953 task = vca.create_catalog(catalog_name, catalog_name)
954 result = vca.block_until_completed(task)
955 if not result:
956 return False
957 catalogs = vca.get_catalogs()
958 except:
959 return False
960 return self.catalog_exists(catalog_name, catalogs)
961
962 # noinspection PyIncorrectDocstring
963 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
964 description='', progress=False, chunk_bytes=128 * 1024):
965 """
966 Uploads a OVF file to a vCloud catalog
967
968 :param chunk_bytes:
969 :param progress:
970 :param description:
971 :param image_name:
972 :param vca:
973 :param catalog_name: (str): The name of the catalog to upload the media.
974 :param media_file_name: (str): The name of the local media file to upload.
975 :return: (bool) True if the media file was successfully uploaded, false otherwise.
976 """
977 os.path.isfile(media_file_name)
978 statinfo = os.stat(media_file_name)
979
980 # find a catalog entry where we upload OVF.
981 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
982 # status change.
983 # if VCD can parse OVF we upload VMDK file
984 try:
985 for catalog in vca.get_catalogs():
986 if catalog_name != catalog.name:
987 continue
988 link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
989 link.get_rel() == 'add', catalog.get_Link())
990 assert len(link) == 1
991 data = """
992 <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
993 """ % (escape(catalog_name), escape(description))
994 headers = vca.vcloud_session.get_vcloud_headers()
995 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
996 response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
997 if response.status_code == requests.codes.created:
998 catalogItem = XmlElementTree.fromstring(response.content)
999 entity = [child for child in catalogItem if
1000 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1001 href = entity.get('href')
1002 template = href
1003 response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
1004 verify=vca.verify, logger=self.logger)
1005
1006 if response.status_code == requests.codes.ok:
1007 media = mediaType.parseString(response.content, True)
1008 link = filter(lambda link: link.get_rel() == 'upload:default',
1009 media.get_Files().get_File()[0].get_Link())[0]
1010 headers = vca.vcloud_session.get_vcloud_headers()
1011 headers['Content-Type'] = 'Content-Type text/xml'
1012 response = Http.put(link.get_href(),
1013 data=open(media_file_name, 'rb'),
1014 headers=headers,
1015 verify=vca.verify, logger=self.logger)
1016 if response.status_code != requests.codes.ok:
1017 self.logger.debug(
1018 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1019 media_file_name))
1020 return False
1021
1022 # TODO fix this with aync block
1023 time.sleep(5)
1024
1025 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1026
1027 # uploading VMDK file
1028 # check status of OVF upload and upload remaining files.
1029 response = Http.get(template,
1030 headers=vca.vcloud_session.get_vcloud_headers(),
1031 verify=vca.verify,
1032 logger=self.logger)
1033
1034 if response.status_code == requests.codes.ok:
1035 media = mediaType.parseString(response.content, True)
1036 number_of_files = len(media.get_Files().get_File())
1037 for index in xrange(0, number_of_files):
1038 links_list = filter(lambda link: link.get_rel() == 'upload:default',
1039 media.get_Files().get_File()[index].get_Link())
1040 for link in links_list:
1041 # we skip ovf since it already uploaded.
1042 if 'ovf' in link.get_href():
1043 continue
1044 # The OVF file and VMDK must be in a same directory
1045 head, tail = os.path.split(media_file_name)
1046 file_vmdk = head + '/' + link.get_href().split("/")[-1]
1047 if not os.path.isfile(file_vmdk):
1048 return False
1049 statinfo = os.stat(file_vmdk)
1050 if statinfo.st_size == 0:
1051 return False
1052 hrefvmdk = link.get_href()
1053
1054 if progress:
1055 print("Uploading file: {}".format(file_vmdk))
1056 if progress:
1057 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1058 FileTransferSpeed()]
1059 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1060
1061 bytes_transferred = 0
1062 f = open(file_vmdk, 'rb')
1063 while bytes_transferred < statinfo.st_size:
1064 my_bytes = f.read(chunk_bytes)
1065 if len(my_bytes) <= chunk_bytes:
1066 headers = vca.vcloud_session.get_vcloud_headers()
1067 headers['Content-Range'] = 'bytes %s-%s/%s' % (
1068 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1069 headers['Content-Length'] = str(len(my_bytes))
1070 response = Http.put(hrefvmdk,
1071 headers=headers,
1072 data=my_bytes,
1073 verify=vca.verify,
1074 logger=None)
1075
1076 if response.status_code == requests.codes.ok:
1077 bytes_transferred += len(my_bytes)
1078 if progress:
1079 progress_bar.update(bytes_transferred)
1080 else:
1081 self.logger.debug(
1082 'file upload failed with error: [%s] %s' % (response.status_code,
1083 response.content))
1084
1085 f.close()
1086 return False
1087 f.close()
1088 if progress:
1089 progress_bar.finish()
1090 time.sleep(10)
1091 return True
1092 else:
1093 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1094 format(catalog_name, media_file_name))
1095 return False
1096 except Exception as exp:
1097 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1098 .format(catalog_name,media_file_name, exp))
1099 raise vimconn.vimconnException(
1100 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1101 .format(catalog_name,media_file_name, exp))
1102
1103 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1104 return False
1105
1106 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1107 """Upload media file"""
1108 # TODO add named parameters for readability
1109
1110 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1111 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1112
1113 def validate_uuid4(self, uuid_string=None):
1114 """ Method validate correct format of UUID.
1115
1116 Return: true if string represent valid uuid
1117 """
1118 try:
1119 val = uuid.UUID(uuid_string, version=4)
1120 except ValueError:
1121 return False
1122 return True
1123
1124 def get_catalogid(self, catalog_name=None, catalogs=None):
1125 """ Method check catalog and return catalog ID in UUID format.
1126
1127 Args
1128 catalog_name: catalog name as string
1129 catalogs: list of catalogs.
1130
1131 Return: catalogs uuid
1132 """
1133
1134 for catalog in catalogs:
1135 if catalog.name == catalog_name:
1136 catalog_id = catalog.get_id().split(":")
1137 return catalog_id[3]
1138 return None
1139
1140 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1141 """ Method check catalog and return catalog name lookup done by catalog UUID.
1142
1143 Args
1144 catalog_name: catalog name as string
1145 catalogs: list of catalogs.
1146
1147 Return: catalogs name or None
1148 """
1149
1150 if not self.validate_uuid4(uuid_string=catalog_uuid):
1151 return None
1152
1153 for catalog in catalogs:
1154 catalog_id = catalog.get_id().split(":")[3]
1155 if catalog_id == catalog_uuid:
1156 return catalog.name
1157 return None
1158
1159 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1160 """ Method check catalog and return catalog name lookup done by catalog UUID.
1161
1162 Args
1163 catalog_name: catalog name as string
1164 catalogs: list of catalogs.
1165
1166 Return: catalogs name or None
1167 """
1168
1169 if not self.validate_uuid4(uuid_string=catalog_uuid):
1170 return None
1171
1172 for catalog in catalogs:
1173 catalog_id = catalog.get_id().split(":")[3]
1174 if catalog_id == catalog_uuid:
1175 return catalog
1176 return None
1177
1178 def get_image_id_from_path(self, path=None, progress=False):
1179 """ Method upload OVF image to vCloud director.
1180
1181 Each OVF image represented as single catalog entry in vcloud director.
1182 The method check for existing catalog entry. The check done by file name without file extension.
1183
1184 if given catalog name already present method will respond with existing catalog uuid otherwise
1185 it will create new catalog entry and upload OVF file to newly created catalog.
1186
1187 If method can't create catalog entry or upload a file it will throw exception.
1188
1189 Method accept boolean flag progress that will output progress bar. It useful method
1190 for standalone upload use case. In case to test large file upload.
1191
1192 Args
1193 path: - valid path to OVF file.
1194 progress - boolean progress bar show progress bar.
1195
1196 Return: if image uploaded correct method will provide image catalog UUID.
1197 """
1198
1199 if not path:
1200 raise vimconn.vimconnException("Image path can't be None.")
1201
1202 if not os.path.isfile(path):
1203 raise vimconn.vimconnException("Can't read file. File not found.")
1204
1205 if not os.access(path, os.R_OK):
1206 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1207
1208 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1209
1210 dirpath, filename = os.path.split(path)
1211 flname, file_extension = os.path.splitext(path)
1212 if file_extension != '.ovf':
1213 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1214 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1215
1216 catalog_name = os.path.splitext(filename)[0]
1217 catalog_md5_name = hashlib.md5(path).hexdigest()
1218 self.logger.debug("File name {} Catalog Name {} file path {} "
1219 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1220
1221 try:
1222 catalogs = self.vca.get_catalogs()
1223 except Exception as exp:
1224 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1225 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1226
1227 if len(catalogs) == 0:
1228 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1229 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1230 if not result:
1231 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1232 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1233 media_name=filename, medial_file_name=path, progress=progress)
1234 if not result:
1235 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1236 return self.get_catalogid(catalog_name, self.vca.get_catalogs())
1237 else:
1238 for catalog in catalogs:
1239 # search for existing catalog if we find same name we return ID
1240 # TODO optimize this
1241 if catalog.name == catalog_md5_name:
1242 self.logger.debug("Found existing catalog entry for {} "
1243 "catalog id {}".format(catalog_name,
1244 self.get_catalogid(catalog_md5_name, catalogs)))
1245 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1246
1247 # if we didn't find existing catalog we create a new one and upload image.
1248 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1249 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1250 if not result:
1251 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1252
1253 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1254 media_name=filename, medial_file_name=path, progress=progress)
1255 if not result:
1256 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1257
1258 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1259
1260 def get_image_list(self, filter_dict={}):
1261 '''Obtain tenant images from VIM
1262 Filter_dict can be:
1263 name: image name
1264 id: image uuid
1265 checksum: image checksum
1266 location: image path
1267 Returns the image list of dictionaries:
1268 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1269 List can be empty
1270 '''
1271
1272 try:
1273 image_list = []
1274 catalogs = self.vca.get_catalogs()
1275 if len(catalogs) == 0:
1276 return image_list
1277 else:
1278 for catalog in catalogs:
1279 catalog_uuid = catalog.get_id().split(":")[3]
1280 name = catalog.name
1281 filtered_dict = {}
1282 if filter_dict.get("name") and filter_dict["name"] != name:
1283 continue
1284 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1285 continue
1286 filtered_dict ["name"] = name
1287 filtered_dict ["id"] = catalog_uuid
1288 image_list.append(filtered_dict)
1289
1290 self.logger.debug("List of already created catalog items: {}".format(image_list))
1291 return image_list
1292 except Exception as exp:
1293 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1294
1295 def get_vappid(self, vdc=None, vapp_name=None):
1296 """ Method takes vdc object and vApp name and returns vapp uuid or None
1297
1298 Args:
1299 vdc: The VDC object.
1300 vapp_name: is application vappp name identifier
1301
1302 Returns:
1303 The return vApp name otherwise None
1304 """
1305 if vdc is None or vapp_name is None:
1306 return None
1307 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1308 try:
1309 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1310 vdc.ResourceEntities.ResourceEntity)
1311 if len(refs) == 1:
1312 return refs[0].href.split("vapp")[1][1:]
1313 except Exception as e:
1314 self.logger.exception(e)
1315 return False
1316 return None
1317
1318 def check_vapp(self, vdc=None, vapp_uuid=None):
1319 """ Method Method returns True or False if vapp deployed in vCloud director
1320
1321 Args:
1322 vca: Connector to VCA
1323 vdc: The VDC object.
1324 vappid: vappid is application identifier
1325
1326 Returns:
1327 The return True if vApp deployed
1328 :param vdc:
1329 :param vapp_uuid:
1330 """
1331 try:
1332 refs = filter(lambda ref:
1333 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1334 vdc.ResourceEntities.ResourceEntity)
1335 for ref in refs:
1336 vappid = ref.href.split("vapp")[1][1:]
1337 # find vapp with respected vapp uuid
1338 if vappid == vapp_uuid:
1339 return True
1340 except Exception as e:
1341 self.logger.exception(e)
1342 return False
1343 return False
1344
1345 def get_namebyvappid(self, vdc=None, vapp_uuid=None):
1346 """Method returns vApp name from vCD and lookup done by vapp_id.
1347
1348 Args:
1349 vca: Connector to VCA
1350 vdc: The VDC object.
1351 vapp_uuid: vappid is application identifier
1352
1353 Returns:
1354 The return vApp name otherwise None
1355 """
1356
1357 try:
1358 refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1359 vdc.ResourceEntities.ResourceEntity)
1360 for ref in refs:
1361 # we care only about UUID the rest doesn't matter
1362 vappid = ref.href.split("vapp")[1][1:]
1363 if vappid == vapp_uuid:
1364 response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
1365 logger=self.logger)
1366
1367 #Retry login if session expired & retry sending request
1368 if response.status_code == 403:
1369 response = self.retry_rest('GET', ref.href)
1370
1371 tree = XmlElementTree.fromstring(response.content)
1372 return tree.attrib['name']
1373 except Exception as e:
1374 self.logger.exception(e)
1375 return None
1376 return None
1377
1378 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1379 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1380 """Adds a VM instance to VIM
1381 Params:
1382 'start': (boolean) indicates if VM must start or created in pause mode.
1383 'image_id','flavor_id': image and flavor VIM id to use for the VM
1384 'net_list': list of interfaces, each one is a dictionary with:
1385 'name': (optional) name for the interface.
1386 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1387 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1388 'model': (optional and only have sense for type==virtual) interface model: virtio, e2000, ...
1389 'mac_address': (optional) mac address to assign to this interface
1390 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1391 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1392 'type': (mandatory) can be one of:
1393 'virtual', in this case always connected to a network of type 'net_type=bridge'
1394 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
1395 can created unconnected
1396 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1397 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1398 are allocated on the same physical NIC
1399 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1400 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1401 or True, it must apply the default VIM behaviour
1402 After execution the method will add the key:
1403 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1404 interface. 'net_list' is modified
1405 'cloud_config': (optional) dictionary with:
1406 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1407 'users': (optional) list of users to be inserted, each item is a dict with:
1408 'name': (mandatory) user name,
1409 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1410 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1411 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1412 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1413 'dest': (mandatory) string with the destination absolute path
1414 'encoding': (optional, by default text). Can be one of:
1415 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1416 'content' (mandatory): string with the content of the file
1417 'permissions': (optional) string with file permissions, typically octal notation '0644'
1418 'owner': (optional) file owner, string with the format 'owner:group'
1419 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1420 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1421 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1422 'size': (mandatory) string with the size of the disk in GB
1423 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1424 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1425 availability_zone_index is None
1426 Returns the instance identifier or raises an exception on error
1427 """
1428 self.logger.info("Creating new instance for entry {}".format(name))
1429 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
1430 description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
1431
1432 #new vm name = vmname + tenant_id + uuid
1433 new_vm_name = [name, '-', str(uuid.uuid4())]
1434 vmname_andid = ''.join(new_vm_name)
1435
1436 # if vm already deployed we return existing uuid
1437 # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
1438 # if vapp_uuid is not None:
1439 # return vapp_uuid
1440
1441 # we check for presence of VDC, Catalog entry and Flavor.
1442 vdc = self.get_vdc_details()
1443 if vdc is None:
1444 raise vimconn.vimconnNotFoundException(
1445 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1446 catalogs = self.vca.get_catalogs()
1447 if catalogs is None:
1448 #Retry once, if failed by refreshing token
1449 self.get_token()
1450 catalogs = self.vca.get_catalogs()
1451 if catalogs is None:
1452 raise vimconn.vimconnNotFoundException(
1453 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1454
1455 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1456 if catalog_hash_name:
1457 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1458 else:
1459 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1460 "(Failed retrieve catalog information {})".format(name, image_id))
1461
1462
1463 # Set vCPU and Memory based on flavor.
1464 vm_cpus = None
1465 vm_memory = None
1466 vm_disk = None
1467 numas = None
1468
1469 if flavor_id is not None:
1470 if flavor_id not in vimconnector.flavorlist:
1471 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1472 "Failed retrieve flavor information "
1473 "flavor id {}".format(name, flavor_id))
1474 else:
1475 try:
1476 flavor = vimconnector.flavorlist[flavor_id]
1477 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1478 vm_memory = flavor[FLAVOR_RAM_KEY]
1479 vm_disk = flavor[FLAVOR_DISK_KEY]
1480 extended = flavor.get("extended", None)
1481 if extended:
1482 numas=extended.get("numas", None)
1483
1484 except Exception as exp:
1485 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1486
1487 # image upload creates template name as catalog name space Template.
1488 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1489 power_on = 'false'
1490 if start:
1491 power_on = 'true'
1492
1493 # client must provide at least one entry in net_list if not we report error
1494 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1495 #If no mgmt, then the 1st NN in netlist is considered as primary net.
1496 primary_net = None
1497 primary_netname = None
1498 network_mode = 'bridged'
1499 if net_list is not None and len(net_list) > 0:
1500 for net in net_list:
1501 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1502 primary_net = net
1503 if primary_net is None:
1504 primary_net = net_list[0]
1505
1506 try:
1507 primary_net_id = primary_net['net_id']
1508 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1509 if 'name' in network_dict:
1510 primary_netname = network_dict['name']
1511
1512 except KeyError:
1513 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1514 else:
1515 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
1516
1517 # use: 'data', 'bridge', 'mgmt'
1518 # create vApp. Set vcpu and ram based on flavor id.
1519 try:
1520 for retry in (1,2):
1521 vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1522 self.get_catalogbyid(image_id, catalogs),
1523 network_name=None, # None while creating vapp
1524 network_mode=network_mode,
1525 vm_name=vmname_andid,
1526 vm_cpus=vm_cpus, # can be None if flavor is None
1527 vm_memory=vm_memory) # can be None if flavor is None
1528
1529 if not vapptask and retry==1:
1530 self.get_token() # Retry getting token
1531 continue
1532 else:
1533 break
1534
1535 if vapptask is None or vapptask is False:
1536 raise vimconn.vimconnUnexpectedResponse(
1537 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1538 if type(vapptask) is VappTask:
1539 self.vca.block_until_completed(vapptask)
1540
1541 except Exception as exp:
1542 raise vimconn.vimconnUnexpectedResponse(
1543 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1544
1545 # we should have now vapp in undeployed state.
1546 try:
1547 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1548
1549 except Exception as exp:
1550 raise vimconn.vimconnUnexpectedResponse(
1551 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1552 .format(vmname_andid, exp))
1553
1554 if vapp_uuid is None:
1555 raise vimconn.vimconnUnexpectedResponse(
1556 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1557 vmname_andid))
1558
1559 #Add PCI passthrough/SRIOV configrations
1560 vm_obj = None
1561 pci_devices_info = []
1562 sriov_net_info = []
1563 reserve_memory = False
1564
1565 for net in net_list:
1566 if net["type"]=="PF":
1567 pci_devices_info.append(net)
1568 elif (net["type"]=="VF" or net["type"]=="VFnotShared") and 'net_id'in net:
1569 sriov_net_info.append(net)
1570
1571 #Add PCI
1572 if len(pci_devices_info) > 0:
1573 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1574 vmname_andid ))
1575 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1576 pci_devices_info,
1577 vmname_andid)
1578 if PCI_devices_status:
1579 self.logger.info("Added PCI devives {} to VM {}".format(
1580 pci_devices_info,
1581 vmname_andid)
1582 )
1583 reserve_memory = True
1584 else:
1585 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1586 pci_devices_info,
1587 vmname_andid)
1588 )
1589
1590 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1591 # Modify vm disk
1592 if vm_disk:
1593 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1594 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1595 if result :
1596 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1597
1598 #Add new or existing disks to vApp
1599 if disk_list:
1600 added_existing_disk = False
1601 for disk in disk_list:
1602 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1603 image_id = disk['image_id']
1604 # Adding CD-ROM to VM
1605 # will revisit code once specification ready to support this feature
1606 self.insert_media_to_vm(vapp, image_id)
1607 elif "image_id" in disk and disk["image_id"] is not None:
1608 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1609 disk["image_id"] , vapp_uuid))
1610 self.add_existing_disk(catalogs=catalogs,
1611 image_id=disk["image_id"],
1612 size = disk["size"],
1613 template_name=templateName,
1614 vapp_uuid=vapp_uuid
1615 )
1616 added_existing_disk = True
1617 else:
1618 #Wait till added existing disk gets reflected into vCD database/API
1619 if added_existing_disk:
1620 time.sleep(5)
1621 added_existing_disk = False
1622 self.add_new_disk(vapp_uuid, disk['size'])
1623
1624 if numas:
1625 # Assigning numa affinity setting
1626 for numa in numas:
1627 if 'paired-threads-id' in numa:
1628 paired_threads_id = numa['paired-threads-id']
1629 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1630
1631 # add NICs & connect to networks in netlist
1632 try:
1633 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1634 nicIndex = 0
1635 primary_nic_index = 0
1636 for net in net_list:
1637 # openmano uses network id in UUID format.
1638 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
1639 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1640 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1641
1642 if 'net_id' not in net:
1643 continue
1644
1645 #Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
1646 #Same will be returned in refresh_vms_status() as vim_interface_id
1647 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1648
1649 interface_net_id = net['net_id']
1650 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1651 interface_network_mode = net['use']
1652
1653 if interface_network_mode == 'mgmt':
1654 primary_nic_index = nicIndex
1655
1656 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1657 - DHCP (The IP address is obtained from a DHCP service.)
1658 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1659 - NONE (No IP addressing mode specified.)"""
1660
1661 if primary_netname is not None:
1662 nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
1663 if len(nets) == 1:
1664 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
1665
1666 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1667 task = vapp.connect_to_network(nets[0].name, nets[0].href)
1668 if type(task) is GenericTask:
1669 self.vca.block_until_completed(task)
1670 # connect network to VM - with all DHCP by default
1671
1672 type_list = ['PF','VF','VFnotShared']
1673 if 'type' in net and net['type'] not in type_list:
1674 # fetching nic type from vnf
1675 if 'model' in net:
1676 nic_type = net['model']
1677 self.logger.info("new_vminstance(): adding network adapter "\
1678 "to a network {}".format(nets[0].name))
1679 self.add_network_adapter_to_vms(vapp, nets[0].name,
1680 primary_nic_index,
1681 nicIndex,
1682 net,
1683 nic_type=nic_type)
1684 else:
1685 self.logger.info("new_vminstance(): adding network adapter "\
1686 "to a network {}".format(nets[0].name))
1687 self.add_network_adapter_to_vms(vapp, nets[0].name,
1688 primary_nic_index,
1689 nicIndex,
1690 net)
1691 nicIndex += 1
1692
1693 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1694 # cloud-init for ssh-key injection
1695 if cloud_config:
1696 self.cloud_init(vapp,cloud_config)
1697
1698 # deploy and power on vm
1699 self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
1700 deploytask = vapp.deploy(powerOn=False)
1701 if type(deploytask) is GenericTask:
1702 self.vca.block_until_completed(deploytask)
1703
1704 # ############# Stub code for SRIOV #################
1705 #Add SRIOV
1706 # if len(sriov_net_info) > 0:
1707 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1708 # vmname_andid ))
1709 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1710 # sriov_net_info,
1711 # vmname_andid)
1712 # if sriov_status:
1713 # self.logger.info("Added SRIOV {} to VM {}".format(
1714 # sriov_net_info,
1715 # vmname_andid)
1716 # )
1717 # reserve_memory = True
1718 # else:
1719 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1720 # sriov_net_info,
1721 # vmname_andid)
1722 # )
1723
1724 # If VM has PCI devices or SRIOV reserve memory for VM
1725 if reserve_memory:
1726 memReserve = vm_obj.config.hardware.memoryMB
1727 spec = vim.vm.ConfigSpec()
1728 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1729 task = vm_obj.ReconfigVM_Task(spec=spec)
1730 if task:
1731 result = self.wait_for_vcenter_task(task, vcenter_conect)
1732 self.logger.info("Reserved memory {} MB for "
1733 "VM VM status: {}".format(str(memReserve), result))
1734 else:
1735 self.logger.info("Fail to reserved memory {} to VM {}".format(
1736 str(memReserve), str(vm_obj)))
1737
1738 self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
1739
1740 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1741 poweron_task = vapp.poweron()
1742 if type(poweron_task) is GenericTask:
1743 self.vca.block_until_completed(poweron_task)
1744
1745 except Exception as exp :
1746 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
1747 self.logger.debug("new_vminstance(): Failed create new vm instance {} with exception {}"
1748 .format(name, exp))
1749 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
1750 .format(name, exp))
1751
1752 # check if vApp deployed and if that the case return vApp UUID otherwise -1
1753 wait_time = 0
1754 vapp_uuid = None
1755 while wait_time <= MAX_WAIT_TIME:
1756 try:
1757 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1758 except Exception as exp:
1759 raise vimconn.vimconnUnexpectedResponse(
1760 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1761 .format(vmname_andid, exp))
1762
1763 if vapp and vapp.me.deployed:
1764 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1765 break
1766 else:
1767 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1768 time.sleep(INTERVAL_TIME)
1769
1770 wait_time +=INTERVAL_TIME
1771
1772 if vapp_uuid is not None:
1773 return vapp_uuid
1774 else:
1775 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
1776
1777 ##
1778 ##
1779 ## based on current discussion
1780 ##
1781 ##
1782 ## server:
1783 # created: '2016-09-08T11:51:58'
1784 # description: simple-instance.linux1.1
1785 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1786 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1787 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1788 # status: ACTIVE
1789 # error_msg:
1790 # interfaces: …
1791 #
1792 def get_vminstance(self, vim_vm_uuid=None):
1793 """Returns the VM instance information from VIM"""
1794
1795 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1796
1797 vdc = self.get_vdc_details()
1798 if vdc is None:
1799 raise vimconn.vimconnConnectionException(
1800 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1801
1802 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1803 if not vm_info_dict:
1804 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1805 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1806
1807 status_key = vm_info_dict['status']
1808 error = ''
1809 try:
1810 vm_dict = {'created': vm_info_dict['created'],
1811 'description': vm_info_dict['name'],
1812 'status': vcdStatusCode2manoFormat[int(status_key)],
1813 'hostId': vm_info_dict['vmuuid'],
1814 'error_msg': error,
1815 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1816
1817 if 'interfaces' in vm_info_dict:
1818 vm_dict['interfaces'] = vm_info_dict['interfaces']
1819 else:
1820 vm_dict['interfaces'] = []
1821 except KeyError:
1822 vm_dict = {'created': '',
1823 'description': '',
1824 'status': vcdStatusCode2manoFormat[int(-1)],
1825 'hostId': vm_info_dict['vmuuid'],
1826 'error_msg': "Inconsistency state",
1827 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1828
1829 return vm_dict
1830
1831 def delete_vminstance(self, vm__vim_uuid):
1832 """Method poweroff and remove VM instance from vcloud director network.
1833
1834 Args:
1835 vm__vim_uuid: VM UUID
1836
1837 Returns:
1838 Returns the instance identifier
1839 """
1840
1841 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1842
1843 vdc = self.get_vdc_details()
1844 if vdc is None:
1845 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1846 self.tenant_name))
1847 raise vimconn.vimconnException(
1848 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1849
1850 try:
1851 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1852 if vapp_name is None:
1853 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1854 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1855 else:
1856 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1857
1858 # Delete vApp and wait for status change if task executed and vApp is None.
1859 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1860
1861 if vapp:
1862 if vapp.me.deployed:
1863 self.logger.info("Powering off vApp {}".format(vapp_name))
1864 #Power off vApp
1865 powered_off = False
1866 wait_time = 0
1867 while wait_time <= MAX_WAIT_TIME:
1868 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1869 if not vapp:
1870 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1871 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1872
1873 power_off_task = vapp.poweroff()
1874 if type(power_off_task) is GenericTask:
1875 result = self.vca.block_until_completed(power_off_task)
1876 if result:
1877 powered_off = True
1878 break
1879 else:
1880 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1881 time.sleep(INTERVAL_TIME)
1882
1883 wait_time +=INTERVAL_TIME
1884 if not powered_off:
1885 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1886 else:
1887 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1888
1889 #Undeploy vApp
1890 self.logger.info("Undeploy vApp {}".format(vapp_name))
1891 wait_time = 0
1892 undeployed = False
1893 while wait_time <= MAX_WAIT_TIME:
1894 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1895 if not vapp:
1896 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1897 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1898 undeploy_task = vapp.undeploy(action='powerOff')
1899
1900 if type(undeploy_task) is GenericTask:
1901 result = self.vca.block_until_completed(undeploy_task)
1902 if result:
1903 undeployed = True
1904 break
1905 else:
1906 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1907 time.sleep(INTERVAL_TIME)
1908
1909 wait_time +=INTERVAL_TIME
1910
1911 if not undeployed:
1912 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1913
1914 # delete vapp
1915 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1916 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1917
1918 if vapp is not None:
1919 wait_time = 0
1920 result = False
1921
1922 while wait_time <= MAX_WAIT_TIME:
1923 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1924 if not vapp:
1925 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1926 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1927
1928 delete_task = vapp.delete()
1929
1930 if type(delete_task) is GenericTask:
1931 self.vca.block_until_completed(delete_task)
1932 result = self.vca.block_until_completed(delete_task)
1933 if result:
1934 break
1935 else:
1936 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1937 time.sleep(INTERVAL_TIME)
1938
1939 wait_time +=INTERVAL_TIME
1940
1941 if not result:
1942 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1943
1944 except:
1945 self.logger.debug(traceback.format_exc())
1946 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1947
1948 if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
1949 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
1950 return vm__vim_uuid
1951 else:
1952 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1953
1954 def refresh_vms_status(self, vm_list):
1955 """Get the status of the virtual machines and their interfaces/ports
1956 Params: the list of VM identifiers
1957 Returns a dictionary with:
1958 vm_id: #VIM id of this Virtual Machine
1959 status: #Mandatory. Text with one of:
1960 # DELETED (not found at vim)
1961 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1962 # OTHER (Vim reported other status not understood)
1963 # ERROR (VIM indicates an ERROR status)
1964 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
1965 # CREATING (on building process), ERROR
1966 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
1967 #
1968 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1969 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1970 interfaces:
1971 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1972 mac_address: #Text format XX:XX:XX:XX:XX:XX
1973 vim_net_id: #network id where this interface is connected
1974 vim_interface_id: #interface/port VIM id
1975 ip_address: #null, or text with IPv4, IPv6 address
1976 """
1977
1978 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
1979
1980 vdc = self.get_vdc_details()
1981 if vdc is None:
1982 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1983
1984 vms_dict = {}
1985 nsx_edge_list = []
1986 for vmuuid in vm_list:
1987 vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
1988 if vmname is not None:
1989
1990 try:
1991 vm_pci_details = self.get_vm_pci_details(vmuuid)
1992 the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
1993 vm_info = the_vapp.get_vms_details()
1994 vm_status = vm_info[0]['status']
1995 vm_info[0].update(vm_pci_details)
1996
1997 vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
1998 'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
1999 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2000
2001 # get networks
2002 vm_app_networks = the_vapp.get_vms_network_info()
2003 for vapp_network in vm_app_networks:
2004 for vm_network in vapp_network:
2005 if vm_network['name'] == vmname:
2006 #Assign IP Address based on MAC Address in NSX DHCP lease info
2007 if vm_network['ip'] is None:
2008 if not nsx_edge_list:
2009 nsx_edge_list = self.get_edge_details()
2010 if nsx_edge_list is None:
2011 raise vimconn.vimconnException("refresh_vms_status:"\
2012 "Failed to get edge details from NSX Manager")
2013 if vm_network['mac'] is not None:
2014 vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
2015
2016 vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
2017 interface = {"mac_address": vm_network['mac'],
2018 "vim_net_id": vm_net_id,
2019 "vim_interface_id": vm_net_id,
2020 'ip_address': vm_network['ip']}
2021 # interface['vim_info'] = yaml.safe_dump(vm_network)
2022 vm_dict["interfaces"].append(interface)
2023 # add a vm to vm dict
2024 vms_dict.setdefault(vmuuid, vm_dict)
2025 except Exception as exp:
2026 self.logger.debug("Error in response {}".format(exp))
2027 self.logger.debug(traceback.format_exc())
2028
2029 return vms_dict
2030
2031
2032 def get_edge_details(self):
2033 """Get the NSX edge list from NSX Manager
2034 Returns list of NSX edges
2035 """
2036 edge_list = []
2037 rheaders = {'Content-Type': 'application/xml'}
2038 nsx_api_url = '/api/4.0/edges'
2039
2040 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2041
2042 try:
2043 resp = requests.get(self.nsx_manager + nsx_api_url,
2044 auth = (self.nsx_user, self.nsx_password),
2045 verify = False, headers = rheaders)
2046 if resp.status_code == requests.codes.ok:
2047 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2048 for edge_pages in paged_Edge_List:
2049 if edge_pages.tag == 'edgePage':
2050 for edge_summary in edge_pages:
2051 if edge_summary.tag == 'pagingInfo':
2052 for element in edge_summary:
2053 if element.tag == 'totalCount' and element.text == '0':
2054 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2055 .format(self.nsx_manager))
2056
2057 if edge_summary.tag == 'edgeSummary':
2058 for element in edge_summary:
2059 if element.tag == 'id':
2060 edge_list.append(element.text)
2061 else:
2062 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2063 .format(self.nsx_manager))
2064
2065 if not edge_list:
2066 raise vimconn.vimconnException("get_edge_details: "\
2067 "No NSX edge details found: {}"
2068 .format(self.nsx_manager))
2069 else:
2070 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2071 return edge_list
2072 else:
2073 self.logger.debug("get_edge_details: "
2074 "Failed to get NSX edge details from NSX Manager: {}"
2075 .format(resp.content))
2076 return None
2077
2078 except Exception as exp:
2079 self.logger.debug("get_edge_details: "\
2080 "Failed to get NSX edge details from NSX Manager: {}"
2081 .format(exp))
2082 raise vimconn.vimconnException("get_edge_details: "\
2083 "Failed to get NSX edge details from NSX Manager: {}"
2084 .format(exp))
2085
2086
2087 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2088 """Get IP address details from NSX edges, using the MAC address
2089 PARAMS: nsx_edges : List of NSX edges
2090 mac_address : Find IP address corresponding to this MAC address
2091 Returns: IP address corrresponding to the provided MAC address
2092 """
2093
2094 ip_addr = None
2095 rheaders = {'Content-Type': 'application/xml'}
2096
2097 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2098
2099 try:
2100 for edge in nsx_edges:
2101 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2102
2103 resp = requests.get(self.nsx_manager + nsx_api_url,
2104 auth = (self.nsx_user, self.nsx_password),
2105 verify = False, headers = rheaders)
2106
2107 if resp.status_code == requests.codes.ok:
2108 dhcp_leases = XmlElementTree.fromstring(resp.text)
2109 for child in dhcp_leases:
2110 if child.tag == 'dhcpLeaseInfo':
2111 dhcpLeaseInfo = child
2112 for leaseInfo in dhcpLeaseInfo:
2113 for elem in leaseInfo:
2114 if (elem.tag)=='macAddress':
2115 edge_mac_addr = elem.text
2116 if (elem.tag)=='ipAddress':
2117 ip_addr = elem.text
2118 if edge_mac_addr is not None:
2119 if edge_mac_addr == mac_address:
2120 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2121 .format(ip_addr, mac_address,edge))
2122 return ip_addr
2123 else:
2124 self.logger.debug("get_ipaddr_from_NSXedge: "\
2125 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2126 .format(resp.content))
2127
2128 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2129 return None
2130
2131 except XmlElementTree.ParseError as Err:
2132 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2133
2134
2135 def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
2136 """Send and action over a VM instance from VIM
2137 Returns the vm_id if the action was successfully sent to the VIM"""
2138
2139 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2140 if vm__vim_uuid is None or action_dict is None:
2141 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2142
2143 vdc = self.get_vdc_details()
2144 if vdc is None:
2145 return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)
2146
2147 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
2148 if vapp_name is None:
2149 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2150 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2151 else:
2152 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2153
2154 try:
2155 the_vapp = self.vca.get_vapp(vdc, vapp_name)
2156 # TODO fix all status
2157 if "start" in action_dict:
2158 vm_info = the_vapp.get_vms_details()
2159 vm_status = vm_info[0]['status']
2160 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2161 if vm_status == "Suspended" or vm_status == "Powered off":
2162 power_on_task = the_vapp.poweron()
2163 result = self.vca.block_until_completed(power_on_task)
2164 self.instance_actions_result("start", result, vapp_name)
2165 elif "rebuild" in action_dict:
2166 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2167 rebuild_task = the_vapp.deploy(powerOn=True)
2168 result = self.vca.block_until_completed(rebuild_task)
2169 self.instance_actions_result("rebuild", result, vapp_name)
2170 elif "pause" in action_dict:
2171 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2172 pause_task = the_vapp.undeploy(action='suspend')
2173 result = self.vca.block_until_completed(pause_task)
2174 self.instance_actions_result("pause", result, vapp_name)
2175 elif "resume" in action_dict:
2176 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2177 power_task = the_vapp.poweron()
2178 result = self.vca.block_until_completed(power_task)
2179 self.instance_actions_result("resume", result, vapp_name)
2180 elif "shutoff" in action_dict or "shutdown" in action_dict:
2181 action_name , value = action_dict.items()[0]
2182 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2183 power_off_task = the_vapp.undeploy(action='powerOff')
2184 result = self.vca.block_until_completed(power_off_task)
2185 if action_name == "shutdown":
2186 self.instance_actions_result("shutdown", result, vapp_name)
2187 else:
2188 self.instance_actions_result("shutoff", result, vapp_name)
2189 elif "forceOff" in action_dict:
2190 result = the_vapp.undeploy(action='force')
2191 self.instance_actions_result("forceOff", result, vapp_name)
2192 elif "reboot" in action_dict:
2193 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2194 reboot_task = the_vapp.reboot()
2195 else:
2196 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2197 return vm__vim_uuid
2198 except Exception as exp :
2199 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2200 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2201
2202 def instance_actions_result(self, action, result, vapp_name):
2203 if result:
2204 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2205 else:
2206 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2207
2208 def get_vminstance_console(self, vm_id, console_type="vnc"):
2209 """
2210 Get a console for the virtual machine
2211 Params:
2212 vm_id: uuid of the VM
2213 console_type, can be:
2214 "novnc" (by default), "xvpvnc" for VNC types,
2215 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2216 Returns dict with the console parameters:
2217 protocol: ssh, ftp, http, https, ...
2218 server: usually ip address
2219 port: the http, ssh, ... port
2220 suffix: extra text, e.g. the http path and query string
2221 """
2222 raise vimconn.vimconnNotImplemented("Should have implemented this")
2223
2224 # NOT USED METHODS in current version
2225
2226 def host_vim2gui(self, host, server_dict):
2227 """Transform host dictionary from VIM format to GUI format,
2228 and append to the server_dict
2229 """
2230 raise vimconn.vimconnNotImplemented("Should have implemented this")
2231
2232 def get_hosts_info(self):
2233 """Get the information of deployed hosts
2234 Returns the hosts content"""
2235 raise vimconn.vimconnNotImplemented("Should have implemented this")
2236
2237 def get_hosts(self, vim_tenant):
2238 """Get the hosts and deployed instances
2239 Returns the hosts content"""
2240 raise vimconn.vimconnNotImplemented("Should have implemented this")
2241
2242 def get_processor_rankings(self):
2243 """Get the processor rankings in the VIM database"""
2244 raise vimconn.vimconnNotImplemented("Should have implemented this")
2245
2246 def new_host(self, host_data):
2247 """Adds a new host to VIM"""
2248 '''Returns status code of the VIM response'''
2249 raise vimconn.vimconnNotImplemented("Should have implemented this")
2250
2251 def new_external_port(self, port_data):
2252 """Adds a external port to VIM"""
2253 '''Returns the port identifier'''
2254 raise vimconn.vimconnNotImplemented("Should have implemented this")
2255
2256 def new_external_network(self, net_name, net_type):
2257 """Adds a external network to VIM (shared)"""
2258 '''Returns the network identifier'''
2259 raise vimconn.vimconnNotImplemented("Should have implemented this")
2260
2261 def connect_port_network(self, port_id, network_id, admin=False):
2262 """Connects a external port to a network"""
2263 '''Returns status code of the VIM response'''
2264 raise vimconn.vimconnNotImplemented("Should have implemented this")
2265
2266 def new_vminstancefromJSON(self, vm_data):
2267 """Adds a VM instance to VIM"""
2268 '''Returns the instance identifier'''
2269 raise vimconn.vimconnNotImplemented("Should have implemented this")
2270
2271 def get_network_name_by_id(self, network_uuid=None):
2272 """Method gets vcloud director network named based on supplied uuid.
2273
2274 Args:
2275 network_uuid: network_id
2276
2277 Returns:
2278 The return network name.
2279 """
2280
2281 if not network_uuid:
2282 return None
2283
2284 try:
2285 org_dict = self.get_org(self.org_uuid)
2286 if 'networks' in org_dict:
2287 org_network_dict = org_dict['networks']
2288 for net_uuid in org_network_dict:
2289 if net_uuid == network_uuid:
2290 return org_network_dict[net_uuid]
2291 except:
2292 self.logger.debug("Exception in get_network_name_by_id")
2293 self.logger.debug(traceback.format_exc())
2294
2295 return None
2296
2297 def get_network_id_by_name(self, network_name=None):
2298 """Method gets vcloud director network uuid based on supplied name.
2299
2300 Args:
2301 network_name: network_name
2302 Returns:
2303 The return network uuid.
2304 network_uuid: network_id
2305 """
2306
2307 if not network_name:
2308 self.logger.debug("get_network_id_by_name() : Network name is empty")
2309 return None
2310
2311 try:
2312 org_dict = self.get_org(self.org_uuid)
2313 if org_dict and 'networks' in org_dict:
2314 org_network_dict = org_dict['networks']
2315 for net_uuid,net_name in org_network_dict.iteritems():
2316 if net_name == network_name:
2317 return net_uuid
2318
2319 except KeyError as exp:
2320 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2321
2322 return None
2323
2324 def list_org_action(self):
2325 """
2326 Method leverages vCloud director and query for available organization for particular user
2327
2328 Args:
2329 vca - is active VCA connection.
2330 vdc_name - is a vdc name that will be used to query vms action
2331
2332 Returns:
2333 The return XML respond
2334 """
2335
2336 url_list = [self.vca.host, '/api/org']
2337 vm_list_rest_call = ''.join(url_list)
2338
2339 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2340 response = Http.get(url=vm_list_rest_call,
2341 headers=self.vca.vcloud_session.get_vcloud_headers(),
2342 verify=self.vca.verify,
2343 logger=self.vca.logger)
2344
2345 if response.status_code == 403:
2346 response = self.retry_rest('GET', vm_list_rest_call)
2347
2348 if response.status_code == requests.codes.ok:
2349 return response.content
2350
2351 return None
2352
2353 def get_org_action(self, org_uuid=None):
2354 """
2355 Method leverages vCloud director and retrieve available object fdr organization.
2356
2357 Args:
2358 vca - is active VCA connection.
2359 vdc_name - is a vdc name that will be used to query vms action
2360
2361 Returns:
2362 The return XML respond
2363 """
2364
2365 if org_uuid is None:
2366 return None
2367
2368 url_list = [self.vca.host, '/api/org/', org_uuid]
2369 vm_list_rest_call = ''.join(url_list)
2370
2371 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2372 response = Http.get(url=vm_list_rest_call,
2373 headers=self.vca.vcloud_session.get_vcloud_headers(),
2374 verify=self.vca.verify,
2375 logger=self.vca.logger)
2376
2377 #Retry login if session expired & retry sending request
2378 if response.status_code == 403:
2379 response = self.retry_rest('GET', vm_list_rest_call)
2380
2381 if response.status_code == requests.codes.ok:
2382 return response.content
2383
2384 return None
2385
2386 def get_org(self, org_uuid=None):
2387 """
2388 Method retrieves available organization in vCloud Director
2389
2390 Args:
2391 org_uuid - is a organization uuid.
2392
2393 Returns:
2394 The return dictionary with following key
2395 "network" - for network list under the org
2396 "catalogs" - for network list under the org
2397 "vdcs" - for vdc list under org
2398 """
2399
2400 org_dict = {}
2401
2402 if org_uuid is None:
2403 return org_dict
2404
2405 content = self.get_org_action(org_uuid=org_uuid)
2406 try:
2407 vdc_list = {}
2408 network_list = {}
2409 catalog_list = {}
2410 vm_list_xmlroot = XmlElementTree.fromstring(content)
2411 for child in vm_list_xmlroot:
2412 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2413 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2414 org_dict['vdcs'] = vdc_list
2415 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2416 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2417 org_dict['networks'] = network_list
2418 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2419 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2420 org_dict['catalogs'] = catalog_list
2421 except:
2422 pass
2423
2424 return org_dict
2425
2426 def get_org_list(self):
2427 """
2428 Method retrieves available organization in vCloud Director
2429
2430 Args:
2431 vca - is active VCA connection.
2432
2433 Returns:
2434 The return dictionary and key for each entry VDC UUID
2435 """
2436
2437 org_dict = {}
2438
2439 content = self.list_org_action()
2440 try:
2441 vm_list_xmlroot = XmlElementTree.fromstring(content)
2442 for vm_xml in vm_list_xmlroot:
2443 if vm_xml.tag.split("}")[1] == 'Org':
2444 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2445 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2446 except:
2447 pass
2448
2449 return org_dict
2450
2451 def vms_view_action(self, vdc_name=None):
2452 """ Method leverages vCloud director vms query call
2453
2454 Args:
2455 vca - is active VCA connection.
2456 vdc_name - is a vdc name that will be used to query vms action
2457
2458 Returns:
2459 The return XML respond
2460 """
2461 vca = self.connect()
2462 if vdc_name is None:
2463 return None
2464
2465 url_list = [vca.host, '/api/vms/query']
2466 vm_list_rest_call = ''.join(url_list)
2467
2468 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2469 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2470 vca.vcloud_session.organization.Link)
2471 if len(refs) == 1:
2472 response = Http.get(url=vm_list_rest_call,
2473 headers=vca.vcloud_session.get_vcloud_headers(),
2474 verify=vca.verify,
2475 logger=vca.logger)
2476 if response.status_code == requests.codes.ok:
2477 return response.content
2478
2479 return None
2480
2481 def get_vapp_list(self, vdc_name=None):
2482 """
2483 Method retrieves vApp list deployed vCloud director and returns a dictionary
2484 contains a list of all vapp deployed for queried VDC.
2485 The key for a dictionary is vApp UUID
2486
2487
2488 Args:
2489 vca - is active VCA connection.
2490 vdc_name - is a vdc name that will be used to query vms action
2491
2492 Returns:
2493 The return dictionary and key for each entry vapp UUID
2494 """
2495
2496 vapp_dict = {}
2497 if vdc_name is None:
2498 return vapp_dict
2499
2500 content = self.vms_view_action(vdc_name=vdc_name)
2501 try:
2502 vm_list_xmlroot = XmlElementTree.fromstring(content)
2503 for vm_xml in vm_list_xmlroot:
2504 if vm_xml.tag.split("}")[1] == 'VMRecord':
2505 if vm_xml.attrib['isVAppTemplate'] == 'true':
2506 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2507 if 'vappTemplate-' in rawuuid[0]:
2508 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2509 # vm and use raw UUID as key
2510 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2511 except:
2512 pass
2513
2514 return vapp_dict
2515
2516 def get_vm_list(self, vdc_name=None):
2517 """
2518 Method retrieves VM's list deployed vCloud director. It returns a dictionary
2519 contains a list of all VM's deployed for queried VDC.
2520 The key for a dictionary is VM UUID
2521
2522
2523 Args:
2524 vca - is active VCA connection.
2525 vdc_name - is a vdc name that will be used to query vms action
2526
2527 Returns:
2528 The return dictionary and key for each entry vapp UUID
2529 """
2530 vm_dict = {}
2531
2532 if vdc_name is None:
2533 return vm_dict
2534
2535 content = self.vms_view_action(vdc_name=vdc_name)
2536 try:
2537 vm_list_xmlroot = XmlElementTree.fromstring(content)
2538 for vm_xml in vm_list_xmlroot:
2539 if vm_xml.tag.split("}")[1] == 'VMRecord':
2540 if vm_xml.attrib['isVAppTemplate'] == 'false':
2541 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2542 if 'vm-' in rawuuid[0]:
2543 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2544 # vm and use raw UUID as key
2545 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2546 except:
2547 pass
2548
2549 return vm_dict
2550
2551 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2552 """
2553 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
2554 contains a list of all VM's deployed for queried VDC.
2555 The key for a dictionary is VM UUID
2556
2557
2558 Args:
2559 vca - is active VCA connection.
2560 vdc_name - is a vdc name that will be used to query vms action
2561
2562 Returns:
2563 The return dictionary and key for each entry vapp UUID
2564 """
2565 vm_dict = {}
2566 vca = self.connect()
2567 if not vca:
2568 raise vimconn.vimconnConnectionException("self.connect() is failed")
2569
2570 if vdc_name is None:
2571 return vm_dict
2572
2573 content = self.vms_view_action(vdc_name=vdc_name)
2574 try:
2575 vm_list_xmlroot = XmlElementTree.fromstring(content)
2576 for vm_xml in vm_list_xmlroot:
2577 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2578 # lookup done by UUID
2579 if isuuid:
2580 if vapp_name in vm_xml.attrib['container']:
2581 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2582 if 'vm-' in rawuuid[0]:
2583 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2584 break
2585 # lookup done by Name
2586 else:
2587 if vapp_name in vm_xml.attrib['name']:
2588 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2589 if 'vm-' in rawuuid[0]:
2590 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2591 break
2592 except:
2593 pass
2594
2595 return vm_dict
2596
2597 def get_network_action(self, network_uuid=None):
2598 """
2599 Method leverages vCloud director and query network based on network uuid
2600
2601 Args:
2602 vca - is active VCA connection.
2603 network_uuid - is a network uuid
2604
2605 Returns:
2606 The return XML respond
2607 """
2608
2609 if network_uuid is None:
2610 return None
2611
2612 url_list = [self.vca.host, '/api/network/', network_uuid]
2613 vm_list_rest_call = ''.join(url_list)
2614
2615 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2616 response = Http.get(url=vm_list_rest_call,
2617 headers=self.vca.vcloud_session.get_vcloud_headers(),
2618 verify=self.vca.verify,
2619 logger=self.vca.logger)
2620
2621 #Retry login if session expired & retry sending request
2622 if response.status_code == 403:
2623 response = self.retry_rest('GET', vm_list_rest_call)
2624
2625 if response.status_code == requests.codes.ok:
2626 return response.content
2627
2628 return None
2629
2630 def get_vcd_network(self, network_uuid=None):
2631 """
2632 Method retrieves available network from vCloud Director
2633
2634 Args:
2635 network_uuid - is VCD network UUID
2636
2637 Each element serialized as key : value pair
2638
2639 Following keys available for access. network_configuration['Gateway'}
2640 <Configuration>
2641 <IpScopes>
2642 <IpScope>
2643 <IsInherited>true</IsInherited>
2644 <Gateway>172.16.252.100</Gateway>
2645 <Netmask>255.255.255.0</Netmask>
2646 <Dns1>172.16.254.201</Dns1>
2647 <Dns2>172.16.254.202</Dns2>
2648 <DnsSuffix>vmwarelab.edu</DnsSuffix>
2649 <IsEnabled>true</IsEnabled>
2650 <IpRanges>
2651 <IpRange>
2652 <StartAddress>172.16.252.1</StartAddress>
2653 <EndAddress>172.16.252.99</EndAddress>
2654 </IpRange>
2655 </IpRanges>
2656 </IpScope>
2657 </IpScopes>
2658 <FenceMode>bridged</FenceMode>
2659
2660 Returns:
2661 The return dictionary and key for each entry vapp UUID
2662 """
2663
2664 network_configuration = {}
2665 if network_uuid is None:
2666 return network_uuid
2667
2668 try:
2669 content = self.get_network_action(network_uuid=network_uuid)
2670 vm_list_xmlroot = XmlElementTree.fromstring(content)
2671
2672 network_configuration['status'] = vm_list_xmlroot.get("status")
2673 network_configuration['name'] = vm_list_xmlroot.get("name")
2674 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
2675
2676 for child in vm_list_xmlroot:
2677 if child.tag.split("}")[1] == 'IsShared':
2678 network_configuration['isShared'] = child.text.strip()
2679 if child.tag.split("}")[1] == 'Configuration':
2680 for configuration in child.iter():
2681 tagKey = configuration.tag.split("}")[1].strip()
2682 if tagKey != "":
2683 network_configuration[tagKey] = configuration.text.strip()
2684 return network_configuration
2685 except Exception as exp :
2686 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
2687 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2688
2689 return network_configuration
2690
2691 def delete_network_action(self, network_uuid=None):
2692 """
2693 Method delete given network from vCloud director
2694
2695 Args:
2696 network_uuid - is a network uuid that client wish to delete
2697
2698 Returns:
2699 The return None or XML respond or false
2700 """
2701
2702 vca = self.connect_as_admin()
2703 if not vca:
2704 raise vimconn.vimconnConnectionException("self.connect() is failed")
2705 if network_uuid is None:
2706 return False
2707
2708 url_list = [vca.host, '/api/admin/network/', network_uuid]
2709 vm_list_rest_call = ''.join(url_list)
2710
2711 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2712 response = Http.delete(url=vm_list_rest_call,
2713 headers=vca.vcloud_session.get_vcloud_headers(),
2714 verify=vca.verify,
2715 logger=vca.logger)
2716
2717 if response.status_code == 202:
2718 return True
2719
2720 return False
2721
2722 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2723 ip_profile=None, isshared='true'):
2724 """
2725 Method create network in vCloud director
2726
2727 Args:
2728 network_name - is network name to be created.
2729 net_type - can be 'bridge','data','ptp','mgmt'.
2730 ip_profile is a dict containing the IP parameters of the network
2731 isshared - is a boolean
2732 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2733 It optional attribute. by default if no parent network indicate the first available will be used.
2734
2735 Returns:
2736 The return network uuid or return None
2737 """
2738
2739 new_network_name = [network_name, '-', str(uuid.uuid4())]
2740 content = self.create_network_rest(network_name=''.join(new_network_name),
2741 ip_profile=ip_profile,
2742 net_type=net_type,
2743 parent_network_uuid=parent_network_uuid,
2744 isshared=isshared)
2745 if content is None:
2746 self.logger.debug("Failed create network {}.".format(network_name))
2747 return None
2748
2749 try:
2750 vm_list_xmlroot = XmlElementTree.fromstring(content)
2751 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2752 if len(vcd_uuid) == 4:
2753 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2754 return vcd_uuid[3]
2755 except:
2756 self.logger.debug("Failed create network {}".format(network_name))
2757 return None
2758
2759 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2760 ip_profile=None, isshared='true'):
2761 """
2762 Method create network in vCloud director
2763
2764 Args:
2765 network_name - is network name to be created.
2766 net_type - can be 'bridge','data','ptp','mgmt'.
2767 ip_profile is a dict containing the IP parameters of the network
2768 isshared - is a boolean
2769 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2770 It optional attribute. by default if no parent network indicate the first available will be used.
2771
2772 Returns:
2773 The return network uuid or return None
2774 """
2775
2776 vca = self.connect_as_admin()
2777 if not vca:
2778 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2779 if network_name is None:
2780 return None
2781
2782 url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
2783 vm_list_rest_call = ''.join(url_list)
2784 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2785 response = Http.get(url=vm_list_rest_call,
2786 headers=vca.vcloud_session.get_vcloud_headers(),
2787 verify=vca.verify,
2788 logger=vca.logger)
2789
2790 provider_network = None
2791 available_networks = None
2792 add_vdc_rest_url = None
2793
2794 if response.status_code != requests.codes.ok:
2795 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2796 response.status_code))
2797 return None
2798 else:
2799 try:
2800 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2801 for child in vm_list_xmlroot:
2802 if child.tag.split("}")[1] == 'ProviderVdcReference':
2803 provider_network = child.attrib.get('href')
2804 # application/vnd.vmware.admin.providervdc+xml
2805 if child.tag.split("}")[1] == 'Link':
2806 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
2807 and child.attrib.get('rel') == 'add':
2808 add_vdc_rest_url = child.attrib.get('href')
2809 except:
2810 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2811 self.logger.debug("Respond body {}".format(response.content))
2812 return None
2813
2814 # find pvdc provided available network
2815 response = Http.get(url=provider_network,
2816 headers=vca.vcloud_session.get_vcloud_headers(),
2817 verify=vca.verify,
2818 logger=vca.logger)
2819 if response.status_code != requests.codes.ok:
2820 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2821 response.status_code))
2822 return None
2823
2824 # available_networks.split("/")[-1]
2825
2826 if parent_network_uuid is None:
2827 try:
2828 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2829 for child in vm_list_xmlroot.iter():
2830 if child.tag.split("}")[1] == 'AvailableNetworks':
2831 for networks in child.iter():
2832 # application/vnd.vmware.admin.network+xml
2833 if networks.attrib.get('href') is not None:
2834 available_networks = networks.attrib.get('href')
2835 break
2836 except:
2837 return None
2838
2839 try:
2840 #Configure IP profile of the network
2841 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
2842
2843 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
2844 subnet_rand = random.randint(0, 255)
2845 ip_base = "192.168.{}.".format(subnet_rand)
2846 ip_profile['subnet_address'] = ip_base + "0/24"
2847 else:
2848 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
2849
2850 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
2851 ip_profile['gateway_address']=ip_base + "1"
2852 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
2853 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
2854 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
2855 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
2856 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
2857 ip_profile['dhcp_start_address']=ip_base + "3"
2858 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
2859 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
2860 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
2861 ip_profile['dns_address']=ip_base + "2"
2862
2863 gateway_address=ip_profile['gateway_address']
2864 dhcp_count=int(ip_profile['dhcp_count'])
2865 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
2866
2867 if ip_profile['dhcp_enabled']==True:
2868 dhcp_enabled='true'
2869 else:
2870 dhcp_enabled='false'
2871 dhcp_start_address=ip_profile['dhcp_start_address']
2872
2873 #derive dhcp_end_address from dhcp_start_address & dhcp_count
2874 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
2875 end_ip_int += dhcp_count - 1
2876 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
2877
2878 ip_version=ip_profile['ip_version']
2879 dns_address=ip_profile['dns_address']
2880 except KeyError as exp:
2881 self.logger.debug("Create Network REST: Key error {}".format(exp))
2882 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
2883
2884 # either use client provided UUID or search for a first available
2885 # if both are not defined we return none
2886 if parent_network_uuid is not None:
2887 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
2888 add_vdc_rest_url = ''.join(url_list)
2889
2890 #Creating all networks as Direct Org VDC type networks.
2891 #Unused in case of Underlay (data/ptp) network interface.
2892 fence_mode="bridged"
2893 is_inherited='false'
2894 dns_list = dns_address.split(";")
2895 dns1 = dns_list[0]
2896 dns2_text = ""
2897 if len(dns_list) >= 2:
2898 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
2899 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2900 <Description>Openmano created</Description>
2901 <Configuration>
2902 <IpScopes>
2903 <IpScope>
2904 <IsInherited>{1:s}</IsInherited>
2905 <Gateway>{2:s}</Gateway>
2906 <Netmask>{3:s}</Netmask>
2907 <Dns1>{4:s}</Dns1>{5:s}
2908 <IsEnabled>{6:s}</IsEnabled>
2909 <IpRanges>
2910 <IpRange>
2911 <StartAddress>{7:s}</StartAddress>
2912 <EndAddress>{8:s}</EndAddress>
2913 </IpRange>
2914 </IpRanges>
2915 </IpScope>
2916 </IpScopes>
2917 <ParentNetwork href="{9:s}"/>
2918 <FenceMode>{10:s}</FenceMode>
2919 </Configuration>
2920 <IsShared>{11:s}</IsShared>
2921 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
2922 subnet_address, dns1, dns2_text, dhcp_enabled,
2923 dhcp_start_address, dhcp_end_address, available_networks,
2924 fence_mode, isshared)
2925
2926 headers = vca.vcloud_session.get_vcloud_headers()
2927 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
2928 try:
2929 response = Http.post(url=add_vdc_rest_url,
2930 headers=headers,
2931 data=data,
2932 verify=vca.verify,
2933 logger=vca.logger)
2934
2935 if response.status_code != 201:
2936 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
2937 .format(response.status_code,response.content))
2938 else:
2939 network = networkType.parseString(response.content, True)
2940 create_nw_task = network.get_Tasks().get_Task()[0]
2941
2942 # if we all ok we respond with content after network creation completes
2943 # otherwise by default return None
2944 if create_nw_task is not None:
2945 self.logger.debug("Create Network REST : Waiting for Network creation complete")
2946 status = vca.block_until_completed(create_nw_task)
2947 if status:
2948 return response.content
2949 else:
2950 self.logger.debug("create_network_rest task failed. Network Create response : {}"
2951 .format(response.content))
2952 except Exception as exp:
2953 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
2954
2955 return None
2956
2957 def convert_cidr_to_netmask(self, cidr_ip=None):
2958 """
2959 Method sets convert CIDR netmask address to normal IP format
2960 Args:
2961 cidr_ip : CIDR IP address
2962 Returns:
2963 netmask : Converted netmask
2964 """
2965 if cidr_ip is not None:
2966 if '/' in cidr_ip:
2967 network, net_bits = cidr_ip.split('/')
2968 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2969 else:
2970 netmask = cidr_ip
2971 return netmask
2972 return None
2973
2974 def get_provider_rest(self, vca=None):
2975 """
2976 Method gets provider vdc view from vcloud director
2977
2978 Args:
2979 network_name - is network name to be created.
2980 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2981 It optional attribute. by default if no parent network indicate the first available will be used.
2982
2983 Returns:
2984 The return xml content of respond or None
2985 """
2986
2987 url_list = [vca.host, '/api/admin']
2988 response = Http.get(url=''.join(url_list),
2989 headers=vca.vcloud_session.get_vcloud_headers(),
2990 verify=vca.verify,
2991 logger=vca.logger)
2992
2993 if response.status_code == requests.codes.ok:
2994 return response.content
2995 return None
2996
2997 def create_vdc(self, vdc_name=None):
2998
2999 vdc_dict = {}
3000
3001 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
3002 if xml_content is not None:
3003 try:
3004 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
3005 for child in task_resp_xmlroot:
3006 if child.tag.split("}")[1] == 'Owner':
3007 vdc_id = child.attrib.get('href').split("/")[-1]
3008 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
3009 return vdc_dict
3010 except:
3011 self.logger.debug("Respond body {}".format(xml_content))
3012
3013 return None
3014
3015 def create_vdc_from_tmpl_rest(self, vdc_name=None):
3016 """
3017 Method create vdc in vCloud director based on VDC template.
3018 it uses pre-defined template that must be named openmano
3019
3020 Args:
3021 vdc_name - name of a new vdc.
3022
3023 Returns:
3024 The return xml content of respond or None
3025 """
3026
3027 self.logger.info("Creating new vdc {}".format(vdc_name))
3028 vca = self.connect()
3029 if not vca:
3030 raise vimconn.vimconnConnectionException("self.connect() is failed")
3031 if vdc_name is None:
3032 return None
3033
3034 url_list = [vca.host, '/api/vdcTemplates']
3035 vm_list_rest_call = ''.join(url_list)
3036 response = Http.get(url=vm_list_rest_call,
3037 headers=vca.vcloud_session.get_vcloud_headers(),
3038 verify=vca.verify,
3039 logger=vca.logger)
3040
3041 # container url to a template
3042 vdc_template_ref = None
3043 try:
3044 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3045 for child in vm_list_xmlroot:
3046 # application/vnd.vmware.admin.providervdc+xml
3047 # we need find a template from witch we instantiate VDC
3048 if child.tag.split("}")[1] == 'VdcTemplate':
3049 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
3050 vdc_template_ref = child.attrib.get('href')
3051 except:
3052 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3053 self.logger.debug("Respond body {}".format(response.content))
3054 return None
3055
3056 # if we didn't found required pre defined template we return None
3057 if vdc_template_ref is None:
3058 return None
3059
3060 try:
3061 # instantiate vdc
3062 url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
3063 vm_list_rest_call = ''.join(url_list)
3064 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3065 <Source href="{1:s}"></Source>
3066 <Description>opnemano</Description>
3067 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
3068 headers = vca.vcloud_session.get_vcloud_headers()
3069 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
3070 response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
3071 logger=vca.logger)
3072
3073 vdc_task = taskType.parseString(response.content, True)
3074 if type(vdc_task) is GenericTask:
3075 self.vca.block_until_completed(vdc_task)
3076
3077 # if we all ok we respond with content otherwise by default None
3078 if response.status_code >= 200 and response.status_code < 300:
3079 return response.content
3080 return None
3081 except:
3082 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3083 self.logger.debug("Respond body {}".format(response.content))
3084
3085 return None
3086
3087 def create_vdc_rest(self, vdc_name=None):
3088 """
3089 Method create network in vCloud director
3090
3091 Args:
3092 network_name - is network name to be created.
3093 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3094 It optional attribute. by default if no parent network indicate the first available will be used.
3095
3096 Returns:
3097 The return network uuid or return None
3098 """
3099
3100 self.logger.info("Creating new vdc {}".format(vdc_name))
3101
3102 vca = self.connect_as_admin()
3103 if not vca:
3104 raise vimconn.vimconnConnectionException("self.connect() is failed")
3105 if vdc_name is None:
3106 return None
3107
3108 url_list = [vca.host, '/api/admin/org/', self.org_uuid]
3109 vm_list_rest_call = ''.join(url_list)
3110 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
3111 response = Http.get(url=vm_list_rest_call,
3112 headers=vca.vcloud_session.get_vcloud_headers(),
3113 verify=vca.verify,
3114 logger=vca.logger)
3115
3116 provider_vdc_ref = None
3117 add_vdc_rest_url = None
3118 available_networks = None
3119
3120 if response.status_code != requests.codes.ok:
3121 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3122 response.status_code))
3123 return None
3124 else:
3125 try:
3126 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3127 for child in vm_list_xmlroot:
3128 # application/vnd.vmware.admin.providervdc+xml
3129 if child.tag.split("}")[1] == 'Link':
3130 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
3131 and child.attrib.get('rel') == 'add':
3132 add_vdc_rest_url = child.attrib.get('href')
3133 except:
3134 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3135 self.logger.debug("Respond body {}".format(response.content))
3136 return None
3137
3138 response = self.get_provider_rest(vca=vca)
3139 try:
3140 vm_list_xmlroot = XmlElementTree.fromstring(response)
3141 for child in vm_list_xmlroot:
3142 if child.tag.split("}")[1] == 'ProviderVdcReferences':
3143 for sub_child in child:
3144 provider_vdc_ref = sub_child.attrib.get('href')
3145 except:
3146 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3147 self.logger.debug("Respond body {}".format(response))
3148 return None
3149
3150 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
3151 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
3152 <AllocationModel>ReservationPool</AllocationModel>
3153 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
3154 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
3155 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
3156 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
3157 <ProviderVdcReference
3158 name="Main Provider"
3159 href="{2:s}" />
3160 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
3161 escape(vdc_name),
3162 provider_vdc_ref)
3163
3164 headers = vca.vcloud_session.get_vcloud_headers()
3165 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
3166 response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
3167 logger=vca.logger)
3168
3169 # if we all ok we respond with content otherwise by default None
3170 if response.status_code == 201:
3171 return response.content
3172 return None
3173
3174 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
3175 """
3176 Method retrieve vapp detail from vCloud director
3177
3178 Args:
3179 vapp_uuid - is vapp identifier.
3180
3181 Returns:
3182 The return network uuid or return None
3183 """
3184
3185 parsed_respond = {}
3186 vca = None
3187
3188 if need_admin_access:
3189 vca = self.connect_as_admin()
3190 else:
3191 vca = self.vca
3192
3193 if not vca:
3194 raise vimconn.vimconnConnectionException("self.connect() is failed")
3195 if vapp_uuid is None:
3196 return None
3197
3198 url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
3199 get_vapp_restcall = ''.join(url_list)
3200
3201 if vca.vcloud_session and vca.vcloud_session.organization:
3202 response = Http.get(url=get_vapp_restcall,
3203 headers=vca.vcloud_session.get_vcloud_headers(),
3204 verify=vca.verify,
3205 logger=vca.logger)
3206
3207 if response.status_code == 403:
3208 if need_admin_access == False:
3209 response = self.retry_rest('GET', get_vapp_restcall)
3210
3211 if response.status_code != requests.codes.ok:
3212 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
3213 response.status_code))
3214 return parsed_respond
3215
3216 try:
3217 xmlroot_respond = XmlElementTree.fromstring(response.content)
3218 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
3219
3220 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
3221 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
3222 'vmw': 'http://www.vmware.com/schema/ovf',
3223 'vm': 'http://www.vmware.com/vcloud/v1.5',
3224 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
3225 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
3226 "xmlns":"http://www.vmware.com/vcloud/v1.5"
3227 }
3228
3229 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
3230 if created_section is not None:
3231 parsed_respond['created'] = created_section.text
3232
3233 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
3234 if network_section is not None and 'networkName' in network_section.attrib:
3235 parsed_respond['networkname'] = network_section.attrib['networkName']
3236
3237 ipscopes_section = \
3238 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
3239 namespaces)
3240 if ipscopes_section is not None:
3241 for ipscope in ipscopes_section:
3242 for scope in ipscope:
3243 tag_key = scope.tag.split("}")[1]
3244 if tag_key == 'IpRanges':
3245 ip_ranges = scope.getchildren()
3246 for ipblock in ip_ranges:
3247 for block in ipblock:
3248 parsed_respond[block.tag.split("}")[1]] = block.text
3249 else:
3250 parsed_respond[tag_key] = scope.text
3251
3252 # parse children section for other attrib
3253 children_section = xmlroot_respond.find('vm:Children/', namespaces)
3254 if children_section is not None:
3255 parsed_respond['name'] = children_section.attrib['name']
3256 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
3257 if "nestedHypervisorEnabled" in children_section.attrib else None
3258 parsed_respond['deployed'] = children_section.attrib['deployed']
3259 parsed_respond['status'] = children_section.attrib['status']
3260 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
3261 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
3262 nic_list = []
3263 for adapters in network_adapter:
3264 adapter_key = adapters.tag.split("}")[1]
3265 if adapter_key == 'PrimaryNetworkConnectionIndex':
3266 parsed_respond['primarynetwork'] = adapters.text
3267 if adapter_key == 'NetworkConnection':
3268 vnic = {}
3269 if 'network' in adapters.attrib:
3270 vnic['network'] = adapters.attrib['network']
3271 for adapter in adapters:
3272 setting_key = adapter.tag.split("}")[1]
3273 vnic[setting_key] = adapter.text
3274 nic_list.append(vnic)
3275
3276 for link in children_section:
3277 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3278 if link.attrib['rel'] == 'screen:acquireTicket':
3279 parsed_respond['acquireTicket'] = link.attrib
3280 if link.attrib['rel'] == 'screen:acquireMksTicket':
3281 parsed_respond['acquireMksTicket'] = link.attrib
3282
3283 parsed_respond['interfaces'] = nic_list
3284 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
3285 if vCloud_extension_section is not None:
3286 vm_vcenter_info = {}
3287 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
3288 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
3289 if vmext is not None:
3290 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
3291 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
3292
3293 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
3294 vm_virtual_hardware_info = {}
3295 if virtual_hardware_section is not None:
3296 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
3297 if item.find("rasd:Description",namespaces).text == "Hard disk":
3298 disk_size = item.find("rasd:HostResource" ,namespaces
3299 ).attrib["{"+namespaces['vm']+"}capacity"]
3300
3301 vm_virtual_hardware_info["disk_size"]= disk_size
3302 break
3303
3304 for link in virtual_hardware_section:
3305 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3306 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
3307 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
3308 break
3309
3310 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
3311 except Exception as exp :
3312 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
3313 return parsed_respond
3314
3315 def acuire_console(self, vm_uuid=None):
3316
3317 if vm_uuid is None:
3318 return None
3319
3320 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
3321 vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid)
3322 console_dict = vm_dict['acquireTicket']
3323 console_rest_call = console_dict['href']
3324
3325 response = Http.post(url=console_rest_call,
3326 headers=self.vca.vcloud_session.get_vcloud_headers(),
3327 verify=self.vca.verify,
3328 logger=self.vca.logger)
3329 if response.status_code == 403:
3330 response = self.retry_rest('POST', console_rest_call)
3331
3332 if response.status_code == requests.codes.ok:
3333 return response.content
3334
3335 return None
3336
3337 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3338 """
3339 Method retrieve vm disk details
3340
3341 Args:
3342 vapp_uuid - is vapp identifier.
3343 flavor_disk - disk size as specified in VNFD (flavor)
3344
3345 Returns:
3346 The return network uuid or return None
3347 """
3348 status = None
3349 try:
3350 #Flavor disk is in GB convert it into MB
3351 flavor_disk = int(flavor_disk) * 1024
3352 vm_details = self.get_vapp_details_rest(vapp_uuid)
3353 if vm_details:
3354 vm_name = vm_details["name"]
3355 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3356
3357 if vm_details and "vm_virtual_hardware" in vm_details:
3358 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3359 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3360
3361 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3362
3363 if flavor_disk > vm_disk:
3364 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3365 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3366 vm_disk, flavor_disk ))
3367 else:
3368 status = True
3369 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3370
3371 return status
3372 except Exception as exp:
3373 self.logger.info("Error occurred while modifing disk size {}".format(exp))
3374
3375
3376 def modify_vm_disk_rest(self, disk_href , disk_size):
3377 """
3378 Method retrieve modify vm disk size
3379
3380 Args:
3381 disk_href - vCD API URL to GET and PUT disk data
3382 disk_size - disk size as specified in VNFD (flavor)
3383
3384 Returns:
3385 The return network uuid or return None
3386 """
3387 if disk_href is None or disk_size is None:
3388 return None
3389
3390 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3391 response = Http.get(url=disk_href,
3392 headers=self.vca.vcloud_session.get_vcloud_headers(),
3393 verify=self.vca.verify,
3394 logger=self.vca.logger)
3395
3396 if response.status_code == 403:
3397 response = self.retry_rest('GET', disk_href)
3398
3399 if response.status_code != requests.codes.ok:
3400 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
3401 response.status_code))
3402 return None
3403 try:
3404 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
3405 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
3406 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
3407
3408 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
3409 if item.find("rasd:Description",namespaces).text == "Hard disk":
3410 disk_item = item.find("rasd:HostResource" ,namespaces )
3411 if disk_item is not None:
3412 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
3413 break
3414
3415 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
3416 xml_declaration=True)
3417
3418 #Send PUT request to modify disk size
3419 headers = self.vca.vcloud_session.get_vcloud_headers()
3420 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
3421
3422 response = Http.put(url=disk_href,
3423 data=data,
3424 headers=headers,
3425 verify=self.vca.verify, logger=self.logger)
3426
3427 if response.status_code == 403:
3428 add_headers = {'Content-Type': headers['Content-Type']}
3429 response = self.retry_rest('PUT', disk_href, add_headers, data)
3430
3431 if response.status_code != 202:
3432 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
3433 response.status_code))
3434 else:
3435 modify_disk_task = taskType.parseString(response.content, True)
3436 if type(modify_disk_task) is GenericTask:
3437 status = self.vca.block_until_completed(modify_disk_task)
3438 return status
3439
3440 return None
3441
3442 except Exception as exp :
3443 self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
3444 return None
3445
3446 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3447 """
3448 Method to attach pci devices to VM
3449
3450 Args:
3451 vapp_uuid - uuid of vApp/VM
3452 pci_devices - pci devices infromation as specified in VNFD (flavor)
3453
3454 Returns:
3455 The status of add pci device task , vm object and
3456 vcenter_conect object
3457 """
3458 vm_obj = None
3459 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3460 vcenter_conect, content = self.get_vcenter_content()
3461 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3462
3463 if vm_moref_id:
3464 try:
3465 no_of_pci_devices = len(pci_devices)
3466 if no_of_pci_devices > 0:
3467 #Get VM and its host
3468 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3469 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3470 if host_obj and vm_obj:
3471 #get PCI devies from host on which vapp is currently installed
3472 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3473
3474 if avilable_pci_devices is None:
3475 #find other hosts with active pci devices
3476 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3477 content,
3478 no_of_pci_devices
3479 )
3480
3481 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3482 #Migrate vm to the host where PCI devices are availble
3483 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3484 task = self.relocate_vm(new_host_obj, vm_obj)
3485 if task is not None:
3486 result = self.wait_for_vcenter_task(task, vcenter_conect)
3487 self.logger.info("Migrate VM status: {}".format(result))
3488 host_obj = new_host_obj
3489 else:
3490 self.logger.info("Fail to migrate VM : {}".format(result))
3491 raise vimconn.vimconnNotFoundException(
3492 "Fail to migrate VM : {} to host {}".format(
3493 vmname_andid,
3494 new_host_obj)
3495 )
3496
3497 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3498 #Add PCI devices one by one
3499 for pci_device in avilable_pci_devices:
3500 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3501 if task:
3502 status= self.wait_for_vcenter_task(task, vcenter_conect)
3503 if status:
3504 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3505 else:
3506 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3507 return True, vm_obj, vcenter_conect
3508 else:
3509 self.logger.error("Currently there is no host with"\
3510 " {} number of avaialble PCI devices required for VM {}".format(
3511 no_of_pci_devices,
3512 vmname_andid)
3513 )
3514 raise vimconn.vimconnNotFoundException(
3515 "Currently there is no host with {} "\
3516 "number of avaialble PCI devices required for VM {}".format(
3517 no_of_pci_devices,
3518 vmname_andid))
3519 else:
3520 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
3521
3522 except vmodl.MethodFault as error:
3523 self.logger.error("Error occurred while adding PCI devices {} ",error)
3524 return None, vm_obj, vcenter_conect
3525
3526 def get_vm_obj(self, content, mob_id):
3527 """
3528 Method to get the vsphere VM object associated with a given morf ID
3529 Args:
3530 vapp_uuid - uuid of vApp/VM
3531 content - vCenter content object
3532 mob_id - mob_id of VM
3533
3534 Returns:
3535 VM and host object
3536 """
3537 vm_obj = None
3538 host_obj = None
3539 try :
3540 container = content.viewManager.CreateContainerView(content.rootFolder,
3541 [vim.VirtualMachine], True
3542 )
3543 for vm in container.view:
3544 mobID = vm._GetMoId()
3545 if mobID == mob_id:
3546 vm_obj = vm
3547 host_obj = vm_obj.runtime.host
3548 break
3549 except Exception as exp:
3550 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3551 return host_obj, vm_obj
3552
3553 def get_pci_devices(self, host, need_devices):
3554 """
3555 Method to get the details of pci devices on given host
3556 Args:
3557 host - vSphere host object
3558 need_devices - number of pci devices needed on host
3559
3560 Returns:
3561 array of pci devices
3562 """
3563 all_devices = []
3564 all_device_ids = []
3565 used_devices_ids = []
3566
3567 try:
3568 if host:
3569 pciPassthruInfo = host.config.pciPassthruInfo
3570 pciDevies = host.hardware.pciDevice
3571
3572 for pci_status in pciPassthruInfo:
3573 if pci_status.passthruActive:
3574 for device in pciDevies:
3575 if device.id == pci_status.id:
3576 all_device_ids.append(device.id)
3577 all_devices.append(device)
3578
3579 #check if devices are in use
3580 avalible_devices = all_devices
3581 for vm in host.vm:
3582 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3583 vm_devices = vm.config.hardware.device
3584 for device in vm_devices:
3585 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3586 if device.backing.id in all_device_ids:
3587 for use_device in avalible_devices:
3588 if use_device.id == device.backing.id:
3589 avalible_devices.remove(use_device)
3590 used_devices_ids.append(device.backing.id)
3591 self.logger.debug("Device {} from devices {}"\
3592 "is in use".format(device.backing.id,
3593 device)
3594 )
3595 if len(avalible_devices) < need_devices:
3596 self.logger.debug("Host {} don't have {} number of active devices".format(host,
3597 need_devices))
3598 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
3599 avalible_devices))
3600 return None
3601 else:
3602 required_devices = avalible_devices[:need_devices]
3603 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
3604 len(avalible_devices),
3605 host,
3606 need_devices))
3607 self.logger.info("Retruning {} devices as {}".format(need_devices,
3608 required_devices ))
3609 return required_devices
3610
3611 except Exception as exp:
3612 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3613
3614 return None
3615
3616 def get_host_and_PCIdevices(self, content, need_devices):
3617 """
3618 Method to get the details of pci devices infromation on all hosts
3619
3620 Args:
3621 content - vSphere host object
3622 need_devices - number of pci devices needed on host
3623
3624 Returns:
3625 array of pci devices and host object
3626 """
3627 host_obj = None
3628 pci_device_objs = None
3629 try:
3630 if content:
3631 container = content.viewManager.CreateContainerView(content.rootFolder,
3632 [vim.HostSystem], True)
3633 for host in container.view:
3634 devices = self.get_pci_devices(host, need_devices)
3635 if devices:
3636 host_obj = host
3637 pci_device_objs = devices
3638 break
3639 except Exception as exp:
3640 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3641
3642 return host_obj,pci_device_objs
3643
3644 def relocate_vm(self, dest_host, vm) :
3645 """
3646 Method to get the relocate VM to new host
3647
3648 Args:
3649 dest_host - vSphere host object
3650 vm - vSphere VM object
3651
3652 Returns:
3653 task object
3654 """
3655 task = None
3656 try:
3657 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3658 task = vm.Relocate(relocate_spec)
3659 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3660 except Exception as exp:
3661 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
3662 dest_host, vm, exp))
3663 return task
3664
3665 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3666 """
3667 Waits and provides updates on a vSphere task
3668 """
3669 while task.info.state == vim.TaskInfo.State.running:
3670 time.sleep(2)
3671
3672 if task.info.state == vim.TaskInfo.State.success:
3673 if task.info.result is not None and not hideResult:
3674 self.logger.info('{} completed successfully, result: {}'.format(
3675 actionName,
3676 task.info.result))
3677 else:
3678 self.logger.info('Task {} completed successfully.'.format(actionName))
3679 else:
3680 self.logger.error('{} did not complete successfully: {} '.format(
3681 actionName,
3682 task.info.error)
3683 )
3684
3685 return task.info.result
3686
3687 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3688 """
3689 Method to add pci device in given VM
3690
3691 Args:
3692 host_object - vSphere host object
3693 vm_object - vSphere VM object
3694 host_pci_dev - host_pci_dev must be one of the devices from the
3695 host_object.hardware.pciDevice list
3696 which is configured as a PCI passthrough device
3697
3698 Returns:
3699 task object
3700 """
3701 task = None
3702 if vm_object and host_object and host_pci_dev:
3703 try :
3704 #Add PCI device to VM
3705 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3706 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3707
3708 if host_pci_dev.id not in systemid_by_pciid:
3709 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3710 return None
3711
3712 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3713 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3714 id=host_pci_dev.id,
3715 systemId=systemid_by_pciid[host_pci_dev.id],
3716 vendorId=host_pci_dev.vendorId,
3717 deviceName=host_pci_dev.deviceName)
3718
3719 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3720
3721 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3722 new_device_config.operation = "add"
3723 vmConfigSpec = vim.vm.ConfigSpec()
3724 vmConfigSpec.deviceChange = [new_device_config]
3725
3726 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3727 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3728 host_pci_dev, vm_object, host_object)
3729 )
3730 except Exception as exp:
3731 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
3732 host_pci_dev,
3733 vm_object,
3734 exp))
3735 return task
3736
3737 def get_vm_vcenter_info(self):
3738 """
3739 Method to get details of vCenter and vm
3740
3741 Args:
3742 vapp_uuid - uuid of vApp or VM
3743
3744 Returns:
3745 Moref Id of VM and deails of vCenter
3746 """
3747 vm_vcenter_info = {}
3748
3749 if self.vcenter_ip is not None:
3750 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3751 else:
3752 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3753 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3754 if self.vcenter_port is not None:
3755 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3756 else:
3757 raise vimconn.vimconnException(message="vCenter port is not provided."\
3758 " Please provide vCenter port while attaching datacenter to tenant in --config")
3759 if self.vcenter_user is not None:
3760 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3761 else:
3762 raise vimconn.vimconnException(message="vCenter user is not provided."\
3763 " Please provide vCenter user while attaching datacenter to tenant in --config")
3764
3765 if self.vcenter_password is not None:
3766 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3767 else:
3768 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3769 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3770
3771 return vm_vcenter_info
3772
3773
3774 def get_vm_pci_details(self, vmuuid):
3775 """
3776 Method to get VM PCI device details from vCenter
3777
3778 Args:
3779 vm_obj - vSphere VM object
3780
3781 Returns:
3782 dict of PCI devives attached to VM
3783
3784 """
3785 vm_pci_devices_info = {}
3786 try:
3787 vcenter_conect, content = self.get_vcenter_content()
3788 vm_moref_id = self.get_vm_moref_id(vmuuid)
3789 if vm_moref_id:
3790 #Get VM and its host
3791 if content:
3792 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3793 if host_obj and vm_obj:
3794 vm_pci_devices_info["host_name"]= host_obj.name
3795 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3796 for device in vm_obj.config.hardware.device:
3797 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3798 device_details={'devide_id':device.backing.id,
3799 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3800 }
3801 vm_pci_devices_info[device.deviceInfo.label] = device_details
3802 else:
3803 self.logger.error("Can not connect to vCenter while getting "\
3804 "PCI devices infromationn")
3805 return vm_pci_devices_info
3806 except Exception as exp:
3807 self.logger.error("Error occurred while getting VM infromationn"\
3808 " for VM : {}".format(exp))
3809 raise vimconn.vimconnException(message=exp)
3810
3811 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
3812 """
3813 Method to add network adapter type to vm
3814 Args :
3815 network_name - name of network
3816 primary_nic_index - int value for primary nic index
3817 nicIndex - int value for nic index
3818 nic_type - specify model name to which add to vm
3819 Returns:
3820 None
3821 """
3822
3823 try:
3824 ip_address = None
3825 floating_ip = False
3826 if 'floating_ip' in net: floating_ip = net['floating_ip']
3827
3828 # Stub for ip_address feature
3829 if 'ip_address' in net: ip_address = net['ip_address']
3830
3831 if floating_ip:
3832 allocation_mode = "POOL"
3833 elif ip_address:
3834 allocation_mode = "MANUAL"
3835 else:
3836 allocation_mode = "DHCP"
3837
3838 if not nic_type:
3839 for vms in vapp._get_vms():
3840 vm_id = (vms.id).split(':')[-1]
3841
3842 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3843
3844 response = Http.get(url=url_rest_call,
3845 headers=self.vca.vcloud_session.get_vcloud_headers(),
3846 verify=self.vca.verify,
3847 logger=self.vca.logger)
3848
3849 if response.status_code == 403:
3850 response = self.retry_rest('GET', url_rest_call)
3851
3852 if response.status_code != 200:
3853 self.logger.error("REST call {} failed reason : {}"\
3854 "status code : {}".format(url_rest_call,
3855 response.content,
3856 response.status_code))
3857 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3858 "network connection section")
3859
3860 data = response.content
3861 if '<PrimaryNetworkConnectionIndex>' not in data:
3862 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3863 <NetworkConnection network="{}">
3864 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3865 <IsConnected>true</IsConnected>
3866 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3867 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3868 allocation_mode)
3869 # Stub for ip_address feature
3870 if ip_address:
3871 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3872 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3873
3874 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3875 else:
3876 new_item = """<NetworkConnection network="{}">
3877 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3878 <IsConnected>true</IsConnected>
3879 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3880 </NetworkConnection>""".format(network_name, nicIndex,
3881 allocation_mode)
3882 # Stub for ip_address feature
3883 if ip_address:
3884 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3885 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3886
3887 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3888
3889 headers = self.vca.vcloud_session.get_vcloud_headers()
3890 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3891 response = Http.put(url=url_rest_call, headers=headers, data=data,
3892 verify=self.vca.verify,
3893 logger=self.vca.logger)
3894
3895 if response.status_code == 403:
3896 add_headers = {'Content-Type': headers['Content-Type']}
3897 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3898
3899 if response.status_code != 202:
3900 self.logger.error("REST call {} failed reason : {}"\
3901 "status code : {} ".format(url_rest_call,
3902 response.content,
3903 response.status_code))
3904 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3905 "network connection section")
3906 else:
3907 nic_task = taskType.parseString(response.content, True)
3908 if isinstance(nic_task, GenericTask):
3909 self.vca.block_until_completed(nic_task)
3910 self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
3911 "default NIC type".format(vm_id))
3912 else:
3913 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
3914 "connect NIC type".format(vm_id))
3915 else:
3916 for vms in vapp._get_vms():
3917 vm_id = (vms.id).split(':')[-1]
3918
3919 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3920
3921 response = Http.get(url=url_rest_call,
3922 headers=self.vca.vcloud_session.get_vcloud_headers(),
3923 verify=self.vca.verify,
3924 logger=self.vca.logger)
3925
3926 if response.status_code == 403:
3927 response = self.retry_rest('GET', url_rest_call)
3928
3929 if response.status_code != 200:
3930 self.logger.error("REST call {} failed reason : {}"\
3931 "status code : {}".format(url_rest_call,
3932 response.content,
3933 response.status_code))
3934 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3935 "network connection section")
3936 data = response.content
3937 if '<PrimaryNetworkConnectionIndex>' not in data:
3938 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3939 <NetworkConnection network="{}">
3940 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3941 <IsConnected>true</IsConnected>
3942 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3943 <NetworkAdapterType>{}</NetworkAdapterType>
3944 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3945 allocation_mode, nic_type)
3946 # Stub for ip_address feature
3947 if ip_address:
3948 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3949 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3950
3951 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3952 else:
3953 new_item = """<NetworkConnection network="{}">
3954 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3955 <IsConnected>true</IsConnected>
3956 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3957 <NetworkAdapterType>{}</NetworkAdapterType>
3958 </NetworkConnection>""".format(network_name, nicIndex,
3959 allocation_mode, nic_type)
3960 # Stub for ip_address feature
3961 if ip_address:
3962 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3963 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3964
3965 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3966
3967 headers = self.vca.vcloud_session.get_vcloud_headers()
3968 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3969 response = Http.put(url=url_rest_call, headers=headers, data=data,
3970 verify=self.vca.verify,
3971 logger=self.vca.logger)
3972
3973 if response.status_code == 403:
3974 add_headers = {'Content-Type': headers['Content-Type']}
3975 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3976
3977 if response.status_code != 202:
3978 self.logger.error("REST call {} failed reason : {}"\
3979 "status code : {}".format(url_rest_call,
3980 response.content,
3981 response.status_code))
3982 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3983 "network connection section")
3984 else:
3985 nic_task = taskType.parseString(response.content, True)
3986 if isinstance(nic_task, GenericTask):
3987 self.vca.block_until_completed(nic_task)
3988 self.logger.info("add_network_adapter_to_vms(): VM {} "\
3989 "conneced to NIC type {}".format(vm_id, nic_type))
3990 else:
3991 self.logger.error("add_network_adapter_to_vms(): VM {} "\
3992 "failed to connect NIC type {}".format(vm_id, nic_type))
3993 except Exception as exp:
3994 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
3995 "while adding Network adapter")
3996 raise vimconn.vimconnException(message=exp)
3997
3998
3999 def set_numa_affinity(self, vmuuid, paired_threads_id):
4000 """
4001 Method to assign numa affinity in vm configuration parammeters
4002 Args :
4003 vmuuid - vm uuid
4004 paired_threads_id - one or more virtual processor
4005 numbers
4006 Returns:
4007 return if True
4008 """
4009 try:
4010 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
4011 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
4012 context = None
4013 if hasattr(ssl, '_create_unverified_context'):
4014 context = ssl._create_unverified_context()
4015 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
4016 pwd=self.passwd, port=int(vm_vcenter_port),
4017 sslContext=context)
4018 atexit.register(Disconnect, vcenter_conect)
4019 content = vcenter_conect.RetrieveContent()
4020
4021 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
4022 if vm_obj:
4023 config_spec = vim.vm.ConfigSpec()
4024 config_spec.extraConfig = []
4025 opt = vim.option.OptionValue()
4026 opt.key = 'numa.nodeAffinity'
4027 opt.value = str(paired_threads_id)
4028 config_spec.extraConfig.append(opt)
4029 task = vm_obj.ReconfigVM_Task(config_spec)
4030 if task:
4031 result = self.wait_for_vcenter_task(task, vcenter_conect)
4032 extra_config = vm_obj.config.extraConfig
4033 flag = False
4034 for opts in extra_config:
4035 if 'numa.nodeAffinity' in opts.key:
4036 flag = True
4037 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
4038 "value {} for vm {}".format(opt.value, vm_obj))
4039 if flag:
4040 return
4041 else:
4042 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
4043 except Exception as exp:
4044 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
4045 "for VM {} : {}".format(vm_obj, vm_moref_id))
4046 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
4047 "affinity".format(exp))
4048
4049
4050 def cloud_init(self, vapp, cloud_config):
4051 """
4052 Method to inject ssh-key
4053 vapp - vapp object
4054 cloud_config a dictionary with:
4055 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
4056 'users': (optional) list of users to be inserted, each item is a dict with:
4057 'name': (mandatory) user name,
4058 'key-pairs': (optional) list of strings with the public key to be inserted to the user
4059 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
4060 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
4061 'config-files': (optional). List of files to be transferred. Each item is a dict with:
4062 'dest': (mandatory) string with the destination absolute path
4063 'encoding': (optional, by default text). Can be one of:
4064 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
4065 'content' (mandatory): string with the content of the file
4066 'permissions': (optional) string with file permissions, typically octal notation '0644'
4067 'owner': (optional) file owner, string with the format 'owner:group'
4068 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
4069 """
4070 try:
4071 if not isinstance(cloud_config, dict):
4072 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
4073 else:
4074 key_pairs = []
4075 userdata = []
4076 if "key-pairs" in cloud_config:
4077 key_pairs = cloud_config["key-pairs"]
4078
4079 if "users" in cloud_config:
4080 userdata = cloud_config["users"]
4081
4082 self.logger.debug("cloud_init : Guest os customization started..")
4083 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
4084 self.guest_customization(vapp, customize_script)
4085
4086 except Exception as exp:
4087 self.logger.error("cloud_init : exception occurred while injecting "\
4088 "ssh-key")
4089 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
4090 "ssh-key".format(exp))
4091
4092 def format_script(self, key_pairs=[], users_list=[]):
4093 bash_script = """
4094 #!/bin/bash
4095 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4096 if [ "$1" = "precustomization" ];then
4097 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
4098 """
4099
4100 keys = "\n".join(key_pairs)
4101 if keys:
4102 keys_data = """
4103 if [ ! -d /root/.ssh ];then
4104 mkdir /root/.ssh
4105 chown root:root /root/.ssh
4106 chmod 700 /root/.ssh
4107 touch /root/.ssh/authorized_keys
4108 chown root:root /root/.ssh/authorized_keys
4109 chmod 600 /root/.ssh/authorized_keys
4110 # make centos with selinux happy
4111 which restorecon && restorecon -Rv /root/.ssh
4112 else
4113 touch /root/.ssh/authorized_keys
4114 chown root:root /root/.ssh/authorized_keys
4115 chmod 600 /root/.ssh/authorized_keys
4116 fi
4117 echo '{key}' >> /root/.ssh/authorized_keys
4118 """.format(key=keys)
4119
4120 bash_script+= keys_data
4121
4122 for user in users_list:
4123 if 'name' in user: user_name = user['name']
4124 if 'key-pairs' in user:
4125 user_keys = "\n".join(user['key-pairs'])
4126 else:
4127 user_keys = None
4128
4129 add_user_name = """
4130 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
4131 """.format(user_name=user_name)
4132
4133 bash_script+= add_user_name
4134
4135 if user_keys:
4136 user_keys_data = """
4137 mkdir /home/{user_name}/.ssh
4138 chown {user_name}:{user_name} /home/{user_name}/.ssh
4139 chmod 700 /home/{user_name}/.ssh
4140 touch /home/{user_name}/.ssh/authorized_keys
4141 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
4142 chmod 600 /home/{user_name}/.ssh/authorized_keys
4143 # make centos with selinux happy
4144 which restorecon && restorecon -Rv /home/{user_name}/.ssh
4145 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
4146 """.format(user_name=user_name,user_key=user_keys)
4147
4148 bash_script+= user_keys_data
4149
4150 return bash_script+"\n\tfi"
4151
4152 def guest_customization(self, vapp, customize_script):
4153 """
4154 Method to customize guest os
4155 vapp - Vapp object
4156 customize_script - Customize script to be run at first boot of VM.
4157 """
4158 for vm in vapp._get_vms():
4159 vm_name = vm.name
4160 task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
4161 if isinstance(task, GenericTask):
4162 self.vca.block_until_completed(task)
4163 self.logger.info("guest_customization : customized guest os task "\
4164 "completed for VM {}".format(vm_name))
4165 else:
4166 self.logger.error("guest_customization : task for customized guest os"\
4167 "failed for VM {}".format(vm_name))
4168 raise vimconn.vimconnException("guest_customization : failed to perform"\
4169 "guest os customization on VM {}".format(vm_name))
4170
4171 def add_new_disk(self, vapp_uuid, disk_size):
4172 """
4173 Method to create an empty vm disk
4174
4175 Args:
4176 vapp_uuid - is vapp identifier.
4177 disk_size - size of disk to be created in GB
4178
4179 Returns:
4180 None
4181 """
4182 status = False
4183 vm_details = None
4184 try:
4185 #Disk size in GB, convert it into MB
4186 if disk_size is not None:
4187 disk_size_mb = int(disk_size) * 1024
4188 vm_details = self.get_vapp_details_rest(vapp_uuid)
4189
4190 if vm_details and "vm_virtual_hardware" in vm_details:
4191 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4192 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4193 status = self.add_new_disk_rest(disk_href, disk_size_mb)
4194
4195 except Exception as exp:
4196 msg = "Error occurred while creating new disk {}.".format(exp)
4197 self.rollback_newvm(vapp_uuid, msg)
4198
4199 if status:
4200 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4201 else:
4202 #If failed to add disk, delete VM
4203 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4204 self.rollback_newvm(vapp_uuid, msg)
4205
4206
4207 def add_new_disk_rest(self, disk_href, disk_size_mb):
4208 """
4209 Retrives vApp Disks section & add new empty disk
4210
4211 Args:
4212 disk_href: Disk section href to addd disk
4213 disk_size_mb: Disk size in MB
4214
4215 Returns: Status of add new disk task
4216 """
4217 status = False
4218 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
4219 response = Http.get(url=disk_href,
4220 headers=self.vca.vcloud_session.get_vcloud_headers(),
4221 verify=self.vca.verify,
4222 logger=self.vca.logger)
4223
4224 if response.status_code == 403:
4225 response = self.retry_rest('GET', disk_href)
4226
4227 if response.status_code != requests.codes.ok:
4228 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
4229 .format(disk_href, response.status_code))
4230 return status
4231 try:
4232 #Find but type & max of instance IDs assigned to disks
4233 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4234 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4235 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4236 instance_id = 0
4237 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4238 if item.find("rasd:Description",namespaces).text == "Hard disk":
4239 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
4240 if inst_id > instance_id:
4241 instance_id = inst_id
4242 disk_item = item.find("rasd:HostResource" ,namespaces)
4243 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
4244 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
4245
4246 instance_id = instance_id + 1
4247 new_item = """<Item>
4248 <rasd:Description>Hard disk</rasd:Description>
4249 <rasd:ElementName>New disk</rasd:ElementName>
4250 <rasd:HostResource
4251 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
4252 vcloud:capacity="{}"
4253 vcloud:busSubType="{}"
4254 vcloud:busType="{}"></rasd:HostResource>
4255 <rasd:InstanceID>{}</rasd:InstanceID>
4256 <rasd:ResourceType>17</rasd:ResourceType>
4257 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
4258
4259 new_data = response.content
4260 #Add new item at the bottom
4261 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
4262
4263 # Send PUT request to modify virtual hardware section with new disk
4264 headers = self.vca.vcloud_session.get_vcloud_headers()
4265 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4266
4267 response = Http.put(url=disk_href,
4268 data=new_data,
4269 headers=headers,
4270 verify=self.vca.verify, logger=self.logger)
4271
4272 if response.status_code == 403:
4273 add_headers = {'Content-Type': headers['Content-Type']}
4274 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
4275
4276 if response.status_code != 202:
4277 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
4278 .format(disk_href, response.status_code, response.content))
4279 else:
4280 add_disk_task = taskType.parseString(response.content, True)
4281 if type(add_disk_task) is GenericTask:
4282 status = self.vca.block_until_completed(add_disk_task)
4283 if not status:
4284 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
4285
4286 except Exception as exp:
4287 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
4288
4289 return status
4290
4291
4292 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
4293 """
4294 Method to add existing disk to vm
4295 Args :
4296 catalogs - List of VDC catalogs
4297 image_id - Catalog ID
4298 template_name - Name of template in catalog
4299 vapp_uuid - UUID of vApp
4300 Returns:
4301 None
4302 """
4303 disk_info = None
4304 vcenter_conect, content = self.get_vcenter_content()
4305 #find moref-id of vm in image
4306 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
4307 image_id=image_id,
4308 )
4309
4310 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
4311 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
4312 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
4313 if catalog_vm_moref_id:
4314 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
4315 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
4316 if catalog_vm_obj:
4317 #find existing disk
4318 disk_info = self.find_disk(catalog_vm_obj)
4319 else:
4320 exp_msg = "No VM with image id {} found".format(image_id)
4321 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4322 else:
4323 exp_msg = "No Image found with image ID {} ".format(image_id)
4324 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4325
4326 if disk_info:
4327 self.logger.info("Existing disk_info : {}".format(disk_info))
4328 #get VM
4329 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4330 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
4331 if vm_obj:
4332 status = self.add_disk(vcenter_conect=vcenter_conect,
4333 vm=vm_obj,
4334 disk_info=disk_info,
4335 size=size,
4336 vapp_uuid=vapp_uuid
4337 )
4338 if status:
4339 self.logger.info("Disk from image id {} added to {}".format(image_id,
4340 vm_obj.config.name)
4341 )
4342 else:
4343 msg = "No disk found with image id {} to add in VM {}".format(
4344 image_id,
4345 vm_obj.config.name)
4346 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4347
4348
4349 def find_disk(self, vm_obj):
4350 """
4351 Method to find details of existing disk in VM
4352 Args :
4353 vm_obj - vCenter object of VM
4354 image_id - Catalog ID
4355 Returns:
4356 disk_info : dict of disk details
4357 """
4358 disk_info = {}
4359 if vm_obj:
4360 try:
4361 devices = vm_obj.config.hardware.device
4362 for device in devices:
4363 if type(device) is vim.vm.device.VirtualDisk:
4364 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4365 disk_info["full_path"] = device.backing.fileName
4366 disk_info["datastore"] = device.backing.datastore
4367 disk_info["capacityKB"] = device.capacityInKB
4368 break
4369 except Exception as exp:
4370 self.logger.error("find_disk() : exception occurred while "\
4371 "getting existing disk details :{}".format(exp))
4372 return disk_info
4373
4374
4375 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4376 """
4377 Method to add existing disk in VM
4378 Args :
4379 vcenter_conect - vCenter content object
4380 vm - vCenter vm object
4381 disk_info : dict of disk details
4382 Returns:
4383 status : status of add disk task
4384 """
4385 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4386 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4387 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4388 if size is not None:
4389 #Convert size from GB to KB
4390 sizeKB = int(size) * 1024 * 1024
4391 #compare size of existing disk and user given size.Assign whicherver is greater
4392 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4393 sizeKB, capacityKB))
4394 if sizeKB > capacityKB:
4395 capacityKB = sizeKB
4396
4397 if datastore and fullpath and capacityKB:
4398 try:
4399 spec = vim.vm.ConfigSpec()
4400 # get all disks on a VM, set unit_number to the next available
4401 unit_number = 0
4402 for dev in vm.config.hardware.device:
4403 if hasattr(dev.backing, 'fileName'):
4404 unit_number = int(dev.unitNumber) + 1
4405 # unit_number 7 reserved for scsi controller
4406 if unit_number == 7:
4407 unit_number += 1
4408 if isinstance(dev, vim.vm.device.VirtualDisk):
4409 #vim.vm.device.VirtualSCSIController
4410 controller_key = dev.controllerKey
4411
4412 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4413 unit_number, controller_key))
4414 # add disk here
4415 dev_changes = []
4416 disk_spec = vim.vm.device.VirtualDeviceSpec()
4417 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4418 disk_spec.device = vim.vm.device.VirtualDisk()
4419 disk_spec.device.backing = \
4420 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4421 disk_spec.device.backing.thinProvisioned = True
4422 disk_spec.device.backing.diskMode = 'persistent'
4423 disk_spec.device.backing.datastore = datastore
4424 disk_spec.device.backing.fileName = fullpath
4425
4426 disk_spec.device.unitNumber = unit_number
4427 disk_spec.device.capacityInKB = capacityKB
4428 disk_spec.device.controllerKey = controller_key
4429 dev_changes.append(disk_spec)
4430 spec.deviceChange = dev_changes
4431 task = vm.ReconfigVM_Task(spec=spec)
4432 status = self.wait_for_vcenter_task(task, vcenter_conect)
4433 return status
4434 except Exception as exp:
4435 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4436 "{} to vm {}".format(exp,
4437 fullpath,
4438 vm.config.name)
4439 self.rollback_newvm(vapp_uuid, exp_msg)
4440 else:
4441 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4442 self.rollback_newvm(vapp_uuid, msg)
4443
4444
4445 def get_vcenter_content(self):
4446 """
4447 Get the vsphere content object
4448 """
4449 try:
4450 vm_vcenter_info = self.get_vm_vcenter_info()
4451 except Exception as exp:
4452 self.logger.error("Error occurred while getting vCenter infromationn"\
4453 " for VM : {}".format(exp))
4454 raise vimconn.vimconnException(message=exp)
4455
4456 context = None
4457 if hasattr(ssl, '_create_unverified_context'):
4458 context = ssl._create_unverified_context()
4459
4460 vcenter_conect = SmartConnect(
4461 host=vm_vcenter_info["vm_vcenter_ip"],
4462 user=vm_vcenter_info["vm_vcenter_user"],
4463 pwd=vm_vcenter_info["vm_vcenter_password"],
4464 port=int(vm_vcenter_info["vm_vcenter_port"]),
4465 sslContext=context
4466 )
4467 atexit.register(Disconnect, vcenter_conect)
4468 content = vcenter_conect.RetrieveContent()
4469 return vcenter_conect, content
4470
4471
4472 def get_vm_moref_id(self, vapp_uuid):
4473 """
4474 Get the moref_id of given VM
4475 """
4476 try:
4477 if vapp_uuid:
4478 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4479 if vm_details and "vm_vcenter_info" in vm_details:
4480 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4481
4482 return vm_moref_id
4483
4484 except Exception as exp:
4485 self.logger.error("Error occurred while getting VM moref ID "\
4486 " for VM : {}".format(exp))
4487 return None
4488
4489
4490 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
4491 """
4492 Method to get vApp template details
4493 Args :
4494 catalogs - list of VDC catalogs
4495 image_id - Catalog ID to find
4496 template_name : template name in catalog
4497 Returns:
4498 parsed_respond : dict of vApp tempalte details
4499 """
4500 parsed_response = {}
4501
4502 vca = self.connect_as_admin()
4503 if not vca:
4504 raise vimconn.vimconnConnectionException("self.connect() is failed")
4505
4506 try:
4507 catalog = self.get_catalog_obj(image_id, catalogs)
4508 if catalog:
4509 template_name = self.get_catalogbyid(image_id, catalogs)
4510 catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
4511 if len(catalog_items) == 1:
4512 response = Http.get(catalog_items[0].get_href(),
4513 headers=vca.vcloud_session.get_vcloud_headers(),
4514 verify=vca.verify,
4515 logger=vca.logger)
4516 catalogItem = XmlElementTree.fromstring(response.content)
4517 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
4518 vapp_tempalte_href = entity.get("href")
4519 #get vapp details and parse moref id
4520
4521 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4522 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4523 'vmw': 'http://www.vmware.com/schema/ovf',
4524 'vm': 'http://www.vmware.com/vcloud/v1.5',
4525 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4526 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
4527 'xmlns':"http://www.vmware.com/vcloud/v1.5"
4528 }
4529
4530 if vca.vcloud_session and vca.vcloud_session.organization:
4531 response = Http.get(url=vapp_tempalte_href,
4532 headers=vca.vcloud_session.get_vcloud_headers(),
4533 verify=vca.verify,
4534 logger=vca.logger
4535 )
4536
4537 if response.status_code != requests.codes.ok:
4538 self.logger.debug("REST API call {} failed. Return status code {}".format(
4539 vapp_tempalte_href, response.status_code))
4540
4541 else:
4542 xmlroot_respond = XmlElementTree.fromstring(response.content)
4543 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4544 if children_section is not None:
4545 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4546 if vCloud_extension_section is not None:
4547 vm_vcenter_info = {}
4548 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4549 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4550 if vmext is not None:
4551 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4552 parsed_response["vm_vcenter_info"]= vm_vcenter_info
4553
4554 except Exception as exp :
4555 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4556
4557 return parsed_response
4558
4559
4560 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
4561 """
4562 Method to delete vApp
4563 Args :
4564 vapp_uuid - vApp UUID
4565 msg - Error message to be logged
4566 exp_type : Exception type
4567 Returns:
4568 None
4569 """
4570 if vapp_uuid:
4571 status = self.delete_vminstance(vapp_uuid)
4572 else:
4573 msg = "No vApp ID"
4574 self.logger.error(msg)
4575 if exp_type == "Genric":
4576 raise vimconn.vimconnException(msg)
4577 elif exp_type == "NotFound":
4578 raise vimconn.vimconnNotFoundException(message=msg)
4579
4580 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4581 """
4582 Method to attach SRIOV adapters to VM
4583
4584 Args:
4585 vapp_uuid - uuid of vApp/VM
4586 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
4587 vmname_andid - vmname
4588
4589 Returns:
4590 The status of add SRIOV adapter task , vm object and
4591 vcenter_conect object
4592 """
4593 vm_obj = None
4594 vcenter_conect, content = self.get_vcenter_content()
4595 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4596
4597 if vm_moref_id:
4598 try:
4599 no_of_sriov_devices = len(sriov_nets)
4600 if no_of_sriov_devices > 0:
4601 #Get VM and its host
4602 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4603 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4604 if host_obj and vm_obj:
4605 #get SRIOV devies from host on which vapp is currently installed
4606 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4607 no_of_sriov_devices,
4608 )
4609
4610 if len(avilable_sriov_devices) == 0:
4611 #find other hosts with active pci devices
4612 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4613 content,
4614 no_of_sriov_devices,
4615 )
4616
4617 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4618 #Migrate vm to the host where SRIOV devices are available
4619 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4620 new_host_obj))
4621 task = self.relocate_vm(new_host_obj, vm_obj)
4622 if task is not None:
4623 result = self.wait_for_vcenter_task(task, vcenter_conect)
4624 self.logger.info("Migrate VM status: {}".format(result))
4625 host_obj = new_host_obj
4626 else:
4627 self.logger.info("Fail to migrate VM : {}".format(result))
4628 raise vimconn.vimconnNotFoundException(
4629 "Fail to migrate VM : {} to host {}".format(
4630 vmname_andid,
4631 new_host_obj)
4632 )
4633
4634 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4635 #Add SRIOV devices one by one
4636 for sriov_net in sriov_nets:
4637 network_name = sriov_net.get('net_id')
4638 dvs_portgr_name = self.create_dvPort_group(network_name)
4639 if sriov_net.get('type') == "VF":
4640 #add vlan ID ,Modify portgroup for vlan ID
4641 self.configure_vlanID(content, vcenter_conect, network_name)
4642
4643 task = self.add_sriov_to_vm(content,
4644 vm_obj,
4645 host_obj,
4646 network_name,
4647 avilable_sriov_devices[0]
4648 )
4649 if task:
4650 status= self.wait_for_vcenter_task(task, vcenter_conect)
4651 if status:
4652 self.logger.info("Added SRIOV {} to VM {}".format(
4653 no_of_sriov_devices,
4654 str(vm_obj)))
4655 else:
4656 self.logger.error("Fail to add SRIOV {} to VM {}".format(
4657 no_of_sriov_devices,
4658 str(vm_obj)))
4659 raise vimconn.vimconnUnexpectedResponse(
4660 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
4661 )
4662 return True, vm_obj, vcenter_conect
4663 else:
4664 self.logger.error("Currently there is no host with"\
4665 " {} number of avaialble SRIOV "\
4666 "VFs required for VM {}".format(
4667 no_of_sriov_devices,
4668 vmname_andid)
4669 )
4670 raise vimconn.vimconnNotFoundException(
4671 "Currently there is no host with {} "\
4672 "number of avaialble SRIOV devices required for VM {}".format(
4673 no_of_sriov_devices,
4674 vmname_andid))
4675 else:
4676 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
4677
4678 except vmodl.MethodFault as error:
4679 self.logger.error("Error occurred while adding SRIOV {} ",error)
4680 return None, vm_obj, vcenter_conect
4681
4682
4683 def get_sriov_devices(self,host, no_of_vfs):
4684 """
4685 Method to get the details of SRIOV devices on given host
4686 Args:
4687 host - vSphere host object
4688 no_of_vfs - number of VFs needed on host
4689
4690 Returns:
4691 array of SRIOV devices
4692 """
4693 sriovInfo=[]
4694 if host:
4695 for device in host.config.pciPassthruInfo:
4696 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4697 if device.numVirtualFunction >= no_of_vfs:
4698 sriovInfo.append(device)
4699 break
4700 return sriovInfo
4701
4702
4703 def get_host_and_sriov_devices(self, content, no_of_vfs):
4704 """
4705 Method to get the details of SRIOV devices infromation on all hosts
4706
4707 Args:
4708 content - vSphere host object
4709 no_of_vfs - number of pci VFs needed on host
4710
4711 Returns:
4712 array of SRIOV devices and host object
4713 """
4714 host_obj = None
4715 sriov_device_objs = None
4716 try:
4717 if content:
4718 container = content.viewManager.CreateContainerView(content.rootFolder,
4719 [vim.HostSystem], True)
4720 for host in container.view:
4721 devices = self.get_sriov_devices(host, no_of_vfs)
4722 if devices:
4723 host_obj = host
4724 sriov_device_objs = devices
4725 break
4726 except Exception as exp:
4727 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4728
4729 return host_obj,sriov_device_objs
4730
4731
4732 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
4733 """
4734 Method to add SRIOV adapter to vm
4735
4736 Args:
4737 host_obj - vSphere host object
4738 vm_obj - vSphere vm object
4739 content - vCenter content object
4740 network_name - name of distributed virtaul portgroup
4741 sriov_device - SRIOV device info
4742
4743 Returns:
4744 task object
4745 """
4746 devices = []
4747 vnic_label = "sriov nic"
4748 try:
4749 dvs_portgr = self.get_dvport_group(network_name)
4750 network_name = dvs_portgr.name
4751 nic = vim.vm.device.VirtualDeviceSpec()
4752 # VM device
4753 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4754 nic.device = vim.vm.device.VirtualSriovEthernetCard()
4755 nic.device.addressType = 'assigned'
4756 #nic.device.key = 13016
4757 nic.device.deviceInfo = vim.Description()
4758 nic.device.deviceInfo.label = vnic_label
4759 nic.device.deviceInfo.summary = network_name
4760 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
4761
4762 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
4763 nic.device.backing.deviceName = network_name
4764 nic.device.backing.useAutoDetect = False
4765 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
4766 nic.device.connectable.startConnected = True
4767 nic.device.connectable.allowGuestControl = True
4768
4769 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
4770 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
4771 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
4772
4773 devices.append(nic)
4774 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
4775 task = vm_obj.ReconfigVM_Task(vmconf)
4776 return task
4777 except Exception as exp:
4778 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
4779 return None
4780
4781
4782 def create_dvPort_group(self, network_name):
4783 """
4784 Method to create disributed virtual portgroup
4785
4786 Args:
4787 network_name - name of network/portgroup
4788
4789 Returns:
4790 portgroup key
4791 """
4792 try:
4793 new_network_name = [network_name, '-', str(uuid.uuid4())]
4794 network_name=''.join(new_network_name)
4795 vcenter_conect, content = self.get_vcenter_content()
4796
4797 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4798 if dv_switch:
4799 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4800 dv_pg_spec.name = network_name
4801
4802 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4803 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4804 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4805 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4806 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4807 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4808
4809 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4810 self.wait_for_vcenter_task(task, vcenter_conect)
4811
4812 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4813 if dvPort_group:
4814 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
4815 return dvPort_group.key
4816 else:
4817 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
4818
4819 except Exception as exp:
4820 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
4821 " : {}".format(network_name, exp))
4822 return None
4823
4824 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4825 """
4826 Method to reconfigure disributed virtual portgroup
4827
4828 Args:
4829 dvPort_group_name - name of disributed virtual portgroup
4830 content - vCenter content object
4831 config_info - disributed virtual portgroup configuration
4832
4833 Returns:
4834 task object
4835 """
4836 try:
4837 dvPort_group = self.get_dvport_group(dvPort_group_name)
4838 if dvPort_group:
4839 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4840 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4841 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4842 if "vlanID" in config_info:
4843 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4844 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4845
4846 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4847 return task
4848 else:
4849 return None
4850 except Exception as exp:
4851 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
4852 " : {}".format(dvPort_group_name, exp))
4853 return None
4854
4855
4856 def destroy_dvport_group(self , dvPort_group_name):
4857 """
4858 Method to destroy disributed virtual portgroup
4859
4860 Args:
4861 network_name - name of network/portgroup
4862
4863 Returns:
4864 True if portgroup successfully got deleted else false
4865 """
4866 vcenter_conect, content = self.get_vcenter_content()
4867 try:
4868 status = None
4869 dvPort_group = self.get_dvport_group(dvPort_group_name)
4870 if dvPort_group:
4871 task = dvPort_group.Destroy_Task()
4872 status = self.wait_for_vcenter_task(task, vcenter_conect)
4873 return status
4874 except vmodl.MethodFault as exp:
4875 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
4876 exp, dvPort_group_name))
4877 return None
4878
4879
4880 def get_dvport_group(self, dvPort_group_name):
4881 """
4882 Method to get disributed virtual portgroup
4883
4884 Args:
4885 network_name - name of network/portgroup
4886
4887 Returns:
4888 portgroup object
4889 """
4890 vcenter_conect, content = self.get_vcenter_content()
4891 dvPort_group = None
4892 try:
4893 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4894 for item in container.view:
4895 if item.key == dvPort_group_name:
4896 dvPort_group = item
4897 break
4898 return dvPort_group
4899 except vmodl.MethodFault as exp:
4900 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4901 exp, dvPort_group_name))
4902 return None
4903
4904 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4905 """
4906 Method to get disributed virtual portgroup vlanID
4907
4908 Args:
4909 network_name - name of network/portgroup
4910
4911 Returns:
4912 vlan ID
4913 """
4914 vlanId = None
4915 try:
4916 dvPort_group = self.get_dvport_group(dvPort_group_name)
4917 if dvPort_group:
4918 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4919 except vmodl.MethodFault as exp:
4920 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4921 exp, dvPort_group_name))
4922 return vlanId
4923
4924
4925 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4926 """
4927 Method to configure vlanID in disributed virtual portgroup vlanID
4928
4929 Args:
4930 network_name - name of network/portgroup
4931
4932 Returns:
4933 None
4934 """
4935 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4936 if vlanID == 0:
4937 #configure vlanID
4938 vlanID = self.genrate_vlanID(dvPort_group_name)
4939 config = {"vlanID":vlanID}
4940 task = self.reconfig_portgroup(content, dvPort_group_name,
4941 config_info=config)
4942 if task:
4943 status= self.wait_for_vcenter_task(task, vcenter_conect)
4944 if status:
4945 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4946 dvPort_group_name,vlanID))
4947 else:
4948 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
4949 dvPort_group_name, vlanID))
4950
4951
4952 def genrate_vlanID(self, network_name):
4953 """
4954 Method to get unused vlanID
4955 Args:
4956 network_name - name of network/portgroup
4957 Returns:
4958 vlanID
4959 """
4960 vlan_id = None
4961 used_ids = []
4962 if self.config.get('vlanID_range') == None:
4963 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4964 "at config value before creating sriov network with vlan tag")
4965 if "used_vlanIDs" not in self.persistent_info:
4966 self.persistent_info["used_vlanIDs"] = {}
4967 else:
4968 used_ids = self.persistent_info["used_vlanIDs"].values()
4969
4970 for vlanID_range in self.config.get('vlanID_range'):
4971 start_vlanid , end_vlanid = vlanID_range.split("-")
4972 if start_vlanid > end_vlanid:
4973 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4974 vlanID_range))
4975
4976 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4977 if id not in used_ids:
4978 vlan_id = id
4979 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4980 return vlan_id
4981 if vlan_id is None:
4982 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4983
4984
4985 def get_obj(self, content, vimtype, name):
4986 """
4987 Get the vsphere object associated with a given text name
4988 """
4989 obj = None
4990 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4991 for item in container.view:
4992 if item.name == name:
4993 obj = item
4994 break
4995 return obj
4996
4997
4998 def insert_media_to_vm(self, vapp, image_id):
4999 """
5000 Method to insert media CD-ROM (ISO image) from catalog to vm.
5001 vapp - vapp object to get vm id
5002 Image_id - image id for cdrom to be inerted to vm
5003 """
5004 # create connection object
5005 vca = self.connect()
5006 try:
5007 # fetching catalog details
5008 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
5009 response = Http.get(url=rest_url,
5010 headers=vca.vcloud_session.get_vcloud_headers(),
5011 verify=vca.verify,
5012 logger=vca.logger)
5013
5014 if response.status_code != 200:
5015 self.logger.error("REST call {} failed reason : {}"\
5016 "status code : {}".format(url_rest_call,
5017 response.content,
5018 response.status_code))
5019 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
5020 "catalog details")
5021 # searching iso name and id
5022 iso_name,media_id = self.get_media_details(vca, response.content)
5023
5024 if iso_name and media_id:
5025 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5026 <ns6:MediaInsertOrEjectParams
5027 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
5028 <ns6:Media
5029 type="application/vnd.vmware.vcloud.media+xml"
5030 name="{}.iso"
5031 id="urn:vcloud:media:{}"
5032 href="https://{}/api/media/{}"/>
5033 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
5034 vca.host,media_id)
5035
5036 for vms in vapp._get_vms():
5037 vm_id = (vms.id).split(':')[-1]
5038
5039 headers = vca.vcloud_session.get_vcloud_headers()
5040 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
5041 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
5042
5043 response = Http.post(url=rest_url,
5044 headers=headers,
5045 data=data,
5046 verify=vca.verify,
5047 logger=vca.logger)
5048
5049 if response.status_code != 202:
5050 self.logger.error("Failed to insert CD-ROM to vm")
5051 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
5052 "ISO image to vm")
5053 else:
5054 task = taskType.parseString(response.content, True)
5055 if isinstance(task, GenericTask):
5056 vca.block_until_completed(task)
5057 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
5058 " image to vm {}".format(vm_id))
5059 except Exception as exp:
5060 self.logger.error("insert_media_to_vm() : exception occurred "\
5061 "while inserting media CD-ROM")
5062 raise vimconn.vimconnException(message=exp)
5063
5064
5065 def get_media_details(self, vca, content):
5066 """
5067 Method to get catalog item details
5068 vca - connection object
5069 content - Catalog details
5070 Return - Media name, media id
5071 """
5072 cataloghref_list = []
5073 try:
5074 if content:
5075 vm_list_xmlroot = XmlElementTree.fromstring(content)
5076 for child in vm_list_xmlroot.iter():
5077 if 'CatalogItem' in child.tag:
5078 cataloghref_list.append(child.attrib.get('href'))
5079 if cataloghref_list is not None:
5080 for href in cataloghref_list:
5081 if href:
5082 response = Http.get(url=href,
5083 headers=vca.vcloud_session.get_vcloud_headers(),
5084 verify=vca.verify,
5085 logger=vca.logger)
5086 if response.status_code != 200:
5087 self.logger.error("REST call {} failed reason : {}"\
5088 "status code : {}".format(href,
5089 response.content,
5090 response.status_code))
5091 raise vimconn.vimconnException("get_media_details : Failed to get "\
5092 "catalogitem details")
5093 list_xmlroot = XmlElementTree.fromstring(response.content)
5094 for child in list_xmlroot.iter():
5095 if 'Entity' in child.tag:
5096 if 'media' in child.attrib.get('href'):
5097 name = child.attrib.get('name')
5098 media_id = child.attrib.get('href').split('/').pop()
5099 return name,media_id
5100 else:
5101 self.logger.debug("Media name and id not found")
5102 return False,False
5103 except Exception as exp:
5104 self.logger.error("get_media_details : exception occurred "\
5105 "getting media details")
5106 raise vimconn.vimconnException(message=exp)
5107
5108
5109 def retry_rest(self, method, url, add_headers=None, data=None):
5110 """ Method to get Token & retry respective REST request
5111 Args:
5112 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
5113 url - request url to be used
5114 add_headers - Additional headers (optional)
5115 data - Request payload data to be passed in request
5116 Returns:
5117 response - Response of request
5118 """
5119 response = None
5120
5121 #Get token
5122 self.get_token()
5123
5124 headers=self.vca.vcloud_session.get_vcloud_headers()
5125
5126 if add_headers:
5127 headers.update(add_headers)
5128
5129 if method == 'GET':
5130 response = Http.get(url=url,
5131 headers=headers,
5132 verify=self.vca.verify,
5133 logger=self.vca.logger)
5134 elif method == 'PUT':
5135 response = Http.put(url=url,
5136 data=data,
5137 headers=headers,
5138 verify=self.vca.verify,
5139 logger=self.logger)
5140 elif method == 'POST':
5141 response = Http.post(url=url,
5142 headers=headers,
5143 data=data,
5144 verify=self.vca.verify,
5145 logger=self.vca.logger)
5146 elif method == 'DELETE':
5147 response = Http.delete(url=url,
5148 headers=headers,
5149 verify=self.vca.verify,
5150 logger=self.vca.logger)
5151 return response
5152
5153
5154 def get_token(self):
5155 """ Generate a new token if expired
5156
5157 Returns:
5158 The return vca object that letter can be used to connect to vCloud director as admin for VDC
5159 """
5160 vca = None
5161
5162 try:
5163 self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
5164 self.user,
5165 self.org_name))
5166 vca = VCA(host=self.url,
5167 username=self.user,
5168 service_type=STANDALONE,
5169 version=VCAVERSION,
5170 verify=False,
5171 log=False)
5172
5173 result = vca.login(password=self.passwd, org=self.org_name)
5174 if result is True:
5175 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
5176 if result is True:
5177 self.logger.info(
5178 "Successfully generated token for vcloud direct org: {} as user: {}".format(self.org_name, self.user))
5179 #Update vca
5180 self.vca = vca
5181 return
5182
5183 except:
5184 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
5185 "{} as user: {}".format(self.org_name, self.user))
5186
5187 if not vca or not result:
5188 raise vimconn.vimconnConnectionException("self.connect() is failed while reconnecting")
5189
5190
5191 def get_vdc_details(self):
5192 """ Get VDC details using pyVcloud Lib
5193
5194 Returns vdc object
5195 """
5196 vdc = self.vca.get_vdc(self.tenant_name)
5197
5198 #Retry once, if failed by refreshing token
5199 if vdc is None:
5200 self.get_token()
5201 vdc = self.vca.get_vdc(self.tenant_name)
5202
5203 return vdc