Added code for CD-ROM device type
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
# global variable for vcd connector type (pyvcloud standalone service)
STANDALONE = 'standalone'

# keys used in the in-memory flavor dicts (see vimconnector.flavorlist)
FLAVOR_RAM_KEY = 'ram'
FLAVOR_VCPUS_KEY = 'vcpus'
FLAVOR_DISK_KEY = 'disk'
# defaults applied when a network ip_profile is not supplied by the caller
DEFAULT_IP_PROFILE = {'dhcp_count':50,
                      'dhcp_enabled':True,
                      'ip_version':"IPv4"
                      }
# global variable for wait time: polling interval and upper bound (seconds)
# used when blocking on vCD task completion
INTERVAL_TIME = 5
MAX_WAIT_TIME = 1800

# vCloud API version requested from pyvcloud's VCA client
VCAVERSION = '5.9'

__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
__date__ = "$12-Jan-2017 11:09:29$"
__version__ = '0.1'

# vCD vApp/entity status codes, for reference when reading the map below:
# -1: "Could not be created",
# 0: "Unresolved",
# 1: "Resolved",
# 2: "Deployed",
# 3: "Suspended",
# 4: "Powered on",
# 5: "Waiting for user input",
# 6: "Unknown state",
# 7: "Unrecognized state",
# 8: "Powered off",
# 9: "Inconsistent state",
# 10: "Children do not all have the same status",
# 11: "Upload initiated, OVF descriptor pending",
# 12: "Upload initiated, copying contents",
# 13: "Upload initiated , disk contents pending",
# 14: "Upload has been quarantined",
# 15: "Upload quarantine period has expired"

# mapping vCD status to MANO status strings (only the codes MANO cares about)
vcdStatusCode2manoFormat = {4: 'ACTIVE',
                            7: 'PAUSED',
                            3: 'SUSPENDED',
                            8: 'INACTIVE',
                            12: 'BUILD',
                            -1: 'ERROR',
                            14: 'DELETED'}

# identity map: network status strings are already in MANO format
netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
                        'ERROR': 'ERROR', 'DELETED': 'DELETED'
                        }
120
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
128 Constructor create vmware connector to vCloud director.
129
130 By default construct doesn't validate connection state. So client can create object with None arguments.
131 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
132
133 a) It initialize organization UUID
134 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
135
136 Args:
137 uuid - is organization uuid.
138 name - is organization name that must be presented in vCloud director.
139 tenant_id - is VDC uuid it must be presented in vCloud director
140 tenant_name - is VDC name.
141 url - is hostname or ip address of vCloud director
142 url_admin - same as above.
143 user - is user that administrator for organization. Caller must make sure that
144 username has right privileges.
145
146 password - is password for a user.
147
148 VMware connector also requires PVDC administrative privileges and separate account.
149 This variables must be passed via config argument dict contains keys
150
151 dict['admin_username']
152 dict['admin_password']
153 config - Provide NSX and vCenter information
154
155 Returns:
156 Nothing.
157 """
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
217 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
233 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 if index == 'tenant_id':
243 return self.tenant_id
244 if index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 if index == 'tenant_id':
269 self.tenant_id = value
270 if index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
289 def connect_as_admin(self):
290 """ Method connect as pvdc admin user to vCloud director.
291 There are certain action that can be done only by provider vdc admin user.
292 Organization creation / provider network creation etc.
293
294 Returns:
295 The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
296 """
297
298 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
299
300 vca_admin = VCA(host=self.url,
301 username=self.admin_user,
302 service_type=STANDALONE,
303 version=VCAVERSION,
304 verify=False,
305 log=False)
306 result = vca_admin.login(password=self.admin_password, org='System')
307 if not result:
308 raise vimconn.vimconnConnectionException(
309 "Can't connect to a vCloud director as: {}".format(self.admin_user))
310 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
311 if result is True:
312 self.logger.info(
313 "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
314
315 return vca_admin
316
    def connect(self):
        """ Method connect as normal user to vCloud director.

            Performs a two-phase login: first with the password to obtain a session
            token, then re-login with that token bound to the org URL.

            Returns:
                The return vca object that letter can be used to connect to vCloud director as admin for VDC

            Raises:
                vimconn.vimconnConnectionException when either login step fails.
        """

        try:
            self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
                                                                                      self.user,
                                                                                      self.org_name))
            # NOTE(review): TLS certificate verification is disabled (verify=False).
            vca = VCA(host=self.url,
                      username=self.user,
                      service_type=STANDALONE,
                      version=VCAVERSION,
                      verify=False,
                      log=False)

            result = vca.login(password=self.passwd, org=self.org_name)
            if not result:
                raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
            # re-login with the session token to bind the session to the org URL
            result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
            if result is True:
                self.logger.info(
                    "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))

        except:
            # bare except: any failure above (including the exception raised on a
            # failed password login) is re-raised as a uniform connection error
            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
                                                     "{} as user: {}".format(self.org_name, self.user))

        return vca
348
    def init_organization(self):
        """ Method initialize organization UUID and VDC parameters.

            At bare minimum client must provide organization name that present in vCloud director and VDC.

            The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
            The Org - UUID will be initialized at the run time if data center present in vCloud director.

            Best-effort: any failure is logged and leaves self.org_uuid as None
            instead of propagating.

            Returns:
                Nothing (attributes org_uuid / tenant_id / tenant_name are set in place).
        """
        try:
            if self.org_uuid is None:
                org_dict = self.get_org_list()
                for org in org_dict:
                    # we set org UUID at the init phase but we can do it only when we have valid credential.
                    if org_dict[org] == self.org_name:
                        self.org_uuid = org
                        self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
                        break
                else:
                    # for/else: reached only when no org matched (no break)
                    raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))

                # if well good we require for org details
                org_details_dict = self.get_org(org_uuid=self.org_uuid)

                # we have two case if we want to initialize VDC ID or VDC name at run time
                # tenant_name provided but no tenant id
                if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
                    vdcs_dict = org_details_dict['vdcs']
                    for vdc in vdcs_dict:
                        if vdcs_dict[vdc] == self.tenant_name:
                            self.tenant_id = vdc
                            self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
                                                                                                    self.org_name))
                            break
                    else:
                        # for/else: no VDC with that name
                        raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
                # case two we have tenant_id but we don't have tenant name so we find and set it.
                if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
                    vdcs_dict = org_details_dict['vdcs']
                    for vdc in vdcs_dict:
                        if vdc == self.tenant_id:
                            self.tenant_name = vdcs_dict[vdc]
                            self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
                                                                                                    self.org_name))
                            break
                    else:
                        # for/else: no VDC with that uuid
                        raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
                self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
        except:
            # deliberate best-effort: log and reset, callers detect org_uuid is None
            self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
            self.logger.debug(traceback.format_exc())
            self.org_uuid = None
403
404 def new_tenant(self, tenant_name=None, tenant_description=None):
405 """ Method adds a new tenant to VIM with this name.
406 This action requires access to create VDC action in vCloud director.
407
408 Args:
409 tenant_name is tenant_name to be created.
410 tenant_description not used for this call
411
412 Return:
413 returns the tenant identifier in UUID format.
414 If action is failed method will throw vimconn.vimconnException method
415 """
416 vdc_task = self.create_vdc(vdc_name=tenant_name)
417 if vdc_task is not None:
418 vdc_uuid, value = vdc_task.popitem()
419 self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
420 return vdc_uuid
421 else:
422 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
423
424 def delete_tenant(self, tenant_id=None):
425 """Delete a tenant from VIM"""
426 'Returns the tenant identifier'
427 raise vimconn.vimconnNotImplemented("Should have implemented this")
428
429 def get_tenant_list(self, filter_dict={}):
430 """Obtain tenants of VIM
431 filter_dict can contain the following keys:
432 name: filter by tenant name
433 id: filter by tenant uuid/id
434 <other VIM specific>
435 Returns the tenant list of dictionaries:
436 [{'name':'<name>, 'id':'<id>, ...}, ...]
437
438 """
439 org_dict = self.get_org(self.org_uuid)
440 vdcs_dict = org_dict['vdcs']
441
442 vdclist = []
443 try:
444 for k in vdcs_dict:
445 entry = {'name': vdcs_dict[k], 'id': k}
446 # if caller didn't specify dictionary we return all tenants.
447 if filter_dict is not None and filter_dict:
448 filtered_entry = entry.copy()
449 filtered_dict = set(entry.keys()) - set(filter_dict)
450 for unwanted_key in filtered_dict: del entry[unwanted_key]
451 if filter_dict == entry:
452 vdclist.append(filtered_entry)
453 else:
454 vdclist.append(entry)
455 except:
456 self.logger.debug("Error in get_tenant_list()")
457 self.logger.debug(traceback.format_exc())
458 raise vimconn.vimconnException("Incorrect state. {}")
459
460 return vdclist
461
462 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
463 """Adds a tenant network to VIM
464 net_name is the name
465 net_type can be 'bridge','data'.'ptp'.
466 ip_profile is a dict containing the IP parameters of the network
467 shared is a boolean
468 Returns the network identifier"""
469
470 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
471 .format(net_name, net_type, ip_profile, shared))
472
473 isshared = 'false'
474 if shared:
475 isshared = 'true'
476
477 # ############# Stub code for SRIOV #################
478 # if net_type == "data" or net_type == "ptp":
479 # if self.config.get('dv_switch_name') == None:
480 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
481 # network_uuid = self.create_dvPort_group(net_name)
482
483 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
484 ip_profile=ip_profile, isshared=isshared)
485 if network_uuid is not None:
486 return network_uuid
487 else:
488 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
489
490 def get_vcd_network_list(self):
491 """ Method available organization for a logged in tenant
492
493 Returns:
494 The return vca object that letter can be used to connect to vcloud direct as admin
495 """
496
497 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
498 vca = self.connect()
499 if not vca:
500 raise vimconn.vimconnConnectionException("self.connect() is failed.")
501
502 if not self.tenant_name:
503 raise vimconn.vimconnConnectionException("Tenant name is empty.")
504
505 vdc = vca.get_vdc(self.tenant_name)
506 if vdc is None:
507 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
508
509 vdc_uuid = vdc.get_id().split(":")[3]
510 networks = vca.get_networks(vdc.get_name())
511 network_list = []
512 try:
513 for network in networks:
514 filter_dict = {}
515 netid = network.get_id().split(":")
516 if len(netid) != 4:
517 continue
518
519 filter_dict["name"] = network.get_name()
520 filter_dict["id"] = netid[3]
521 filter_dict["shared"] = network.get_IsShared()
522 filter_dict["tenant_id"] = vdc_uuid
523 if network.get_status() == 1:
524 filter_dict["admin_state_up"] = True
525 else:
526 filter_dict["admin_state_up"] = False
527 filter_dict["status"] = "ACTIVE"
528 filter_dict["type"] = "bridge"
529 network_list.append(filter_dict)
530 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
531 except:
532 self.logger.debug("Error in get_vcd_network_list")
533 self.logger.debug(traceback.format_exc())
534 pass
535
536 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
537 return network_list
538
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name  OR/AND
            id: network uuid    OR/AND
            shared: boolean     OR/AND
            tenant_id: tenant   OR/AND
            admin_state_up: boolean
            status: 'ACTIVE'

        [{key : value , key : value}]

        Returns the network list of dictionaries:
            [{<the fields at Filter_dict plus some VIM specific>}, ...]
            List can be empty
        """

        self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")

        if not self.tenant_name:
            raise vimconn.vimconnConnectionException("Tenant name is empty.")

        vdc = vca.get_vdc(self.tenant_name)
        if vdc is None:
            raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))

        try:
            vdcid = vdc.get_id().split(":")[3]
            networks = vca.get_networks(vdc.get_name())
            network_list = []

            for network in networks:
                filter_entry = {}
                net_uuid = network.get_id().split(":")
                # network ids are urns of the form a:b:c:<uuid>; skip anything else
                if len(net_uuid) != 4:
                    continue
                else:
                    net_uuid = net_uuid[3]
                # create dict entry
                self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
                                                                                    vdcid,
                                                                                    network.get_name()))
                filter_entry["name"] = network.get_name()
                filter_entry["id"] = net_uuid
                filter_entry["shared"] = network.get_IsShared()
                filter_entry["tenant_id"] = vdcid
                if network.get_status() == 1:
                    filter_entry["admin_state_up"] = True
                else:
                    filter_entry["admin_state_up"] = False
                filter_entry["status"] = "ACTIVE"
                filter_entry["type"] = "bridge"
                # keep an untouched copy: the filter comparison below deletes keys
                filtered_entry = filter_entry.copy()

                if filter_dict is not None and filter_dict:
                    # we remove all the key : value we don't care and match only
                    # respected field
                    filtered_dict = set(filter_entry.keys()) - set(filter_dict)
                    for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
                    if filter_dict == filter_entry:
                        network_list.append(filtered_entry)
                else:
                    network_list.append(filtered_entry)
        except:
            # best-effort: log and fall through; network_list may be unbound if the
            # vdc query failed before the loop -- NOTE(review): the final return can
            # then raise NameError; confirm intended behaviour before changing
            self.logger.debug("Error in get_vcd_network_list")
            self.logger.debug(traceback.format_exc())

        self.logger.debug("Returning {}".format(network_list))
        return network_list
611
612 def get_network(self, net_id):
613 """Method obtains network details of net_id VIM network
614 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
615
616 vca = self.connect()
617 if not vca:
618 raise vimconn.vimconnConnectionException("self.connect() is failed")
619
620 try:
621 vdc = vca.get_vdc(self.tenant_name)
622 vdc_id = vdc.get_id().split(":")[3]
623
624 networks = vca.get_networks(vdc.get_name())
625 filter_dict = {}
626
627 for network in networks:
628 vdc_network_id = network.get_id().split(":")
629 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
630 filter_dict["name"] = network.get_name()
631 filter_dict["id"] = vdc_network_id[3]
632 filter_dict["shared"] = network.get_IsShared()
633 filter_dict["tenant_id"] = vdc_id
634 if network.get_status() == 1:
635 filter_dict["admin_state_up"] = True
636 else:
637 filter_dict["admin_state_up"] = False
638 filter_dict["status"] = "ACTIVE"
639 filter_dict["type"] = "bridge"
640 self.logger.debug("Returning {}".format(filter_dict))
641 return filter_dict
642 except:
643 self.logger.debug("Error in get_network")
644 self.logger.debug(traceback.format_exc())
645
646 return filter_dict
647
648 def delete_network(self, net_id):
649 """
650 Method Deletes a tenant network from VIM, provide the network id.
651
652 Returns the network identifier or raise an exception
653 """
654
655 vca = self.connect()
656 if not vca:
657 raise vimconn.vimconnConnectionException("self.connect() for tenant {} is failed.".format(self.tenant_name))
658
659 # ############# Stub code for SRIOV #################
660 # dvport_group = self.get_dvport_group(net_id)
661 # if dvport_group:
662 # #delete portgroup
663 # status = self.destroy_dvport_group(net_id)
664 # if status:
665 # # Remove vlanID from persistent info
666 # if net_id in self.persistent_info["used_vlanIDs"]:
667 # del self.persistent_info["used_vlanIDs"][net_id]
668 #
669 # return net_id
670
671 vcd_network = self.get_vcd_network(network_uuid=net_id)
672 if vcd_network is not None and vcd_network:
673 if self.delete_network_action(network_uuid=net_id):
674 return net_id
675 else:
676 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
677
678 def refresh_nets_status(self, net_list):
679 """Get the status of the networks
680 Params: the list of network identifiers
681 Returns a dictionary with:
682 net_id: #VIM id of this network
683 status: #Mandatory. Text with one of:
684 # DELETED (not found at vim)
685 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
686 # OTHER (Vim reported other status not understood)
687 # ERROR (VIM indicates an ERROR status)
688 # ACTIVE, INACTIVE, DOWN (admin down),
689 # BUILD (on building process)
690 #
691 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
692 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
693
694 """
695
696 vca = self.connect()
697 if not vca:
698 raise vimconn.vimconnConnectionException("self.connect() is failed")
699
700 dict_entry = {}
701 try:
702 for net in net_list:
703 errormsg = ''
704 vcd_network = self.get_vcd_network(network_uuid=net)
705 if vcd_network is not None and vcd_network:
706 if vcd_network['status'] == '1':
707 status = 'ACTIVE'
708 else:
709 status = 'DOWN'
710 else:
711 status = 'DELETED'
712 errormsg = 'Network not found.'
713
714 dict_entry[net] = {'status': status, 'error_msg': errormsg,
715 'vim_info': yaml.safe_dump(vcd_network)}
716 except:
717 self.logger.debug("Error in refresh_nets_status")
718 self.logger.debug(traceback.format_exc())
719
720 return dict_entry
721
722 def get_flavor(self, flavor_id):
723 """Obtain flavor details from the VIM
724 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
725 """
726 if flavor_id not in vimconnector.flavorlist:
727 raise vimconn.vimconnNotFoundException("Flavor not found.")
728 return vimconnector.flavorlist[flavor_id]
729
730 def new_flavor(self, flavor_data):
731 """Adds a tenant flavor to VIM
732 flavor_data contains a dictionary with information, keys:
733 name: flavor name
734 ram: memory (cloud type) in MBytes
735 vpcus: cpus (cloud type)
736 extended: EPA parameters
737 - numas: #items requested in same NUMA
738 memory: number of 1G huge pages memory
739 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
740 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
741 - name: interface name
742 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
743 bandwidth: X Gbps; requested guarantee bandwidth
744 vpci: requested virtual PCI address
745 disk: disk size
746 is_public:
747 #TODO to concrete
748 Returns the flavor identifier"""
749
750 # generate a new uuid put to internal dict and return it.
751 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
752 new_flavor=flavor_data
753 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
754 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
755 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
756
757 extended_flv = flavor_data.get("extended")
758 if extended_flv:
759 numas=extended_flv.get("numas")
760 if numas:
761 for numa in numas:
762 #overwrite ram and vcpus
763 ram = numa['memory']*1024
764 if 'paired-threads' in numa:
765 cpu = numa['paired-threads']*2
766 elif 'cores' in numa:
767 cpu = numa['cores']
768 elif 'threads' in numa:
769 cpu = numa['threads']
770
771 new_flavor[FLAVOR_RAM_KEY] = ram
772 new_flavor[FLAVOR_VCPUS_KEY] = cpu
773 new_flavor[FLAVOR_DISK_KEY] = disk
774 # generate a new uuid put to internal dict and return it.
775 flavor_id = uuid.uuid4()
776 vimconnector.flavorlist[str(flavor_id)] = new_flavor
777 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
778
779 return str(flavor_id)
780
781 def delete_flavor(self, flavor_id):
782 """Deletes a tenant flavor from VIM identify by its id
783
784 Returns the used id or raise an exception
785 """
786 if flavor_id not in vimconnector.flavorlist:
787 raise vimconn.vimconnNotFoundException("Flavor not found.")
788
789 vimconnector.flavorlist.pop(flavor_id, None)
790 return flavor_id
791
792 def new_image(self, image_dict):
793 """
794 Adds a tenant image to VIM
795 Returns:
796 200, image-id if the image is created
797 <0, message if there is an error
798 """
799
800 return self.get_image_id_from_path(image_dict['location'])
801
802 def delete_image(self, image_id):
803 """
804
805 :param image_id:
806 :return:
807 """
808
809 raise vimconn.vimconnNotImplemented("Should have implemented this")
810
811 def catalog_exists(self, catalog_name, catalogs):
812 """
813
814 :param catalog_name:
815 :param catalogs:
816 :return:
817 """
818 for catalog in catalogs:
819 if catalog.name == catalog_name:
820 return True
821 return False
822
823 def create_vimcatalog(self, vca=None, catalog_name=None):
824 """ Create new catalog entry in vCloud director.
825
826 Args
827 vca: vCloud director.
828 catalog_name catalog that client wish to create. Note no validation done for a name.
829 Client must make sure that provide valid string representation.
830
831 Return (bool) True if catalog created.
832
833 """
834 try:
835 task = vca.create_catalog(catalog_name, catalog_name)
836 result = vca.block_until_completed(task)
837 if not result:
838 return False
839 catalogs = vca.get_catalogs()
840 except:
841 return False
842 return self.catalog_exists(catalog_name, catalogs)
843
844 # noinspection PyIncorrectDocstring
845 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
846 description='', progress=False, chunk_bytes=128 * 1024):
847 """
848 Uploads a OVF file to a vCloud catalog
849
850 :param chunk_bytes:
851 :param progress:
852 :param description:
853 :param image_name:
854 :param vca:
855 :param catalog_name: (str): The name of the catalog to upload the media.
856 :param media_file_name: (str): The name of the local media file to upload.
857 :return: (bool) True if the media file was successfully uploaded, false otherwise.
858 """
859 os.path.isfile(media_file_name)
860 statinfo = os.stat(media_file_name)
861
862 # find a catalog entry where we upload OVF.
863 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
864 # status change.
865 # if VCD can parse OVF we upload VMDK file
866 try:
867 for catalog in vca.get_catalogs():
868 if catalog_name != catalog.name:
869 continue
870 link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
871 link.get_rel() == 'add', catalog.get_Link())
872 assert len(link) == 1
873 data = """
874 <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
875 """ % (escape(catalog_name), escape(description))
876 headers = vca.vcloud_session.get_vcloud_headers()
877 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
878 response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
879 if response.status_code == requests.codes.created:
880 catalogItem = XmlElementTree.fromstring(response.content)
881 entity = [child for child in catalogItem if
882 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
883 href = entity.get('href')
884 template = href
885 response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
886 verify=vca.verify, logger=self.logger)
887
888 if response.status_code == requests.codes.ok:
889 media = mediaType.parseString(response.content, True)
890 link = filter(lambda link: link.get_rel() == 'upload:default',
891 media.get_Files().get_File()[0].get_Link())[0]
892 headers = vca.vcloud_session.get_vcloud_headers()
893 headers['Content-Type'] = 'Content-Type text/xml'
894 response = Http.put(link.get_href(),
895 data=open(media_file_name, 'rb'),
896 headers=headers,
897 verify=vca.verify, logger=self.logger)
898 if response.status_code != requests.codes.ok:
899 self.logger.debug(
900 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
901 media_file_name))
902 return False
903
904 # TODO fix this with aync block
905 time.sleep(5)
906
907 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
908
909 # uploading VMDK file
910 # check status of OVF upload and upload remaining files.
911 response = Http.get(template,
912 headers=vca.vcloud_session.get_vcloud_headers(),
913 verify=vca.verify,
914 logger=self.logger)
915
916 if response.status_code == requests.codes.ok:
917 media = mediaType.parseString(response.content, True)
918 number_of_files = len(media.get_Files().get_File())
919 for index in xrange(0, number_of_files):
920 links_list = filter(lambda link: link.get_rel() == 'upload:default',
921 media.get_Files().get_File()[index].get_Link())
922 for link in links_list:
923 # we skip ovf since it already uploaded.
924 if 'ovf' in link.get_href():
925 continue
926 # The OVF file and VMDK must be in a same directory
927 head, tail = os.path.split(media_file_name)
928 file_vmdk = head + '/' + link.get_href().split("/")[-1]
929 if not os.path.isfile(file_vmdk):
930 return False
931 statinfo = os.stat(file_vmdk)
932 if statinfo.st_size == 0:
933 return False
934 hrefvmdk = link.get_href()
935
936 if progress:
937 print("Uploading file: {}".format(file_vmdk))
938 if progress:
939 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
940 FileTransferSpeed()]
941 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
942
943 bytes_transferred = 0
944 f = open(file_vmdk, 'rb')
945 while bytes_transferred < statinfo.st_size:
946 my_bytes = f.read(chunk_bytes)
947 if len(my_bytes) <= chunk_bytes:
948 headers = vca.vcloud_session.get_vcloud_headers()
949 headers['Content-Range'] = 'bytes %s-%s/%s' % (
950 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
951 headers['Content-Length'] = str(len(my_bytes))
952 response = Http.put(hrefvmdk,
953 headers=headers,
954 data=my_bytes,
955 verify=vca.verify,
956 logger=None)
957
958 if response.status_code == requests.codes.ok:
959 bytes_transferred += len(my_bytes)
960 if progress:
961 progress_bar.update(bytes_transferred)
962 else:
963 self.logger.debug(
964 'file upload failed with error: [%s] %s' % (response.status_code,
965 response.content))
966
967 f.close()
968 return False
969 f.close()
970 if progress:
971 progress_bar.finish()
972 time.sleep(10)
973 return True
974 else:
975 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
976 format(catalog_name, media_file_name))
977 return False
978 except Exception as exp:
979 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
980 .format(catalog_name,media_file_name, exp))
981 raise vimconn.vimconnException(
982 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
983 .format(catalog_name,media_file_name, exp))
984
985 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
986 return False
987
988 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
989 """Upload media file"""
990 # TODO add named parameters for readability
991
992 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
993 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
994
995 def validate_uuid4(self, uuid_string=None):
996 """ Method validate correct format of UUID.
997
998 Return: true if string represent valid uuid
999 """
1000 try:
1001 val = uuid.UUID(uuid_string, version=4)
1002 except ValueError:
1003 return False
1004 return True
1005
1006 def get_catalogid(self, catalog_name=None, catalogs=None):
1007 """ Method check catalog and return catalog ID in UUID format.
1008
1009 Args
1010 catalog_name: catalog name as string
1011 catalogs: list of catalogs.
1012
1013 Return: catalogs uuid
1014 """
1015
1016 for catalog in catalogs:
1017 if catalog.name == catalog_name:
1018 catalog_id = catalog.get_id().split(":")
1019 return catalog_id[3]
1020 return None
1021
1022 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1023 """ Method check catalog and return catalog name lookup done by catalog UUID.
1024
1025 Args
1026 catalog_name: catalog name as string
1027 catalogs: list of catalogs.
1028
1029 Return: catalogs name or None
1030 """
1031
1032 if not self.validate_uuid4(uuid_string=catalog_uuid):
1033 return None
1034
1035 for catalog in catalogs:
1036 catalog_id = catalog.get_id().split(":")[3]
1037 if catalog_id == catalog_uuid:
1038 return catalog.name
1039 return None
1040
1041 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1042 """ Method check catalog and return catalog name lookup done by catalog UUID.
1043
1044 Args
1045 catalog_name: catalog name as string
1046 catalogs: list of catalogs.
1047
1048 Return: catalogs name or None
1049 """
1050
1051 if not self.validate_uuid4(uuid_string=catalog_uuid):
1052 return None
1053
1054 for catalog in catalogs:
1055 catalog_id = catalog.get_id().split(":")[3]
1056 if catalog_id == catalog_uuid:
1057 return catalog
1058 return None
1059
1060 def get_image_id_from_path(self, path=None, progress=False):
1061 """ Method upload OVF image to vCloud director.
1062
1063 Each OVF image represented as single catalog entry in vcloud director.
1064 The method check for existing catalog entry. The check done by file name without file extension.
1065
1066 if given catalog name already present method will respond with existing catalog uuid otherwise
1067 it will create new catalog entry and upload OVF file to newly created catalog.
1068
1069 If method can't create catalog entry or upload a file it will throw exception.
1070
1071 Method accept boolean flag progress that will output progress bar. It useful method
1072 for standalone upload use case. In case to test large file upload.
1073
1074 Args
1075 path: - valid path to OVF file.
1076 progress - boolean progress bar show progress bar.
1077
1078 Return: if image uploaded correct method will provide image catalog UUID.
1079 """
1080 vca = self.connect()
1081 if not vca:
1082 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1083
1084 if not path:
1085 raise vimconn.vimconnException("Image path can't be None.")
1086
1087 if not os.path.isfile(path):
1088 raise vimconn.vimconnException("Can't read file. File not found.")
1089
1090 if not os.access(path, os.R_OK):
1091 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1092
1093 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1094
1095 dirpath, filename = os.path.split(path)
1096 flname, file_extension = os.path.splitext(path)
1097 if file_extension != '.ovf':
1098 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1099 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1100
1101 catalog_name = os.path.splitext(filename)[0]
1102 catalog_md5_name = hashlib.md5(path).hexdigest()
1103 self.logger.debug("File name {} Catalog Name {} file path {} "
1104 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1105
1106 try:
1107 catalogs = vca.get_catalogs()
1108 except Exception as exp:
1109 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1110 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1111
1112 if len(catalogs) == 0:
1113 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1114 result = self.create_vimcatalog(vca, catalog_md5_name)
1115 if not result:
1116 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1117 result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
1118 media_name=filename, medial_file_name=path, progress=progress)
1119 if not result:
1120 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1121 return self.get_catalogid(catalog_name, vca.get_catalogs())
1122 else:
1123 for catalog in catalogs:
1124 # search for existing catalog if we find same name we return ID
1125 # TODO optimize this
1126 if catalog.name == catalog_md5_name:
1127 self.logger.debug("Found existing catalog entry for {} "
1128 "catalog id {}".format(catalog_name,
1129 self.get_catalogid(catalog_md5_name, catalogs)))
1130 return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
1131
1132 # if we didn't find existing catalog we create a new one and upload image.
1133 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1134 result = self.create_vimcatalog(vca, catalog_md5_name)
1135 if not result:
1136 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1137
1138 result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
1139 media_name=filename, medial_file_name=path, progress=progress)
1140 if not result:
1141 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1142
1143 return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
1144
1145 def get_image_list(self, filter_dict={}):
1146 '''Obtain tenant images from VIM
1147 Filter_dict can be:
1148 name: image name
1149 id: image uuid
1150 checksum: image checksum
1151 location: image path
1152 Returns the image list of dictionaries:
1153 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1154 List can be empty
1155 '''
1156 vca = self.connect()
1157 if not vca:
1158 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1159 try:
1160 image_list = []
1161 catalogs = vca.get_catalogs()
1162 if len(catalogs) == 0:
1163 return image_list
1164 else:
1165 for catalog in catalogs:
1166 catalog_uuid = catalog.get_id().split(":")[3]
1167 name = catalog.name
1168 filtered_dict = {}
1169 if filter_dict.get("name") and filter_dict["name"] != name:
1170 continue
1171 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1172 continue
1173 filtered_dict ["name"] = name
1174 filtered_dict ["id"] = catalog_uuid
1175 image_list.append(filtered_dict)
1176
1177 self.logger.debug("List of already created catalog items: {}".format(image_list))
1178 return image_list
1179 except Exception as exp:
1180 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1181
1182 def get_vappid(self, vdc=None, vapp_name=None):
1183 """ Method takes vdc object and vApp name and returns vapp uuid or None
1184
1185 Args:
1186 vdc: The VDC object.
1187 vapp_name: is application vappp name identifier
1188
1189 Returns:
1190 The return vApp name otherwise None
1191 """
1192 if vdc is None or vapp_name is None:
1193 return None
1194 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1195 try:
1196 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1197 vdc.ResourceEntities.ResourceEntity)
1198 if len(refs) == 1:
1199 return refs[0].href.split("vapp")[1][1:]
1200 except Exception as e:
1201 self.logger.exception(e)
1202 return False
1203 return None
1204
1205 def check_vapp(self, vdc=None, vapp_uuid=None):
1206 """ Method Method returns True or False if vapp deployed in vCloud director
1207
1208 Args:
1209 vca: Connector to VCA
1210 vdc: The VDC object.
1211 vappid: vappid is application identifier
1212
1213 Returns:
1214 The return True if vApp deployed
1215 :param vdc:
1216 :param vapp_uuid:
1217 """
1218 try:
1219 refs = filter(lambda ref:
1220 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1221 vdc.ResourceEntities.ResourceEntity)
1222 for ref in refs:
1223 vappid = ref.href.split("vapp")[1][1:]
1224 # find vapp with respected vapp uuid
1225 if vappid == vapp_uuid:
1226 return True
1227 except Exception as e:
1228 self.logger.exception(e)
1229 return False
1230 return False
1231
1232 def get_namebyvappid(self, vca=None, vdc=None, vapp_uuid=None):
1233 """Method returns vApp name from vCD and lookup done by vapp_id.
1234
1235 Args:
1236 vca: Connector to VCA
1237 vdc: The VDC object.
1238 vapp_uuid: vappid is application identifier
1239
1240 Returns:
1241 The return vApp name otherwise None
1242 """
1243
1244 try:
1245 refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1246 vdc.ResourceEntities.ResourceEntity)
1247 for ref in refs:
1248 # we care only about UUID the rest doesn't matter
1249 vappid = ref.href.split("vapp")[1][1:]
1250 if vappid == vapp_uuid:
1251 response = Http.get(ref.href, headers=vca.vcloud_session.get_vcloud_headers(), verify=vca.verify,
1252 logger=self.logger)
1253 tree = XmlElementTree.fromstring(response.content)
1254 return tree.attrib['name']
1255 except Exception as e:
1256 self.logger.exception(e)
1257 return None
1258 return None
1259
1260 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
1261 cloud_config=None, disk_list=None):
1262 """Adds a VM instance to VIM
1263 Params:
1264 start: indicates if VM must start or boot in pause mode. Ignored
1265 image_id,flavor_id: image and flavor uuid
1266 net_list: list of interfaces, each one is a dictionary with:
1267 name:
1268 net_id: network uuid to connect
1269 vpci: virtual vcpi to assign
1270 model: interface model, virtio, e2000, ...
1271 mac_address:
1272 use: 'data', 'bridge', 'mgmt'
1273 type: 'virtual', 'PF', 'VF', 'VFnotShared'
1274 vim_id: filled/added by this function
1275 cloud_config: can be a text script to be passed directly to cloud-init,
1276 or an object to inject users and ssh keys with format:
1277 key-pairs: [] list of keys to install to the default user
1278 users: [{ name, key-pairs: []}] list of users to add with their key-pair
1279 #TODO ip, security groups
1280 Returns >=0, the instance identifier
1281 <0, error_text
1282 """
1283
1284 self.logger.info("Creating new instance for entry {}".format(name))
1285 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
1286 description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
1287 vca = self.connect()
1288 if not vca:
1289 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1290
1291 #new vm name = vmname + tenant_id + uuid
1292 new_vm_name = [name, '-', str(uuid.uuid4())]
1293 vmname_andid = ''.join(new_vm_name)
1294
1295 # if vm already deployed we return existing uuid
1296 # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
1297 # if vapp_uuid is not None:
1298 # return vapp_uuid
1299
1300 # we check for presence of VDC, Catalog entry and Flavor.
1301 vdc = vca.get_vdc(self.tenant_name)
1302 if vdc is None:
1303 raise vimconn.vimconnNotFoundException(
1304 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1305 catalogs = vca.get_catalogs()
1306 if catalogs is None:
1307 raise vimconn.vimconnNotFoundException(
1308 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1309
1310 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1311 if catalog_hash_name:
1312 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1313 else:
1314 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1315 "(Failed retrieve catalog information {})".format(name, image_id))
1316
1317
1318 # Set vCPU and Memory based on flavor.
1319 vm_cpus = None
1320 vm_memory = None
1321 vm_disk = None
1322
1323 if flavor_id is not None:
1324 if flavor_id not in vimconnector.flavorlist:
1325 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1326 "Failed retrieve flavor information "
1327 "flavor id {}".format(name, flavor_id))
1328 else:
1329 try:
1330 flavor = vimconnector.flavorlist[flavor_id]
1331 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1332 vm_memory = flavor[FLAVOR_RAM_KEY]
1333 vm_disk = flavor[FLAVOR_DISK_KEY]
1334 extended = flavor.get("extended", None)
1335 if extended:
1336 numas=extended.get("numas", None)
1337
1338 except Exception as exp:
1339 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1340
1341 # image upload creates template name as catalog name space Template.
1342 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1343 power_on = 'false'
1344 if start:
1345 power_on = 'true'
1346
1347 # client must provide at least one entry in net_list if not we report error
1348 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1349 #If no mgmt, then the 1st NN in netlist is considered as primary net.
1350 primary_net = None
1351 primary_netname = None
1352 network_mode = 'bridged'
1353 if net_list is not None and len(net_list) > 0:
1354 for net in net_list:
1355 if 'use' in net and net['use'] == 'mgmt':
1356 primary_net = net
1357 if primary_net is None:
1358 primary_net = net_list[0]
1359
1360 try:
1361 primary_net_id = primary_net['net_id']
1362 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1363 if 'name' in network_dict:
1364 primary_netname = network_dict['name']
1365
1366 except KeyError:
1367 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1368 else:
1369 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
1370
1371 # use: 'data', 'bridge', 'mgmt'
1372 # create vApp. Set vcpu and ram based on flavor id.
1373 try:
1374 vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1375 self.get_catalogbyid(image_id, catalogs),
1376 network_name=None, # None while creating vapp
1377 network_mode=network_mode,
1378 vm_name=vmname_andid,
1379 vm_cpus=vm_cpus, # can be None if flavor is None
1380 vm_memory=vm_memory) # can be None if flavor is None
1381
1382 if vapptask is None or vapptask is False:
1383 raise vimconn.vimconnUnexpectedResponse(
1384 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1385 if type(vapptask) is VappTask:
1386 vca.block_until_completed(vapptask)
1387
1388 except Exception as exp:
1389 raise vimconn.vimconnUnexpectedResponse(
1390 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1391
1392 # we should have now vapp in undeployed state.
1393 try:
1394 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
1395 vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
1396 except Exception as exp:
1397 raise vimconn.vimconnUnexpectedResponse(
1398 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1399 .format(vmname_andid, exp))
1400
1401 if vapp is None:
1402 raise vimconn.vimconnUnexpectedResponse(
1403 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1404 vmname_andid))
1405
1406 #Add PCI passthrough/SRIOV configrations
1407 vm_obj = None
1408 pci_devices_info = []
1409 sriov_net_info = []
1410 reserve_memory = False
1411
1412 for net in net_list:
1413 if net["type"]=="PF":
1414 pci_devices_info.append(net)
1415 elif (net["type"]=="VF" or net["type"]=="VFnotShared") and 'net_id'in net:
1416 sriov_net_info.append(net)
1417
1418 #Add PCI
1419 if len(pci_devices_info) > 0:
1420 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1421 vmname_andid ))
1422 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1423 pci_devices_info,
1424 vmname_andid)
1425 if PCI_devices_status:
1426 self.logger.info("Added PCI devives {} to VM {}".format(
1427 pci_devices_info,
1428 vmname_andid)
1429 )
1430 reserve_memory = True
1431 else:
1432 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1433 pci_devices_info,
1434 vmname_andid)
1435 )
1436 # Modify vm disk
1437 if vm_disk:
1438 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1439 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1440 if result :
1441 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1442
1443 #Add new or existing disks to vApp
1444 if disk_list:
1445 added_existing_disk = False
1446 for disk in disk_list:
1447 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1448 image_id = disk['image_id']
1449 # Adding CD-ROM to VM
1450 # will revisit code once specification ready to support this feature
1451 self.insert_media_to_vm(vapp, image_id)
1452 elif "image_id" in disk and disk["image_id"] is not None:
1453 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1454 disk["image_id"] , vapp_uuid))
1455 self.add_existing_disk(catalogs=catalogs,
1456 image_id=disk["image_id"],
1457 size = disk["size"],
1458 template_name=templateName,
1459 vapp_uuid=vapp_uuid
1460 )
1461 added_existing_disk = True
1462 else:
1463 #Wait till added existing disk gets reflected into vCD database/API
1464 if added_existing_disk:
1465 time.sleep(5)
1466 added_existing_disk = False
1467 self.add_new_disk(vca, vapp_uuid, disk['size'])
1468
1469 if numas:
1470 # Assigning numa affinity setting
1471 for numa in numas:
1472 if 'paired-threads-id' in numa:
1473 paired_threads_id = numa['paired-threads-id']
1474 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1475
1476 # add NICs & connect to networks in netlist
1477 try:
1478 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1479 nicIndex = 0
1480 primary_nic_index = 0
1481 for net in net_list:
1482 # openmano uses network id in UUID format.
1483 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
1484 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1485 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1486
1487 if 'net_id' not in net:
1488 continue
1489
1490 interface_net_id = net['net_id']
1491 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1492 interface_network_mode = net['use']
1493
1494 if interface_network_mode == 'mgmt':
1495 primary_nic_index = nicIndex
1496
1497 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1498 - DHCP (The IP address is obtained from a DHCP service.)
1499 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1500 - NONE (No IP addressing mode specified.)"""
1501
1502 if primary_netname is not None:
1503 nets = filter(lambda n: n.name == interface_net_name, vca.get_networks(self.tenant_name))
1504 if len(nets) == 1:
1505 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
1506 task = vapp.connect_to_network(nets[0].name, nets[0].href)
1507 if type(task) is GenericTask:
1508 vca.block_until_completed(task)
1509 # connect network to VM - with all DHCP by default
1510
1511 type_list = ['PF','VF','VFnotShared']
1512 if 'type' in net and net['type'] not in type_list:
1513 # fetching nic type from vnf
1514 if 'model' in net:
1515 nic_type = net['model']
1516 self.logger.info("new_vminstance(): adding network adapter "\
1517 "to a network {}".format(nets[0].name))
1518 self.add_network_adapter_to_vms(vapp, nets[0].name,
1519 primary_nic_index,
1520 nicIndex,
1521 net,
1522 nic_type=nic_type)
1523 else:
1524 self.logger.info("new_vminstance(): adding network adapter "\
1525 "to a network {}".format(nets[0].name))
1526 self.add_network_adapter_to_vms(vapp, nets[0].name,
1527 primary_nic_index,
1528 nicIndex,
1529 net)
1530 nicIndex += 1
1531
1532 # cloud-init for ssh-key injection
1533 if cloud_config:
1534 self.cloud_init(vapp,cloud_config)
1535
1536 # deploy and power on vm
1537 self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
1538 deploytask = vapp.deploy(powerOn=False)
1539 if type(deploytask) is GenericTask:
1540 vca.block_until_completed(deploytask)
1541
1542 # ############# Stub code for SRIOV #################
1543 #Add SRIOV
1544 # if len(sriov_net_info) > 0:
1545 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1546 # vmname_andid ))
1547 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1548 # sriov_net_info,
1549 # vmname_andid)
1550 # if sriov_status:
1551 # self.logger.info("Added SRIOV {} to VM {}".format(
1552 # sriov_net_info,
1553 # vmname_andid)
1554 # )
1555 # reserve_memory = True
1556 # else:
1557 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1558 # sriov_net_info,
1559 # vmname_andid)
1560 # )
1561
1562 # If VM has PCI devices or SRIOV reserve memory for VM
1563 if reserve_memory:
1564 memReserve = vm_obj.config.hardware.memoryMB
1565 spec = vim.vm.ConfigSpec()
1566 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1567 task = vm_obj.ReconfigVM_Task(spec=spec)
1568 if task:
1569 result = self.wait_for_vcenter_task(task, vcenter_conect)
1570 self.logger.info("Reserved memmoery {} MB for "\
1571 "VM VM status: {}".format(str(memReserve),result))
1572 else:
1573 self.logger.info("Fail to reserved memmoery {} to VM {}".format(
1574 str(memReserve),str(vm_obj)))
1575
1576 self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
1577 poweron_task = vapp.poweron()
1578 if type(poweron_task) is GenericTask:
1579 vca.block_until_completed(poweron_task)
1580
1581 except Exception as exp :
1582 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
1583 self.logger.debug("new_vminstance(): Failed create new vm instance {}".format(name, exp))
1584 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {}".format(name, exp))
1585
1586 # check if vApp deployed and if that the case return vApp UUID otherwise -1
1587 wait_time = 0
1588 vapp_uuid = None
1589 while wait_time <= MAX_WAIT_TIME:
1590 try:
1591 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
1592 except Exception as exp:
1593 raise vimconn.vimconnUnexpectedResponse(
1594 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1595 .format(vmname_andid, exp))
1596
1597 if vapp and vapp.me.deployed:
1598 vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
1599 break
1600 else:
1601 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1602 time.sleep(INTERVAL_TIME)
1603
1604 wait_time +=INTERVAL_TIME
1605
1606 if vapp_uuid is not None:
1607 return vapp_uuid
1608 else:
1609 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
1610
1611 ##
1612 ##
1613 ## based on current discussion
1614 ##
1615 ##
1616 ## server:
1617 # created: '2016-09-08T11:51:58'
1618 # description: simple-instance.linux1.1
1619 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1620 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1621 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1622 # status: ACTIVE
1623 # error_msg:
1624 # interfaces: …
1625 #
1626 def get_vminstance(self, vim_vm_uuid=None):
1627 """Returns the VM instance information from VIM"""
1628
1629 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1630 vca = self.connect()
1631 if not vca:
1632 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1633
1634 vdc = vca.get_vdc(self.tenant_name)
1635 if vdc is None:
1636 raise vimconn.vimconnConnectionException(
1637 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1638
1639 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1640 if not vm_info_dict:
1641 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1642 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1643
1644 status_key = vm_info_dict['status']
1645 error = ''
1646 try:
1647 vm_dict = {'created': vm_info_dict['created'],
1648 'description': vm_info_dict['name'],
1649 'status': vcdStatusCode2manoFormat[int(status_key)],
1650 'hostId': vm_info_dict['vmuuid'],
1651 'error_msg': error,
1652 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1653
1654 if 'interfaces' in vm_info_dict:
1655 vm_dict['interfaces'] = vm_info_dict['interfaces']
1656 else:
1657 vm_dict['interfaces'] = []
1658 except KeyError:
1659 vm_dict = {'created': '',
1660 'description': '',
1661 'status': vcdStatusCode2manoFormat[int(-1)],
1662 'hostId': vm_info_dict['vmuuid'],
1663 'error_msg': "Inconsistency state",
1664 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1665
1666 return vm_dict
1667
1668 def delete_vminstance(self, vm__vim_uuid):
1669 """Method poweroff and remove VM instance from vcloud director network.
1670
1671 Args:
1672 vm__vim_uuid: VM UUID
1673
1674 Returns:
1675 Returns the instance identifier
1676 """
1677
1678 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1679 vca = self.connect()
1680 if not vca:
1681 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1682
1683 vdc = vca.get_vdc(self.tenant_name)
1684 if vdc is None:
1685 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1686 self.tenant_name))
1687 raise vimconn.vimconnException(
1688 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1689
1690 try:
1691 vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
1692 if vapp_name is None:
1693 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1694 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1695 else:
1696 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1697
1698 # Delete vApp and wait for status change if task executed and vApp is None.
1699 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1700
1701 if vapp:
1702 if vapp.me.deployed:
1703 self.logger.info("Powering off vApp {}".format(vapp_name))
1704 #Power off vApp
1705 powered_off = False
1706 wait_time = 0
1707 while wait_time <= MAX_WAIT_TIME:
1708 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1709 if not vapp:
1710 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1711 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1712
1713 power_off_task = vapp.poweroff()
1714 if type(power_off_task) is GenericTask:
1715 result = vca.block_until_completed(power_off_task)
1716 if result:
1717 powered_off = True
1718 break
1719 else:
1720 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1721 time.sleep(INTERVAL_TIME)
1722
1723 wait_time +=INTERVAL_TIME
1724 if not powered_off:
1725 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1726 else:
1727 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1728
1729 #Undeploy vApp
1730 self.logger.info("Undeploy vApp {}".format(vapp_name))
1731 wait_time = 0
1732 undeployed = False
1733 while wait_time <= MAX_WAIT_TIME:
1734 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1735 if not vapp:
1736 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1737 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1738 undeploy_task = vapp.undeploy(action='powerOff')
1739
1740 if type(undeploy_task) is GenericTask:
1741 result = vca.block_until_completed(undeploy_task)
1742 if result:
1743 undeployed = True
1744 break
1745 else:
1746 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1747 time.sleep(INTERVAL_TIME)
1748
1749 wait_time +=INTERVAL_TIME
1750
1751 if not undeployed:
1752 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1753
1754 # delete vapp
1755 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1756 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1757
1758 if vapp is not None:
1759 wait_time = 0
1760 result = False
1761
1762 while wait_time <= MAX_WAIT_TIME:
1763 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1764 if not vapp:
1765 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1766 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1767
1768 delete_task = vapp.delete()
1769
1770 if type(delete_task) is GenericTask:
1771 vca.block_until_completed(delete_task)
1772 result = vca.block_until_completed(delete_task)
1773 if result:
1774 break
1775 else:
1776 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1777 time.sleep(INTERVAL_TIME)
1778
1779 wait_time +=INTERVAL_TIME
1780
1781 if not result:
1782 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1783
1784 except:
1785 self.logger.debug(traceback.format_exc())
1786 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1787
1788 if vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) is None:
1789 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
1790 return vm__vim_uuid
1791 else:
1792 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1793
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
           Params: the list of VM identifiers
           Returns a dictionary with:
                vm_id:          #VIM id of this Virtual Machine
                    status:     #Mandatory. Text with one of:
                                #  DELETED (not found at vim)
                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                #  OTHER (Vim reported other status not understood)
                                #  ERROR (VIM indicates an ERROR status)
                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                                #  CREATING (on building process), ERROR
                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                                #
                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                    interfaces:
                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
                        vim_net_id:       #network id where this interface is connected
                        vim_interface_id: #interface/port VIM id
                        ip_address:       #null, or text with IPv4, IPv6 address
        """

        self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))

        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")

        vdc = vca.get_vdc(self.tenant_name)
        if vdc is None:
            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))

        vms_dict = {}
        # NSX edge list is fetched lazily (only when some interface lacks an
        # IP) and cached for the whole vm_list iteration
        nsx_edge_list = []
        for vmuuid in vm_list:
            vmname = self.get_namebyvappid(vca, vdc, vmuuid)
            if vmname is not None:

                try:
                    the_vapp = vca.get_vapp(vdc, vmname)
                    vm_info = the_vapp.get_vms_details()
                    # NOTE(review): vm_status is currently unused below
                    vm_status = vm_info[0]['status']
                    # merge PCI passthrough details into the raw vim_info blob
                    vm_pci_details = self.get_vm_pci_details(vmuuid)
                    vm_info[0].update(vm_pci_details)

                    # vcdStatusCode2manoFormat maps vCD numeric status codes to
                    # MANO status strings (module-level table defined elsewhere
                    # in this file)
                    vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                               'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                               'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}

                    # get networks
                    vm_app_networks = the_vapp.get_vms_network_info()
                    for vapp_network in vm_app_networks:
                        for vm_network in vapp_network:
                            if vm_network['name'] == vmname:
                                #Assign IP Address based on MAC Address in NSX DHCP lease info
                                if vm_network['ip'] is None:
                                    if not nsx_edge_list:
                                        nsx_edge_list = self.get_edge_details()
                                        if nsx_edge_list is None:
                                            raise vimconn.vimconnException("refresh_vms_status:"\
                                                                           "Failed to get edge details from NSX Manager")
                                    if vm_network['mac'] is not None:
                                        vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])

                                vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
                                interface = {"mac_address": vm_network['mac'],
                                             "vim_net_id": vm_net_id,
                                             "vim_interface_id": vm_net_id,
                                             'ip_address': vm_network['ip']}
                                # interface['vim_info'] = yaml.safe_dump(vm_network)
                                vm_dict["interfaces"].append(interface)
                    # add a vm to vm dict
                    vms_dict.setdefault(vmuuid, vm_dict)
                except Exception as exp:
                    # per-VM failures are logged and skipped so one bad vApp
                    # does not abort the whole refresh
                    self.logger.debug("Error in response {}".format(exp))
                    self.logger.debug(traceback.format_exc())

        return vms_dict
1874
1875
    def get_edge_details(self):
        """Get the NSX edge list from NSX Manager.

        Issues GET /api/4.0/edges against the configured NSX Manager and
        collects the id of every edgeSummary in the paged response.

        Returns:
            list of NSX edge ids, or None when NSX Manager answered with a
            non-200 status.

        Raises:
            vimconn.vimconnException: when the response reports zero edges,
                contains no edgePage elements, or on any request/parse error.
        """
        edge_list = []
        rheaders = {'Content-Type': 'application/xml'}
        nsx_api_url = '/api/4.0/edges'

        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))

        try:
            resp = requests.get(self.nsx_manager + nsx_api_url,
                                auth = (self.nsx_user, self.nsx_password),
                                verify = False, headers = rheaders)
            if resp.status_code == requests.codes.ok:
                paged_Edge_List = XmlElementTree.fromstring(resp.text)
                for edge_pages in paged_Edge_List:
                    if edge_pages.tag == 'edgePage':
                        for edge_summary in edge_pages:
                            # totalCount == 0 in the paging info means NSX
                            # knows no edges at all
                            if edge_summary.tag == 'pagingInfo':
                                for element in edge_summary:
                                    if element.tag == 'totalCount' and element.text == '0':
                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
                                                                       .format(self.nsx_manager))

                            if edge_summary.tag == 'edgeSummary':
                                for element in edge_summary:
                                    if element.tag == 'id':
                                        edge_list.append(element.text)
                    else:
                        # any top-level child other than edgePage is treated
                        # as "no edge details available"
                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
                                                       .format(self.nsx_manager))

                if not edge_list:
                    raise vimconn.vimconnException("get_edge_details: "\
                                                   "No NSX edge details found: {}"
                                                   .format(self.nsx_manager))
                else:
                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
                    return edge_list
            else:
                self.logger.debug("get_edge_details: "
                                  "Failed to get NSX edge details from NSX Manager: {}"
                                  .format(resp.content))
                return None

        except Exception as exp:
            self.logger.debug("get_edge_details: "\
                              "Failed to get NSX edge details from NSX Manager: {}"
                              .format(exp))
            raise vimconn.vimconnException("get_edge_details: "\
                                           "Failed to get NSX edge details from NSX Manager: {}"
                                           .format(exp))
1929
1930
1931 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
1932 """Get IP address details from NSX edges, using the MAC address
1933 PARAMS: nsx_edges : List of NSX edges
1934 mac_address : Find IP address corresponding to this MAC address
1935 Returns: IP address corrresponding to the provided MAC address
1936 """
1937
1938 ip_addr = None
1939 rheaders = {'Content-Type': 'application/xml'}
1940
1941 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
1942
1943 try:
1944 for edge in nsx_edges:
1945 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
1946
1947 resp = requests.get(self.nsx_manager + nsx_api_url,
1948 auth = (self.nsx_user, self.nsx_password),
1949 verify = False, headers = rheaders)
1950
1951 if resp.status_code == requests.codes.ok:
1952 dhcp_leases = XmlElementTree.fromstring(resp.text)
1953 for child in dhcp_leases:
1954 if child.tag == 'dhcpLeaseInfo':
1955 dhcpLeaseInfo = child
1956 for leaseInfo in dhcpLeaseInfo:
1957 for elem in leaseInfo:
1958 if (elem.tag)=='macAddress':
1959 edge_mac_addr = elem.text
1960 if (elem.tag)=='ipAddress':
1961 ip_addr = elem.text
1962 if edge_mac_addr is not None:
1963 if edge_mac_addr == mac_address:
1964 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
1965 .format(ip_addr, mac_address,edge))
1966 return ip_addr
1967 else:
1968 self.logger.debug("get_ipaddr_from_NSXedge: "\
1969 "Error occurred while getting DHCP lease info from NSX Manager: {}"
1970 .format(resp.content))
1971
1972 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
1973 return None
1974
1975 except XmlElementTree.ParseError as Err:
1976 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
1977
1978
    def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
        """Send and action over a VM instance from VIM
        Returns the vm_id if the action was successfully sent to the VIM

        Args:
            vm__vim_uuid: vApp uuid of the VM instance in vCloud Director.
            action_dict: dict whose keys select the action; one of "start",
                "rebuild", "pause", "resume", "shutoff", "shutdown",
                "forceOff", "reboot".

        Raises:
            vimconn.vimconnException: bad arguments, unknown action, or any
                vCloud failure while executing the action.
        """

        self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
        if vm__vim_uuid is None or action_dict is None:
            raise vimconn.vimconnException("Invalid request. VM id or action is None.")

        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")

        vdc = vca.get_vdc(self.tenant_name)
        if vdc is None:
            # NOTE(review): returns a (code, message) tuple here while every
            # other failure raises -- callers should not rely on this shape
            return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)

        vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
        if vapp_name is None:
            self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
            raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
        else:
            self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))

        try:
            the_vapp = vca.get_vapp(vdc, vapp_name)
            # TODO fix all status
            if "start" in action_dict:
                vm_info = the_vapp.get_vms_details()
                vm_status = vm_info[0]['status']
                self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
                # power-on only makes sense from a non-running state
                if vm_status == "Suspended" or vm_status == "Powered off":
                    power_on_task = the_vapp.poweron()
                    result = vca.block_until_completed(power_on_task)
                    self.instance_actions_result("start", result, vapp_name)
            elif "rebuild" in action_dict:
                self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
                rebuild_task = the_vapp.deploy(powerOn=True)
                result = vca.block_until_completed(rebuild_task)
                self.instance_actions_result("rebuild", result, vapp_name)
            elif "pause" in action_dict:
                self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
                pause_task = the_vapp.undeploy(action='suspend')
                result = vca.block_until_completed(pause_task)
                self.instance_actions_result("pause", result, vapp_name)
            elif "resume" in action_dict:
                self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
                power_task = the_vapp.poweron()
                result = vca.block_until_completed(power_task)
                self.instance_actions_result("resume", result, vapp_name)
            elif "shutoff" in action_dict or "shutdown" in action_dict:
                # Python 2: dict.items() is a list, [0] picks the single
                # (action_name, value) pair
                action_name , value = action_dict.items()[0]
                self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
                power_off_task = the_vapp.undeploy(action='powerOff')
                result = vca.block_until_completed(power_off_task)
                if action_name == "shutdown":
                    self.instance_actions_result("shutdown", result, vapp_name)
                else:
                    self.instance_actions_result("shutoff", result, vapp_name)
            elif "forceOff" in action_dict:
                result = the_vapp.undeploy(action='force')
                self.instance_actions_result("forceOff", result, vapp_name)
            elif "reboot" in action_dict:
                self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
                # NOTE(review): reboot task result is neither awaited nor
                # reported via instance_actions_result
                reboot_task = the_vapp.reboot()
            else:
                raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
            return vm__vim_uuid
        except Exception as exp :
            self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
            raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2049
2050 def instance_actions_result(self, action, result, vapp_name):
2051 if result:
2052 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2053 else:
2054 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2055
2056 def get_vminstance_console(self, vm_id, console_type="vnc"):
2057 """
2058 Get a console for the virtual machine
2059 Params:
2060 vm_id: uuid of the VM
2061 console_type, can be:
2062 "novnc" (by default), "xvpvnc" for VNC types,
2063 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2064 Returns dict with the console parameters:
2065 protocol: ssh, ftp, http, https, ...
2066 server: usually ip address
2067 port: the http, ssh, ... port
2068 suffix: extra text, e.g. the http path and query string
2069 """
2070 raise vimconn.vimconnNotImplemented("Should have implemented this")
2071
2072 # NOT USED METHODS in current version
2073
2074 def host_vim2gui(self, host, server_dict):
2075 """Transform host dictionary from VIM format to GUI format,
2076 and append to the server_dict
2077 """
2078 raise vimconn.vimconnNotImplemented("Should have implemented this")
2079
2080 def get_hosts_info(self):
2081 """Get the information of deployed hosts
2082 Returns the hosts content"""
2083 raise vimconn.vimconnNotImplemented("Should have implemented this")
2084
2085 def get_hosts(self, vim_tenant):
2086 """Get the hosts and deployed instances
2087 Returns the hosts content"""
2088 raise vimconn.vimconnNotImplemented("Should have implemented this")
2089
2090 def get_processor_rankings(self):
2091 """Get the processor rankings in the VIM database"""
2092 raise vimconn.vimconnNotImplemented("Should have implemented this")
2093
2094 def new_host(self, host_data):
2095 """Adds a new host to VIM"""
2096 '''Returns status code of the VIM response'''
2097 raise vimconn.vimconnNotImplemented("Should have implemented this")
2098
2099 def new_external_port(self, port_data):
2100 """Adds a external port to VIM"""
2101 '''Returns the port identifier'''
2102 raise vimconn.vimconnNotImplemented("Should have implemented this")
2103
2104 def new_external_network(self, net_name, net_type):
2105 """Adds a external network to VIM (shared)"""
2106 '''Returns the network identifier'''
2107 raise vimconn.vimconnNotImplemented("Should have implemented this")
2108
2109 def connect_port_network(self, port_id, network_id, admin=False):
2110 """Connects a external port to a network"""
2111 '''Returns status code of the VIM response'''
2112 raise vimconn.vimconnNotImplemented("Should have implemented this")
2113
2114 def new_vminstancefromJSON(self, vm_data):
2115 """Adds a VM instance to VIM"""
2116 '''Returns the instance identifier'''
2117 raise vimconn.vimconnNotImplemented("Should have implemented this")
2118
2119 def get_network_name_by_id(self, network_uuid=None):
2120 """Method gets vcloud director network named based on supplied uuid.
2121
2122 Args:
2123 network_uuid: network_id
2124
2125 Returns:
2126 The return network name.
2127 """
2128
2129 vca = self.connect()
2130 if not vca:
2131 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2132
2133 if not network_uuid:
2134 return None
2135
2136 try:
2137 org_dict = self.get_org(self.org_uuid)
2138 if 'networks' in org_dict:
2139 org_network_dict = org_dict['networks']
2140 for net_uuid in org_network_dict:
2141 if net_uuid == network_uuid:
2142 return org_network_dict[net_uuid]
2143 except:
2144 self.logger.debug("Exception in get_network_name_by_id")
2145 self.logger.debug(traceback.format_exc())
2146
2147 return None
2148
2149 def get_network_id_by_name(self, network_name=None):
2150 """Method gets vcloud director network uuid based on supplied name.
2151
2152 Args:
2153 network_name: network_name
2154 Returns:
2155 The return network uuid.
2156 network_uuid: network_id
2157 """
2158
2159 vca = self.connect()
2160 if not vca:
2161 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2162
2163 if not network_name:
2164 self.logger.debug("get_network_id_by_name() : Network name is empty")
2165 return None
2166
2167 try:
2168 org_dict = self.get_org(self.org_uuid)
2169 if org_dict and 'networks' in org_dict:
2170 org_network_dict = org_dict['networks']
2171 for net_uuid,net_name in org_network_dict.iteritems():
2172 if net_name == network_name:
2173 return net_uuid
2174
2175 except KeyError as exp:
2176 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2177
2178 return None
2179
2180 def list_org_action(self):
2181 """
2182 Method leverages vCloud director and query for available organization for particular user
2183
2184 Args:
2185 vca - is active VCA connection.
2186 vdc_name - is a vdc name that will be used to query vms action
2187
2188 Returns:
2189 The return XML respond
2190 """
2191
2192 vca = self.connect()
2193 if not vca:
2194 raise vimconn.vimconnConnectionException("self.connect() is failed")
2195
2196 url_list = [vca.host, '/api/org']
2197 vm_list_rest_call = ''.join(url_list)
2198
2199 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2200 response = Http.get(url=vm_list_rest_call,
2201 headers=vca.vcloud_session.get_vcloud_headers(),
2202 verify=vca.verify,
2203 logger=vca.logger)
2204 if response.status_code == requests.codes.ok:
2205 return response.content
2206
2207 return None
2208
2209 def get_org_action(self, org_uuid=None):
2210 """
2211 Method leverages vCloud director and retrieve available object fdr organization.
2212
2213 Args:
2214 vca - is active VCA connection.
2215 vdc_name - is a vdc name that will be used to query vms action
2216
2217 Returns:
2218 The return XML respond
2219 """
2220
2221 vca = self.connect()
2222 if not vca:
2223 raise vimconn.vimconnConnectionException("self.connect() is failed")
2224
2225 if org_uuid is None:
2226 return None
2227
2228 url_list = [vca.host, '/api/org/', org_uuid]
2229 vm_list_rest_call = ''.join(url_list)
2230
2231 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2232 response = Http.get(url=vm_list_rest_call,
2233 headers=vca.vcloud_session.get_vcloud_headers(),
2234 verify=vca.verify,
2235 logger=vca.logger)
2236 if response.status_code == requests.codes.ok:
2237 return response.content
2238
2239 return None
2240
2241 def get_org(self, org_uuid=None):
2242 """
2243 Method retrieves available organization in vCloud Director
2244
2245 Args:
2246 org_uuid - is a organization uuid.
2247
2248 Returns:
2249 The return dictionary with following key
2250 "network" - for network list under the org
2251 "catalogs" - for network list under the org
2252 "vdcs" - for vdc list under org
2253 """
2254
2255 org_dict = {}
2256 vca = self.connect()
2257 if not vca:
2258 raise vimconn.vimconnConnectionException("self.connect() is failed")
2259
2260 if org_uuid is None:
2261 return org_dict
2262
2263 content = self.get_org_action(org_uuid=org_uuid)
2264 try:
2265 vdc_list = {}
2266 network_list = {}
2267 catalog_list = {}
2268 vm_list_xmlroot = XmlElementTree.fromstring(content)
2269 for child in vm_list_xmlroot:
2270 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2271 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2272 org_dict['vdcs'] = vdc_list
2273 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2274 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2275 org_dict['networks'] = network_list
2276 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2277 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2278 org_dict['catalogs'] = catalog_list
2279 except:
2280 pass
2281
2282 return org_dict
2283
2284 def get_org_list(self):
2285 """
2286 Method retrieves available organization in vCloud Director
2287
2288 Args:
2289 vca - is active VCA connection.
2290
2291 Returns:
2292 The return dictionary and key for each entry VDC UUID
2293 """
2294
2295 org_dict = {}
2296 vca = self.connect()
2297 if not vca:
2298 raise vimconn.vimconnConnectionException("self.connect() is failed")
2299
2300 content = self.list_org_action()
2301 try:
2302 vm_list_xmlroot = XmlElementTree.fromstring(content)
2303 for vm_xml in vm_list_xmlroot:
2304 if vm_xml.tag.split("}")[1] == 'Org':
2305 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2306 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2307 except:
2308 pass
2309
2310 return org_dict
2311
2312 def vms_view_action(self, vdc_name=None):
2313 """ Method leverages vCloud director vms query call
2314
2315 Args:
2316 vca - is active VCA connection.
2317 vdc_name - is a vdc name that will be used to query vms action
2318
2319 Returns:
2320 The return XML respond
2321 """
2322 vca = self.connect()
2323 if vdc_name is None:
2324 return None
2325
2326 url_list = [vca.host, '/api/vms/query']
2327 vm_list_rest_call = ''.join(url_list)
2328
2329 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2330 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2331 vca.vcloud_session.organization.Link)
2332 if len(refs) == 1:
2333 response = Http.get(url=vm_list_rest_call,
2334 headers=vca.vcloud_session.get_vcloud_headers(),
2335 verify=vca.verify,
2336 logger=vca.logger)
2337 if response.status_code == requests.codes.ok:
2338 return response.content
2339
2340 return None
2341
2342 def get_vapp_list(self, vdc_name=None):
2343 """
2344 Method retrieves vApp list deployed vCloud director and returns a dictionary
2345 contains a list of all vapp deployed for queried VDC.
2346 The key for a dictionary is vApp UUID
2347
2348
2349 Args:
2350 vca - is active VCA connection.
2351 vdc_name - is a vdc name that will be used to query vms action
2352
2353 Returns:
2354 The return dictionary and key for each entry vapp UUID
2355 """
2356
2357 vapp_dict = {}
2358 if vdc_name is None:
2359 return vapp_dict
2360
2361 content = self.vms_view_action(vdc_name=vdc_name)
2362 try:
2363 vm_list_xmlroot = XmlElementTree.fromstring(content)
2364 for vm_xml in vm_list_xmlroot:
2365 if vm_xml.tag.split("}")[1] == 'VMRecord':
2366 if vm_xml.attrib['isVAppTemplate'] == 'true':
2367 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2368 if 'vappTemplate-' in rawuuid[0]:
2369 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2370 # vm and use raw UUID as key
2371 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2372 except:
2373 pass
2374
2375 return vapp_dict
2376
2377 def get_vm_list(self, vdc_name=None):
2378 """
2379 Method retrieves VM's list deployed vCloud director. It returns a dictionary
2380 contains a list of all VM's deployed for queried VDC.
2381 The key for a dictionary is VM UUID
2382
2383
2384 Args:
2385 vca - is active VCA connection.
2386 vdc_name - is a vdc name that will be used to query vms action
2387
2388 Returns:
2389 The return dictionary and key for each entry vapp UUID
2390 """
2391 vm_dict = {}
2392
2393 if vdc_name is None:
2394 return vm_dict
2395
2396 content = self.vms_view_action(vdc_name=vdc_name)
2397 try:
2398 vm_list_xmlroot = XmlElementTree.fromstring(content)
2399 for vm_xml in vm_list_xmlroot:
2400 if vm_xml.tag.split("}")[1] == 'VMRecord':
2401 if vm_xml.attrib['isVAppTemplate'] == 'false':
2402 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2403 if 'vm-' in rawuuid[0]:
2404 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2405 # vm and use raw UUID as key
2406 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2407 except:
2408 pass
2409
2410 return vm_dict
2411
2412 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2413 """
2414 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
2415 contains a list of all VM's deployed for queried VDC.
2416 The key for a dictionary is VM UUID
2417
2418
2419 Args:
2420 vca - is active VCA connection.
2421 vdc_name - is a vdc name that will be used to query vms action
2422
2423 Returns:
2424 The return dictionary and key for each entry vapp UUID
2425 """
2426 vm_dict = {}
2427 vca = self.connect()
2428 if not vca:
2429 raise vimconn.vimconnConnectionException("self.connect() is failed")
2430
2431 if vdc_name is None:
2432 return vm_dict
2433
2434 content = self.vms_view_action(vdc_name=vdc_name)
2435 try:
2436 vm_list_xmlroot = XmlElementTree.fromstring(content)
2437 for vm_xml in vm_list_xmlroot:
2438 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2439 # lookup done by UUID
2440 if isuuid:
2441 if vapp_name in vm_xml.attrib['container']:
2442 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2443 if 'vm-' in rawuuid[0]:
2444 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2445 break
2446 # lookup done by Name
2447 else:
2448 if vapp_name in vm_xml.attrib['name']:
2449 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2450 if 'vm-' in rawuuid[0]:
2451 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2452 break
2453 except:
2454 pass
2455
2456 return vm_dict
2457
2458 def get_network_action(self, network_uuid=None):
2459 """
2460 Method leverages vCloud director and query network based on network uuid
2461
2462 Args:
2463 vca - is active VCA connection.
2464 network_uuid - is a network uuid
2465
2466 Returns:
2467 The return XML respond
2468 """
2469
2470 vca = self.connect()
2471 if not vca:
2472 raise vimconn.vimconnConnectionException("self.connect() is failed")
2473
2474 if network_uuid is None:
2475 return None
2476
2477 url_list = [vca.host, '/api/network/', network_uuid]
2478 vm_list_rest_call = ''.join(url_list)
2479
2480 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2481 response = Http.get(url=vm_list_rest_call,
2482 headers=vca.vcloud_session.get_vcloud_headers(),
2483 verify=vca.verify,
2484 logger=vca.logger)
2485 if response.status_code == requests.codes.ok:
2486 return response.content
2487
2488 return None
2489
2490 def get_vcd_network(self, network_uuid=None):
2491 """
2492 Method retrieves available network from vCloud Director
2493
2494 Args:
2495 network_uuid - is VCD network UUID
2496
2497 Each element serialized as key : value pair
2498
2499 Following keys available for access. network_configuration['Gateway'}
2500 <Configuration>
2501 <IpScopes>
2502 <IpScope>
2503 <IsInherited>true</IsInherited>
2504 <Gateway>172.16.252.100</Gateway>
2505 <Netmask>255.255.255.0</Netmask>
2506 <Dns1>172.16.254.201</Dns1>
2507 <Dns2>172.16.254.202</Dns2>
2508 <DnsSuffix>vmwarelab.edu</DnsSuffix>
2509 <IsEnabled>true</IsEnabled>
2510 <IpRanges>
2511 <IpRange>
2512 <StartAddress>172.16.252.1</StartAddress>
2513 <EndAddress>172.16.252.99</EndAddress>
2514 </IpRange>
2515 </IpRanges>
2516 </IpScope>
2517 </IpScopes>
2518 <FenceMode>bridged</FenceMode>
2519
2520 Returns:
2521 The return dictionary and key for each entry vapp UUID
2522 """
2523
2524 network_configuration = {}
2525 if network_uuid is None:
2526 return network_uuid
2527
2528 try:
2529 content = self.get_network_action(network_uuid=network_uuid)
2530 vm_list_xmlroot = XmlElementTree.fromstring(content)
2531
2532 network_configuration['status'] = vm_list_xmlroot.get("status")
2533 network_configuration['name'] = vm_list_xmlroot.get("name")
2534 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
2535
2536 for child in vm_list_xmlroot:
2537 if child.tag.split("}")[1] == 'IsShared':
2538 network_configuration['isShared'] = child.text.strip()
2539 if child.tag.split("}")[1] == 'Configuration':
2540 for configuration in child.iter():
2541 tagKey = configuration.tag.split("}")[1].strip()
2542 if tagKey != "":
2543 network_configuration[tagKey] = configuration.text.strip()
2544 return network_configuration
2545 except Exception as exp :
2546 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
2547 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2548
2549 return network_configuration
2550
2551 def delete_network_action(self, network_uuid=None):
2552 """
2553 Method delete given network from vCloud director
2554
2555 Args:
2556 network_uuid - is a network uuid that client wish to delete
2557
2558 Returns:
2559 The return None or XML respond or false
2560 """
2561
2562 vca = self.connect_as_admin()
2563 if not vca:
2564 raise vimconn.vimconnConnectionException("self.connect() is failed")
2565 if network_uuid is None:
2566 return False
2567
2568 url_list = [vca.host, '/api/admin/network/', network_uuid]
2569 vm_list_rest_call = ''.join(url_list)
2570
2571 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2572 response = Http.delete(url=vm_list_rest_call,
2573 headers=vca.vcloud_session.get_vcloud_headers(),
2574 verify=vca.verify,
2575 logger=vca.logger)
2576
2577 if response.status_code == 202:
2578 return True
2579
2580 return False
2581
2582 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2583 ip_profile=None, isshared='true'):
2584 """
2585 Method create network in vCloud director
2586
2587 Args:
2588 network_name - is network name to be created.
2589 net_type - can be 'bridge','data','ptp','mgmt'.
2590 ip_profile is a dict containing the IP parameters of the network
2591 isshared - is a boolean
2592 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2593 It optional attribute. by default if no parent network indicate the first available will be used.
2594
2595 Returns:
2596 The return network uuid or return None
2597 """
2598
2599 new_network_name = [network_name, '-', str(uuid.uuid4())]
2600 content = self.create_network_rest(network_name=''.join(new_network_name),
2601 ip_profile=ip_profile,
2602 net_type=net_type,
2603 parent_network_uuid=parent_network_uuid,
2604 isshared=isshared)
2605 if content is None:
2606 self.logger.debug("Failed create network {}.".format(network_name))
2607 return None
2608
2609 try:
2610 vm_list_xmlroot = XmlElementTree.fromstring(content)
2611 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2612 if len(vcd_uuid) == 4:
2613 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2614 return vcd_uuid[3]
2615 except:
2616 self.logger.debug("Failed create network {}".format(network_name))
2617 return None
2618
    def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
                            ip_profile=None, isshared='true'):
        """
        Method create network in vCloud director

        Args:
            network_name - is network name to be created.
            net_type - can be 'bridge','data','ptp','mgmt'.
            ip_profile is a dict containing the IP parameters of the network
            isshared - is a boolean
            parent_network_uuid - is parent provider vdc network that will be used for mapping.
            It optional attribute. by default if no parent network indicate the first available will be used.

        Returns:
            The return network uuid or return None
        """

        # Creating an org vdc network requires admin rights on the vCD API.
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")
        if network_name is None:
            return None

        # GET the org vdc entity: from it we discover the provider vdc reference
        # and the 'add orgVdcNetwork' link we must POST to.
        url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
        vm_list_rest_call = ''.join(url_list)
        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
            response = Http.get(url=vm_list_rest_call,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            provider_network = None
            available_networks = None
            add_vdc_rest_url = None

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None
            else:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot:
                        # tags are namespaced ('{ns}Tag'); split("}")[1] strips the namespace
                        if child.tag.split("}")[1] == 'ProviderVdcReference':
                            provider_network = child.attrib.get('href')
                            # application/vnd.vmware.admin.providervdc+xml
                        if child.tag.split("}")[1] == 'Link':
                            if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
                                    and child.attrib.get('rel') == 'add':
                                add_vdc_rest_url = child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response.content))
                    return None

            # find pvdc provided available network
            response = Http.get(url=provider_network,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)
            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None

            # available_networks.split("/")[-1]

            # No parent given: take the first available network advertised by the
            # provider vdc as the parent for the new org vdc network.
            if parent_network_uuid is None:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot.iter():
                        if child.tag.split("}")[1] == 'AvailableNetworks':
                            for networks in child.iter():
                                # application/vnd.vmware.admin.network+xml
                                if networks.attrib.get('href') is not None:
                                    available_networks = networks.attrib.get('href')
                                    break
                except:
                    return None

            try:
                #Configure IP profile of the network
                # Missing/None fields are filled in-place with defaults; note this
                # mutates the caller-supplied ip_profile dict.
                ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE

                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
                    # pick a random /24 inside 192.168.0.0/16 when no subnet was requested
                    subnet_rand = random.randint(0, 255)
                    ip_base = "192.168.{}.".format(subnet_rand)
                    ip_profile['subnet_address'] = ip_base + "0/24"
                else:
                    # first three octets of the requested subnet, e.g. "10.1.2."
                    ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'

                if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
                    ip_profile['gateway_address']=ip_base + "1"
                if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
                    ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
                if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
                    ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
                if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
                    ip_profile['dhcp_start_address']=ip_base + "3"
                if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
                    ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
                if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
                    ip_profile['dns_address']=ip_base + "2"

                gateway_address=ip_profile['gateway_address']
                dhcp_count=int(ip_profile['dhcp_count'])
                subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])

                # vCD expects the literal strings 'true'/'false' in the XML payload
                if ip_profile['dhcp_enabled']==True:
                    dhcp_enabled='true'
                else:
                    dhcp_enabled='false'
                dhcp_start_address=ip_profile['dhcp_start_address']

                #derive dhcp_end_address from dhcp_start_address & dhcp_count
                end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
                end_ip_int += dhcp_count - 1
                dhcp_end_address = str(netaddr.IPAddress(end_ip_int))

                # ip_version is read from the profile but not used below — kept for parity
                ip_version=ip_profile['ip_version']
                dns_address=ip_profile['dns_address']
            except KeyError as exp:
                self.logger.debug("Create Network REST: Key error {}".format(exp))
                raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))

            # either use client provided UUID or search for a first available
            #  if both are not defined we return none
            if parent_network_uuid is not None:
                url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
                add_vdc_rest_url = ''.join(url_list)

            # NOTE(review): when parent_network_uuid is given, available_networks
            # stays None yet is still substituted as the ParentNetwork href below —
            # confirm callers only use the default (parent_network_uuid=None) path.

            #Creating all networks as Direct Org VDC type networks.
            #Unused in case of Underlay (data/ptp) network interface.
            fence_mode="bridged"
            is_inherited='false'
            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                        <Description>Openmano created</Description>
                                <Configuration>
                                    <IpScopes>
                                        <IpScope>
                                            <IsInherited>{1:s}</IsInherited>
                                            <Gateway>{2:s}</Gateway>
                                            <Netmask>{3:s}</Netmask>
                                            <Dns1>{4:s}</Dns1>
                                            <IsEnabled>{5:s}</IsEnabled>
                                            <IpRanges>
                                                <IpRange>
                                                    <StartAddress>{6:s}</StartAddress>
                                                    <EndAddress>{7:s}</EndAddress>
                                                </IpRange>
                                            </IpRanges>
                                        </IpScope>
                                    </IpScopes>
                                    <ParentNetwork href="{8:s}"/>
                                    <FenceMode>{9:s}</FenceMode>
                                </Configuration>
                                <IsShared>{10:s}</IsShared>
                    </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
                                                subnet_address, dns_address, dhcp_enabled,
                                                dhcp_start_address, dhcp_end_address, available_networks,
                                                fence_mode, isshared)

            headers = vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
            try:
                response = Http.post(url=add_vdc_rest_url,
                                     headers=headers,
                                     data=data,
                                     verify=vca.verify,
                                     logger=vca.logger)

                # 201 Created is the only success status for this POST
                if response.status_code != 201:
                    self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
                                      .format(response.status_code,response.content))
                else:
                    network = networkType.parseString(response.content, True)
                    create_nw_task = network.get_Tasks().get_Task()[0]

                    # if we all ok we respond with content after network creation completes
                    # otherwise by default return None
                    if create_nw_task is not None:
                        self.logger.debug("Create Network REST : Waiting for Network creation complete")
                        status = vca.block_until_completed(create_nw_task)
                        if status:
                            return response.content
                        else:
                            self.logger.debug("create_network_rest task failed. Network Create response : {}"
                                              .format(response.content))
            except Exception as exp:
                self.logger.debug("create_network_rest : Exception : {} ".format(exp))

        return None
2811
2812 def convert_cidr_to_netmask(self, cidr_ip=None):
2813 """
2814 Method sets convert CIDR netmask address to normal IP format
2815 Args:
2816 cidr_ip : CIDR IP address
2817 Returns:
2818 netmask : Converted netmask
2819 """
2820 if cidr_ip is not None:
2821 if '/' in cidr_ip:
2822 network, net_bits = cidr_ip.split('/')
2823 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2824 else:
2825 netmask = cidr_ip
2826 return netmask
2827 return None
2828
2829 def get_provider_rest(self, vca=None):
2830 """
2831 Method gets provider vdc view from vcloud director
2832
2833 Args:
2834 network_name - is network name to be created.
2835 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2836 It optional attribute. by default if no parent network indicate the first available will be used.
2837
2838 Returns:
2839 The return xml content of respond or None
2840 """
2841
2842 url_list = [vca.host, '/api/admin']
2843 response = Http.get(url=''.join(url_list),
2844 headers=vca.vcloud_session.get_vcloud_headers(),
2845 verify=vca.verify,
2846 logger=vca.logger)
2847
2848 if response.status_code == requests.codes.ok:
2849 return response.content
2850 return None
2851
2852 def create_vdc(self, vdc_name=None):
2853
2854 vdc_dict = {}
2855
2856 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
2857 if xml_content is not None:
2858 try:
2859 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
2860 for child in task_resp_xmlroot:
2861 if child.tag.split("}")[1] == 'Owner':
2862 vdc_id = child.attrib.get('href').split("/")[-1]
2863 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
2864 return vdc_dict
2865 except:
2866 self.logger.debug("Respond body {}".format(xml_content))
2867
2868 return None
2869
    def create_vdc_from_tmpl_rest(self, vdc_name=None):
        """
        Method create vdc in vCloud director based on VDC template.
        it uses pre-defined template that must be named openmano

        Args:
            vdc_name - name of a new vdc.

        Returns:
            The return xml content of respond or None
        """

        self.logger.info("Creating new vdc {}".format(vdc_name))
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vdc_name is None:
            return None

        # list all vdc templates visible to this org
        url_list = [vca.host, '/api/vdcTemplates']
        vm_list_rest_call = ''.join(url_list)
        response = Http.get(url=vm_list_rest_call,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)

        # container url to a template
        vdc_template_ref = None
        try:
            vm_list_xmlroot = XmlElementTree.fromstring(response.content)
            for child in vm_list_xmlroot:
                # application/vnd.vmware.admin.providervdc+xml
                # we need find a template from witch we instantiate VDC
                if child.tag.split("}")[1] == 'VdcTemplate':
                    if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml' and child.attrib.get(
                            'name') == 'openmano':
                        vdc_template_ref = child.attrib.get('href')
        except:
            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
            self.logger.debug("Respond body {}".format(response.content))
            return None

        # if we didn't found required pre defined template we return None
        if vdc_template_ref is None:
            return None

        try:
            # instantiate vdc
            # POST an InstantiateVdcTemplateParams referencing the found template
            url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
            vm_list_rest_call = ''.join(url_list)
            data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                                        <Source href="{1:s}"></Source>
                                        <Description>opnemano</Description>
                                        </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
            headers = vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
            response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
                                 logger=vca.logger)
            # if we all ok we respond with content otherwise by default None
            if response.status_code >= 200 and response.status_code < 300:
                return response.content
            return None
        except:
            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
            self.logger.debug("Respond body {}".format(response.content))

        return None
2937
    def create_vdc_rest(self, vdc_name=None):
        """
        Method create vdc in vCloud director directly via the admin REST API
        (CreateVdcParams), without using a vdc template.

        Args:
            vdc_name - name of the new vdc to be created.

        Returns:
            The xml content of the 201 response, or None on any failure.
        """

        self.logger.info("Creating new vdc {}".format(vdc_name))

        # vdc creation is an admin-level operation
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vdc_name is None:
            return None

        # GET the admin org view to discover the 'add vdc' link
        url_list = [vca.host, '/api/admin/org/', self.org_uuid]
        vm_list_rest_call = ''.join(url_list)
        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
            response = Http.get(url=vm_list_rest_call,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            provider_vdc_ref = None
            add_vdc_rest_url = None
            available_networks = None

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None
            else:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot:
                        # application/vnd.vmware.admin.providervdc+xml
                        if child.tag.split("}")[1] == 'Link':
                            if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
                                    and child.attrib.get('rel') == 'add':
                                add_vdc_rest_url = child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response.content))
                    return None

            # pick a provider vdc reference from the admin view
            # (the loop keeps the last ProviderVdcReference found)
            response = self.get_provider_rest(vca=vca)
            try:
                vm_list_xmlroot = XmlElementTree.fromstring(response)
                for child in vm_list_xmlroot:
                    if child.tag.split("}")[1] == 'ProviderVdcReferences':
                        for sub_child in child:
                            provider_vdc_ref = sub_child.attrib.get('href')
            except:
                self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                self.logger.debug("Respond body {}".format(response))
                return None

            if add_vdc_rest_url is not None and provider_vdc_ref is not None:
                # fixed ReservationPool sizing: 2048 MHz CPU, 2048 MB RAM, 20 GB storage
                data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
                        <AllocationModel>ReservationPool</AllocationModel>
                        <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
                        <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
                        </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
                        <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
                        <ProviderVdcReference
                        name="Main Provider"
                        href="{2:s}" />
                <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
                                                                                              escape(vdc_name),
                                                                                              provider_vdc_ref)

                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
                response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
                                     logger=vca.logger)

                # if we all ok we respond with content otherwise by default None
                if response.status_code == 201:
                    return response.content
                return None
3024
    def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
        """
        Method retrieve vapp detail from vCloud director

        Args:
            vapp_uuid - is vapp identifier.
            need_admin_access - when True the admin session is used for the call.

        Returns:
            dict with the parsed vApp attributes (created, networkname, ip scope
            fields, child VM data: name/status/vmuuid/interfaces, acquireTicket/
            acquireMksTicket links, vm_vcenter_info, vm_virtual_hardware).
            Empty dict when the REST call fails; None when vapp_uuid is None.
        """

        parsed_respond = {}
        vca = None

        if need_admin_access:
            vca = self.connect_as_admin()
        else:
            vca = self.connect()

        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vapp_uuid is None:
            return None

        url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
        get_vapp_restcall = ''.join(url_list)

        if vca.vcloud_session and vca.vcloud_session.organization:
            response = Http.get(url=get_vapp_restcall,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
                                                                                          response.status_code))
                return parsed_respond

            try:
                xmlroot_respond = XmlElementTree.fromstring(response.content)
                parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']

                # namespace map used by all the find()/iterfind() calls below
                namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                              'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                              'vmw': 'http://www.vmware.com/schema/ovf',
                              'vm': 'http://www.vmware.com/vcloud/v1.5',
                              'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                              "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
                              "xmlns":"http://www.vmware.com/vcloud/v1.5"
                             }

                created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
                if created_section is not None:
                    parsed_respond['created'] = created_section.text

                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
                if network_section is not None and 'networkName' in network_section.attrib:
                    parsed_respond['networkname'] = network_section.attrib['networkName']

                # flatten the IpScope elements (Gateway, Netmask, ...) and the
                # Start/EndAddress of each IpRange directly into parsed_respond
                ipscopes_section = \
                    xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
                                         namespaces)
                if ipscopes_section is not None:
                    for ipscope in ipscopes_section:
                        for scope in ipscope:
                            tag_key = scope.tag.split("}")[1]
                            if tag_key == 'IpRanges':
                                ip_ranges = scope.getchildren()
                                for ipblock in ip_ranges:
                                    for block in ipblock:
                                        parsed_respond[block.tag.split("}")[1]] = block.text
                            else:
                                parsed_respond[tag_key] = scope.text

                # parse children section for other attrib
                # NOTE(review): only the first child VM element is inspected here —
                # multi-VM vApps would need iteration; confirm with callers.
                children_section = xmlroot_respond.find('vm:Children/', namespaces)
                if children_section is not None:
                    parsed_respond['name'] = children_section.attrib['name']
                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
                        if "nestedHypervisorEnabled" in children_section.attrib else None
                    parsed_respond['deployed'] = children_section.attrib['deployed']
                    parsed_respond['status'] = children_section.attrib['status']
                    # id has the form 'urn:vcloud:vm:<uuid>'; keep only the uuid
                    parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
                    nic_list = []
                    for adapters in network_adapter:
                        adapter_key = adapters.tag.split("}")[1]
                        if adapter_key == 'PrimaryNetworkConnectionIndex':
                            parsed_respond['primarynetwork'] = adapters.text
                        if adapter_key == 'NetworkConnection':
                            vnic = {}
                            if 'network' in adapters.attrib:
                                vnic['network'] = adapters.attrib['network']
                            for adapter in adapters:
                                setting_key = adapter.tag.split("}")[1]
                                vnic[setting_key] = adapter.text
                            nic_list.append(vnic)

                    # console ticket links exposed by the VM entity
                    for link in children_section:
                        if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                            if link.attrib['rel'] == 'screen:acquireTicket':
                                parsed_respond['acquireTicket'] = link.attrib
                            if link.attrib['rel'] == 'screen:acquireMksTicket':
                                parsed_respond['acquireMksTicket'] = link.attrib

                    parsed_respond['interfaces'] = nic_list
                    # vCenter moref id of the backing VM, if the extension data is present
                    vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                    if vCloud_extension_section is not None:
                        vm_vcenter_info = {}
                        vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                        vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                        if vmext is not None:
                            vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                        parsed_respond["vm_vcenter_info"]= vm_vcenter_info

                    # hard disk size plus the edit href needed by modify_vm_disk_rest
                    virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
                    vm_virtual_hardware_info = {}
                    if virtual_hardware_section is not None:
                        for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
                            if item.find("rasd:Description",namespaces).text == "Hard disk":
                                disk_size = item.find("rasd:HostResource" ,namespaces
                                                      ).attrib["{"+namespaces['vm']+"}capacity"]

                                vm_virtual_hardware_info["disk_size"]= disk_size
                                break

                        for link in virtual_hardware_section:
                            if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                                if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
                                    vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
                                    break

                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
            except Exception as exp :
                self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
        return parsed_respond
3161
3162 def acuire_console(self, vm_uuid=None):
3163
3164 vca = self.connect()
3165 if not vca:
3166 raise vimconn.vimconnConnectionException("self.connect() is failed")
3167 if vm_uuid is None:
3168 return None
3169
3170 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
3171 vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid)
3172 console_dict = vm_dict['acquireTicket']
3173 console_rest_call = console_dict['href']
3174
3175 response = Http.post(url=console_rest_call,
3176 headers=vca.vcloud_session.get_vcloud_headers(),
3177 verify=vca.verify,
3178 logger=vca.logger)
3179
3180 if response.status_code == requests.codes.ok:
3181 return response.content
3182
3183 return None
3184
3185 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3186 """
3187 Method retrieve vm disk details
3188
3189 Args:
3190 vapp_uuid - is vapp identifier.
3191 flavor_disk - disk size as specified in VNFD (flavor)
3192
3193 Returns:
3194 The return network uuid or return None
3195 """
3196 status = None
3197 try:
3198 #Flavor disk is in GB convert it into MB
3199 flavor_disk = int(flavor_disk) * 1024
3200 vm_details = self.get_vapp_details_rest(vapp_uuid)
3201 if vm_details:
3202 vm_name = vm_details["name"]
3203 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3204
3205 if vm_details and "vm_virtual_hardware" in vm_details:
3206 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3207 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3208
3209 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3210
3211 if flavor_disk > vm_disk:
3212 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3213 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3214 vm_disk, flavor_disk ))
3215 else:
3216 status = True
3217 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3218
3219 return status
3220 except Exception as exp:
3221 self.logger.info("Error occurred while modifing disk size {}".format(exp))
3222
3223
    def modify_vm_disk_rest(self, disk_href , disk_size):
        """
        Method retrieve modify vm disk size

        Args:
            disk_href - vCD API URL to GET and PUT disk data
            disk_size - new disk size in MB as derived from the VNFD (flavor)

        Returns:
            Result of vca.block_until_completed() on the resize task, or None
            when the GET/PUT failed or the response carried no task.
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if disk_href is None or disk_size is None:
            return None

        if vca.vcloud_session and vca.vcloud_session.organization:
            # fetch the current rasd items list for the VM disks
            response = Http.get(url=disk_href,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code != requests.codes.ok:
                self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
                                                                                              response.status_code))
                return None
            try:
                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                # build a prefix->uri map from the document itself (py2 iteritems)
                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

                # patch the capacity attribute of the first "Hard disk" item in place
                for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                    if item.find("rasd:Description",namespaces).text == "Hard disk":
                        disk_item = item.find("rasd:HostResource" ,namespaces )
                        if disk_item is not None:
                            disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
                            break

                data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
                                                xml_declaration=True)

                #Send PUT request to modify disk size
                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

                response = Http.put(url=disk_href,
                                    data=data,
                                    headers=headers,
                                    verify=vca.verify, logger=self.logger)

                # vCD answers 202 Accepted with a Task for the async resize
                if response.status_code != 202:
                    self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
                                                                                                  response.status_code))
                else:
                    modify_disk_task = taskType.parseString(response.content, True)
                    if type(modify_disk_task) is GenericTask:
                        status = vca.block_until_completed(modify_disk_task)
                        return status

                return None

            except Exception as exp :
                self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
                return None
3289
3290 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3291 """
3292 Method to attach pci devices to VM
3293
3294 Args:
3295 vapp_uuid - uuid of vApp/VM
3296 pci_devices - pci devices infromation as specified in VNFD (flavor)
3297
3298 Returns:
3299 The status of add pci device task , vm object and
3300 vcenter_conect object
3301 """
3302 vm_obj = None
3303 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3304 vcenter_conect, content = self.get_vcenter_content()
3305 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3306
3307 if vm_moref_id:
3308 try:
3309 no_of_pci_devices = len(pci_devices)
3310 if no_of_pci_devices > 0:
3311 #Get VM and its host
3312 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3313 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3314 if host_obj and vm_obj:
3315 #get PCI devies from host on which vapp is currently installed
3316 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3317
3318 if avilable_pci_devices is None:
3319 #find other hosts with active pci devices
3320 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3321 content,
3322 no_of_pci_devices
3323 )
3324
3325 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3326 #Migrate vm to the host where PCI devices are availble
3327 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3328 task = self.relocate_vm(new_host_obj, vm_obj)
3329 if task is not None:
3330 result = self.wait_for_vcenter_task(task, vcenter_conect)
3331 self.logger.info("Migrate VM status: {}".format(result))
3332 host_obj = new_host_obj
3333 else:
3334 self.logger.info("Fail to migrate VM : {}".format(result))
3335 raise vimconn.vimconnNotFoundException(
3336 "Fail to migrate VM : {} to host {}".format(
3337 vmname_andid,
3338 new_host_obj)
3339 )
3340
3341 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3342 #Add PCI devices one by one
3343 for pci_device in avilable_pci_devices:
3344 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3345 if task:
3346 status= self.wait_for_vcenter_task(task, vcenter_conect)
3347 if status:
3348 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3349 else:
3350 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3351 return True, vm_obj, vcenter_conect
3352 else:
3353 self.logger.error("Currently there is no host with"\
3354 " {} number of avaialble PCI devices required for VM {}".format(
3355 no_of_pci_devices,
3356 vmname_andid)
3357 )
3358 raise vimconn.vimconnNotFoundException(
3359 "Currently there is no host with {} "\
3360 "number of avaialble PCI devices required for VM {}".format(
3361 no_of_pci_devices,
3362 vmname_andid))
3363 else:
3364 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
3365
3366 except vmodl.MethodFault as error:
3367 self.logger.error("Error occurred while adding PCI devices {} ",error)
3368 return None, vm_obj, vcenter_conect
3369
3370 def get_vm_obj(self, content, mob_id):
3371 """
3372 Method to get the vsphere VM object associated with a given morf ID
3373 Args:
3374 vapp_uuid - uuid of vApp/VM
3375 content - vCenter content object
3376 mob_id - mob_id of VM
3377
3378 Returns:
3379 VM and host object
3380 """
3381 vm_obj = None
3382 host_obj = None
3383 try :
3384 container = content.viewManager.CreateContainerView(content.rootFolder,
3385 [vim.VirtualMachine], True
3386 )
3387 for vm in container.view:
3388 mobID = vm._GetMoId()
3389 if mobID == mob_id:
3390 vm_obj = vm
3391 host_obj = vm_obj.runtime.host
3392 break
3393 except Exception as exp:
3394 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3395 return host_obj, vm_obj
3396
3397 def get_pci_devices(self, host, need_devices):
3398 """
3399 Method to get the details of pci devices on given host
3400 Args:
3401 host - vSphere host object
3402 need_devices - number of pci devices needed on host
3403
3404 Returns:
3405 array of pci devices
3406 """
3407 all_devices = []
3408 all_device_ids = []
3409 used_devices_ids = []
3410
3411 try:
3412 if host:
3413 pciPassthruInfo = host.config.pciPassthruInfo
3414 pciDevies = host.hardware.pciDevice
3415
3416 for pci_status in pciPassthruInfo:
3417 if pci_status.passthruActive:
3418 for device in pciDevies:
3419 if device.id == pci_status.id:
3420 all_device_ids.append(device.id)
3421 all_devices.append(device)
3422
3423 #check if devices are in use
3424 avalible_devices = all_devices
3425 for vm in host.vm:
3426 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3427 vm_devices = vm.config.hardware.device
3428 for device in vm_devices:
3429 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3430 if device.backing.id in all_device_ids:
3431 for use_device in avalible_devices:
3432 if use_device.id == device.backing.id:
3433 avalible_devices.remove(use_device)
3434 used_devices_ids.append(device.backing.id)
3435 self.logger.debug("Device {} from devices {}"\
3436 "is in use".format(device.backing.id,
3437 device)
3438 )
3439 if len(avalible_devices) < need_devices:
3440 self.logger.debug("Host {} don't have {} number of active devices".format(host,
3441 need_devices))
3442 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
3443 avalible_devices))
3444 return None
3445 else:
3446 required_devices = avalible_devices[:need_devices]
3447 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
3448 len(avalible_devices),
3449 host,
3450 need_devices))
3451 self.logger.info("Retruning {} devices as {}".format(need_devices,
3452 required_devices ))
3453 return required_devices
3454
3455 except Exception as exp:
3456 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3457
3458 return None
3459
3460 def get_host_and_PCIdevices(self, content, need_devices):
3461 """
3462 Method to get the details of pci devices infromation on all hosts
3463
3464 Args:
3465 content - vSphere host object
3466 need_devices - number of pci devices needed on host
3467
3468 Returns:
3469 array of pci devices and host object
3470 """
3471 host_obj = None
3472 pci_device_objs = None
3473 try:
3474 if content:
3475 container = content.viewManager.CreateContainerView(content.rootFolder,
3476 [vim.HostSystem], True)
3477 for host in container.view:
3478 devices = self.get_pci_devices(host, need_devices)
3479 if devices:
3480 host_obj = host
3481 pci_device_objs = devices
3482 break
3483 except Exception as exp:
3484 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3485
3486 return host_obj,pci_device_objs
3487
3488 def relocate_vm(self, dest_host, vm) :
3489 """
3490 Method to get the relocate VM to new host
3491
3492 Args:
3493 dest_host - vSphere host object
3494 vm - vSphere VM object
3495
3496 Returns:
3497 task object
3498 """
3499 task = None
3500 try:
3501 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3502 task = vm.Relocate(relocate_spec)
3503 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3504 except Exception as exp:
3505 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
3506 dest_host, vm, exp))
3507 return task
3508
3509 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3510 """
3511 Waits and provides updates on a vSphere task
3512 """
3513 while task.info.state == vim.TaskInfo.State.running:
3514 time.sleep(2)
3515
3516 if task.info.state == vim.TaskInfo.State.success:
3517 if task.info.result is not None and not hideResult:
3518 self.logger.info('{} completed successfully, result: {}'.format(
3519 actionName,
3520 task.info.result))
3521 else:
3522 self.logger.info('Task {} completed successfully.'.format(actionName))
3523 else:
3524 self.logger.error('{} did not complete successfully: {} '.format(
3525 actionName,
3526 task.info.error)
3527 )
3528
3529 return task.info.result
3530
3531 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3532 """
3533 Method to add pci device in given VM
3534
3535 Args:
3536 host_object - vSphere host object
3537 vm_object - vSphere VM object
3538 host_pci_dev - host_pci_dev must be one of the devices from the
3539 host_object.hardware.pciDevice list
3540 which is configured as a PCI passthrough device
3541
3542 Returns:
3543 task object
3544 """
3545 task = None
3546 if vm_object and host_object and host_pci_dev:
3547 try :
3548 #Add PCI device to VM
3549 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3550 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3551
3552 if host_pci_dev.id not in systemid_by_pciid:
3553 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3554 return None
3555
3556 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3557 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3558 id=host_pci_dev.id,
3559 systemId=systemid_by_pciid[host_pci_dev.id],
3560 vendorId=host_pci_dev.vendorId,
3561 deviceName=host_pci_dev.deviceName)
3562
3563 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3564
3565 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3566 new_device_config.operation = "add"
3567 vmConfigSpec = vim.vm.ConfigSpec()
3568 vmConfigSpec.deviceChange = [new_device_config]
3569
3570 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3571 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3572 host_pci_dev, vm_object, host_object)
3573 )
3574 except Exception as exp:
3575 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
3576 host_pci_dev,
3577 vm_object,
3578 exp))
3579 return task
3580
3581 def get_vm_vcenter_info(self):
3582 """
3583 Method to get details of vCenter and vm
3584
3585 Args:
3586 vapp_uuid - uuid of vApp or VM
3587
3588 Returns:
3589 Moref Id of VM and deails of vCenter
3590 """
3591 vm_vcenter_info = {}
3592
3593 if self.vcenter_ip is not None:
3594 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3595 else:
3596 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3597 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3598 if self.vcenter_port is not None:
3599 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3600 else:
3601 raise vimconn.vimconnException(message="vCenter port is not provided."\
3602 " Please provide vCenter port while attaching datacenter to tenant in --config")
3603 if self.vcenter_user is not None:
3604 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3605 else:
3606 raise vimconn.vimconnException(message="vCenter user is not provided."\
3607 " Please provide vCenter user while attaching datacenter to tenant in --config")
3608
3609 if self.vcenter_password is not None:
3610 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3611 else:
3612 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3613 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3614
3615 return vm_vcenter_info
3616
3617
    def get_vm_pci_details(self, vmuuid):
        """
        Method to get VM PCI device details from vCenter

        Args:
            vmuuid - vApp/VM UUID in vCloud director

        Returns:
            dict of PCI devives attached to VM (plus host_name / host_ip);
            implicitly None when no moref id could be resolved

        Raises:
            vimconn.vimconnException on any lookup error
        """
        vm_pci_devices_info = {}
        try:
            # moref id links the vCloud VM to its backing vCenter VM
            vcenter_conect, content = self.get_vcenter_content()
            vm_moref_id = self.get_vm_moref_id(vmuuid)
            if vm_moref_id:
                #Get VM and its host
                if content:
                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                    if host_obj and vm_obj:
                        vm_pci_devices_info["host_name"]= host_obj.name
                        # assumes vnic[0] carries the host management IP — TODO confirm
                        vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
                        for device in vm_obj.config.hardware.device:
                            if type(device) == vim.vm.device.VirtualPCIPassthrough:
                                # NOTE: 'devide_id' key is misspelled but kept —
                                # consumers may already depend on it
                                device_details={'devide_id':device.backing.id,
                                                'pciSlotNumber':device.slotInfo.pciSlotNumber,
                                            }
                                vm_pci_devices_info[device.deviceInfo.label] = device_details
                    else:
                        self.logger.error("Can not connect to vCenter while getting "\
                                              "PCI devices infromationn")
                return vm_pci_devices_info
        except Exception as exp:
            self.logger.error("Error occurred while getting VM infromationn"\
                             " for VM : {}".format(exp))
            raise vimconn.vimconnException(message=exp)
3654
    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
        """
        Method to add network adapter type to vm

        Args :
            vapp - vApp object whose VMs receive the new NIC
            network_name - name of network
            primary_nic_index - int value for primary nic index
            nicIndex - int value for nic index
            net - network dict; optional keys 'floating_ip' and 'ip_address'
                  select the IP allocation mode
            nic_type - specify model name to which add to vm

        Returns:
            None

        Raises:
            vimconn.vimconnConnectionException / vimconn.vimconnException on
            connection or REST failures
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")

        try:
            ip_address = None
            floating_ip = False
            if 'floating_ip' in net: floating_ip = net['floating_ip']

            # Stub for ip_address feature
            if 'ip_address' in net: ip_address = net['ip_address']

            # floating ip -> POOL, explicit ip -> MANUAL, otherwise DHCP
            if floating_ip:
                allocation_mode = "POOL"
            elif ip_address:
                allocation_mode = "MANUAL"
            else:
                allocation_mode = "DHCP"

            if not nic_type:
                # Default adapter model: patch each VM's
                # NetworkConnectionSection without a NetworkAdapterType element
                for vms in vapp._get_vms():
                    vm_id = (vms.id).split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)

                    # Read the VM's current network connection section
                    response = Http.get(url=url_rest_call,
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                       "network connection section")

                    data = response.content
                    # First NIC on this VM: also set the primary connection index.
                    # NOTE(review): the replace() anchors below assume exact
                    # '</ovf:Info>\n' / '</NetworkConnection>\n' formatting in
                    # the vCD response — confirm against the API version in use.
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                                                         allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                    else:
                        new_item = """<NetworkConnection network="{}">
                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                    <IsConnected>true</IsConnected>
                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                    </NetworkConnection>""".format(network_name, nicIndex,
                                                                   allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))

                    # Write the modified section back; vCD answers 202 + task
                    headers = vca.vcloud_session.get_vcloud_headers()
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                    response = Http.put(url=url_rest_call, headers=headers, data=data,
                                        verify=vca.verify,
                                        logger=vca.logger)
                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {} ".format(url_rest_call,
                                                                     response.content,
                                                                     response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                       "network connection section")
                    else:
                        nic_task = taskType.parseString(response.content, True)
                        if isinstance(nic_task, GenericTask):
                            vca.block_until_completed(nic_task)
                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
                                             "default NIC type".format(vm_id))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
                                              "connect NIC type".format(vm_id))
            else:
                # Explicit adapter model: same flow but the NetworkConnection
                # carries a NetworkAdapterType element
                for vms in vapp._get_vms():
                    vm_id = (vms.id).split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)

                    response = Http.get(url=url_rest_call,
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                       "network connection section")
                    data = response.content
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                <NetworkAdapterType>{}</NetworkAdapterType>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                               allocation_mode, nic_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                    else:
                        new_item = """<NetworkConnection network="{}">
                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                    <IsConnected>true</IsConnected>
                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                    <NetworkAdapterType>{}</NetworkAdapterType>
                                    </NetworkConnection>""".format(network_name, nicIndex,
                                                                   allocation_mode, nic_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))

                    headers = vca.vcloud_session.get_vcloud_headers()
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                    response = Http.put(url=url_rest_call, headers=headers, data=data,
                                        verify=vca.verify,
                                        logger=vca.logger)

                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                       "network connection section")
                    else:
                        nic_task = taskType.parseString(response.content, True)
                        if isinstance(nic_task, GenericTask):
                            vca.block_until_completed(nic_task)
                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
                                             "conneced to NIC type {}".format(vm_id, nic_type))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
                                              "failed to connect NIC type {}".format(vm_id, nic_type))
        except Exception as exp:
            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
                              "while adding Network adapter")
            raise vimconn.vimconnException(message=exp)
3827
3828
3829 def set_numa_affinity(self, vmuuid, paired_threads_id):
3830 """
3831 Method to assign numa affinity in vm configuration parammeters
3832 Args :
3833 vmuuid - vm uuid
3834 paired_threads_id - one or more virtual processor
3835 numbers
3836 Returns:
3837 return if True
3838 """
3839 try:
3840 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
3841 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
3842 context = None
3843 if hasattr(ssl, '_create_unverified_context'):
3844 context = ssl._create_unverified_context()
3845 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
3846 pwd=self.passwd, port=int(vm_vcenter_port),
3847 sslContext=context)
3848 atexit.register(Disconnect, vcenter_conect)
3849 content = vcenter_conect.RetrieveContent()
3850
3851 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
3852 if vm_obj:
3853 config_spec = vim.vm.ConfigSpec()
3854 config_spec.extraConfig = []
3855 opt = vim.option.OptionValue()
3856 opt.key = 'numa.nodeAffinity'
3857 opt.value = str(paired_threads_id)
3858 config_spec.extraConfig.append(opt)
3859 task = vm_obj.ReconfigVM_Task(config_spec)
3860 if task:
3861 result = self.wait_for_vcenter_task(task, vcenter_conect)
3862 extra_config = vm_obj.config.extraConfig
3863 flag = False
3864 for opts in extra_config:
3865 if 'numa.nodeAffinity' in opts.key:
3866 flag = True
3867 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
3868 "value {} for vm {}".format(opt.value, vm_obj))
3869 if flag:
3870 return
3871 else:
3872 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
3873 except Exception as exp:
3874 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
3875 "for VM {} : {}".format(vm_obj, vm_moref_id))
3876 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
3877 "affinity".format(exp))
3878
3879
3880
    def cloud_init(self, vapp, cloud_config):
        """
        Method to inject ssh-key
        vapp - vapp object
        cloud_config a dictionary with:
                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
                'users': (optional) list of users to be inserted, each item is a dict with:
                    'name': (mandatory) user name,
                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
                'user-data': (optional) string is a text script to be passed directly to cloud-init
                'config-files': (optional). List of files to be transferred. Each item is a dict with:
                    'dest': (mandatory) string with the destination absolute path
                    'encoding': (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    'content' (mandatory): string with the content of the file
                    'permissions': (optional) string with file permissions, typically octal notation '0644'
                    'owner': (optional) file owner, string with the format 'owner:group'
                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk

        NOTE: only 'key-pairs' and 'users' are handled here; the remaining
        cloud_config keys are ignored by this implementation.
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")

        try:
            if isinstance(cloud_config, dict):
                key_pairs = []
                userdata = []
                if "key-pairs" in cloud_config:
                    key_pairs = cloud_config["key-pairs"]

                if "users" in cloud_config:
                    userdata = cloud_config["users"]

                # One customization script is built (and applied) per
                # (default key, user, user key) combination.
                for key in key_pairs:
                    for user in userdata:
                        # NOTE(review): user_name stays unbound if a user entry
                        # has no 'name' — confirm callers always provide it
                        if 'name' in user: user_name = user['name']
                        if 'key-pairs' in user and len(user['key-pairs']) > 0:
                            for user_key in user['key-pairs']:
                                # guest-OS customization script: appends the keys
                                # to root's and the user's authorized_keys during
                                # the "precustomization" phase
                                customize_script = """
                        #!/bin/bash
                        echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
                        if [ "$1" = "precustomization" ];then
                            echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
                            if [ ! -d /root/.ssh ];then
                                mkdir /root/.ssh
                                chown root:root /root/.ssh
                                chmod 700 /root/.ssh
                                touch /root/.ssh/authorized_keys
                                chown root:root /root/.ssh/authorized_keys
                                chmod 600 /root/.ssh/authorized_keys
                                # make centos with selinux happy
                                which restorecon && restorecon -Rv /root/.ssh
                                echo '{key}' >> /root/.ssh/authorized_keys
                            else
                                touch /root/.ssh/authorized_keys
                                chown root:root /root/.ssh/authorized_keys
                                chmod 600 /root/.ssh/authorized_keys
                                echo '{key}' >> /root/.ssh/authorized_keys
                            fi
                            if [ -d /home/{user_name} ];then
                                if [ ! -d /home/{user_name}/.ssh ];then
                                    mkdir /home/{user_name}/.ssh
                                    chown {user_name}:{user_name} /home/{user_name}/.ssh
                                    chmod 700 /home/{user_name}/.ssh
                                    touch /home/{user_name}/.ssh/authorized_keys
                                    chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
                                    chmod 600 /home/{user_name}/.ssh/authorized_keys
                                    # make centos with selinux happy
                                    which restorecon && restorecon -Rv /home/{user_name}/.ssh
                                    echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
                                else
                                    touch /home/{user_name}/.ssh/authorized_keys
                                    chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
                                    chmod 600 /home/{user_name}/.ssh/authorized_keys
                                    echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
                                fi
                            fi
                        fi""".format(key=key, user_name=user_name, user_key=user_key)

                                # apply the script to every VM of the vApp
                                for vm in vapp._get_vms():
                                    vm_name = vm.name
                                    task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
                                    if isinstance(task, GenericTask):
                                        vca.block_until_completed(task)
                                        self.logger.info("cloud_init : customized guest os task "\
                                                         "completed for VM {}".format(vm_name))
                                    else:
                                        self.logger.error("cloud_init : task for customized guest os"\
                                                          "failed for VM {}".format(vm_name))
        except Exception as exp:
            self.logger.error("cloud_init : exception occurred while injecting "\
                              "ssh-key")
            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
                                           "ssh-key".format(exp))
3975
3976
3977 def add_new_disk(self, vca, vapp_uuid, disk_size):
3978 """
3979 Method to create an empty vm disk
3980
3981 Args:
3982 vapp_uuid - is vapp identifier.
3983 disk_size - size of disk to be created in GB
3984
3985 Returns:
3986 None
3987 """
3988 status = False
3989 vm_details = None
3990 try:
3991 #Disk size in GB, convert it into MB
3992 if disk_size is not None:
3993 disk_size_mb = int(disk_size) * 1024
3994 vm_details = self.get_vapp_details_rest(vapp_uuid)
3995
3996 if vm_details and "vm_virtual_hardware" in vm_details:
3997 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
3998 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3999 status = self.add_new_disk_rest(vca, disk_href, disk_size_mb)
4000
4001 except Exception as exp:
4002 msg = "Error occurred while creating new disk {}.".format(exp)
4003 self.rollback_newvm(vapp_uuid, msg)
4004
4005 if status:
4006 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4007 else:
4008 #If failed to add disk, delete VM
4009 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4010 self.rollback_newvm(vapp_uuid, msg)
4011
4012
    def add_new_disk_rest(self, vca, disk_href, disk_size_mb):
        """
        Retrives vApp Disks section & add new empty disk

        Args:
            vca: VCA connection object
            disk_href: Disk section href to addd disk
            disk_size_mb: Disk size in MB

        Returns: Status of add new disk task
        """
        status = False
        if vca.vcloud_session and vca.vcloud_session.organization:
            # Read the current RASD items (disk list) of the VM
            response = Http.get(url=disk_href,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code != requests.codes.ok:
                self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
                                  .format(disk_href, response.status_code))
                return status
            try:
                #Find but type & max of instance IDs assigned to disks
                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                # NOTE: dict.iteritems() — this module is Python 2 code
                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
                instance_id = 0
                for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                    if item.find("rasd:Description",namespaces).text == "Hard disk":
                        inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
                        if inst_id > instance_id:
                            instance_id = inst_id
                        # bus type/subtype are reused for the new disk so it
                        # lands on the same controller type
                        disk_item = item.find("rasd:HostResource" ,namespaces)
                        bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
                        bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]

                # new disk gets the next free instance id
                instance_id = instance_id + 1
                new_item = """<Item>
                                <rasd:Description>Hard disk</rasd:Description>
                                <rasd:ElementName>New disk</rasd:ElementName>
                                <rasd:HostResource
                                            xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
                                            vcloud:capacity="{}"
                                            vcloud:busSubType="{}"
                                            vcloud:busType="{}"></rasd:HostResource>
                                <rasd:InstanceID>{}</rasd:InstanceID>
                                <rasd:ResourceType>17</rasd:ResourceType>
                            </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)

                new_data = response.content
                #Add new item at the bottom
                new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))

                # Send PUT request to modify virtual hardware section with new disk
                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

                response = Http.put(url=disk_href,
                                    data=new_data,
                                    headers=headers,
                                    verify=vca.verify, logger=self.logger)

                if response.status_code != 202:
                    self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
                                      .format(disk_href, response.status_code, response.content))
                else:
                    # 202: vCD returns an async task to wait on
                    add_disk_task = taskType.parseString(response.content, True)
                    if type(add_disk_task) is GenericTask:
                        status = vca.block_until_completed(add_disk_task)
                        if not status:
                            self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))

            except Exception as exp:
                self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))

        return status
4089
4090
    def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
        """
        Method to add existing disk to vm
        Args :
            catalogs - List of VDC catalogs
            image_id - Catalog ID
            size - requested disk size in GB (optional)
            template_name - Name of template in catalog (unused here)
            vapp_uuid - UUID of vApp
        Returns:
            None

        On failure the vApp is rolled back (deleted) and a
        vimconn.vimconnNotFoundException is raised via rollback_newvm.
        """
        disk_info = None
        vcenter_conect, content = self.get_vcenter_content()
        #find moref-id of vm in image
        catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
                                                         image_id=image_id,
                                                         )

        if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
            if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
                catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
                if catalog_vm_moref_id:
                    self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
                    # the template VM holds the disk we want to clone onto the new VM
                    host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
                    if catalog_vm_obj:
                        #find existing disk
                        disk_info = self.find_disk(catalog_vm_obj)
                    else:
                        exp_msg = "No VM with image id {} found".format(image_id)
                        self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
        else:
            exp_msg = "No Image found with image ID {} ".format(image_id)
            self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")

        if disk_info:
            self.logger.info("Existing disk_info : {}".format(disk_info))
            #get VM
            vm_moref_id = self.get_vm_moref_id(vapp_uuid)
            host, vm_obj = self.get_vm_obj(content, vm_moref_id)
            if vm_obj:
                status = self.add_disk(vcenter_conect=vcenter_conect,
                                       vm=vm_obj,
                                       disk_info=disk_info,
                                       size=size,
                                       vapp_uuid=vapp_uuid
                                       )
                if status:
                    self.logger.info("Disk from image id {} added to {}".format(image_id,
                                                                                vm_obj.config.name)
                                     )
        else:
            # NOTE(review): vm_obj is not bound on this path — confirm this
            # branch is only reached when the lookup above populated it
            msg = "No disk found with image id {} to add in VM {}".format(
                                                            image_id,
                                                            vm_obj.config.name)
            self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4146
4147
4148 def find_disk(self, vm_obj):
4149 """
4150 Method to find details of existing disk in VM
4151 Args :
4152 vm_obj - vCenter object of VM
4153 image_id - Catalog ID
4154 Returns:
4155 disk_info : dict of disk details
4156 """
4157 disk_info = {}
4158 if vm_obj:
4159 try:
4160 devices = vm_obj.config.hardware.device
4161 for device in devices:
4162 if type(device) is vim.vm.device.VirtualDisk:
4163 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4164 disk_info["full_path"] = device.backing.fileName
4165 disk_info["datastore"] = device.backing.datastore
4166 disk_info["capacityKB"] = device.capacityInKB
4167 break
4168 except Exception as exp:
4169 self.logger.error("find_disk() : exception occurred while "\
4170 "getting existing disk details :{}".format(exp))
4171 return disk_info
4172
4173
4174 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4175 """
4176 Method to add existing disk in VM
4177 Args :
4178 vcenter_conect - vCenter content object
4179 vm - vCenter vm object
4180 disk_info : dict of disk details
4181 Returns:
4182 status : status of add disk task
4183 """
4184 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4185 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4186 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4187 if size is not None:
4188 #Convert size from GB to KB
4189 sizeKB = int(size) * 1024 * 1024
4190 #compare size of existing disk and user given size.Assign whicherver is greater
4191 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4192 sizeKB, capacityKB))
4193 if sizeKB > capacityKB:
4194 capacityKB = sizeKB
4195
4196 if datastore and fullpath and capacityKB:
4197 try:
4198 spec = vim.vm.ConfigSpec()
4199 # get all disks on a VM, set unit_number to the next available
4200 unit_number = 0
4201 for dev in vm.config.hardware.device:
4202 if hasattr(dev.backing, 'fileName'):
4203 unit_number = int(dev.unitNumber) + 1
4204 # unit_number 7 reserved for scsi controller
4205 if unit_number == 7:
4206 unit_number += 1
4207 if isinstance(dev, vim.vm.device.VirtualDisk):
4208 #vim.vm.device.VirtualSCSIController
4209 controller_key = dev.controllerKey
4210
4211 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4212 unit_number, controller_key))
4213 # add disk here
4214 dev_changes = []
4215 disk_spec = vim.vm.device.VirtualDeviceSpec()
4216 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4217 disk_spec.device = vim.vm.device.VirtualDisk()
4218 disk_spec.device.backing = \
4219 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4220 disk_spec.device.backing.thinProvisioned = True
4221 disk_spec.device.backing.diskMode = 'persistent'
4222 disk_spec.device.backing.datastore = datastore
4223 disk_spec.device.backing.fileName = fullpath
4224
4225 disk_spec.device.unitNumber = unit_number
4226 disk_spec.device.capacityInKB = capacityKB
4227 disk_spec.device.controllerKey = controller_key
4228 dev_changes.append(disk_spec)
4229 spec.deviceChange = dev_changes
4230 task = vm.ReconfigVM_Task(spec=spec)
4231 status = self.wait_for_vcenter_task(task, vcenter_conect)
4232 return status
4233 except Exception as exp:
4234 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4235 "{} to vm {}".format(exp,
4236 fullpath,
4237 vm.config.name)
4238 self.rollback_newvm(vapp_uuid, exp_msg)
4239 else:
4240 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4241 self.rollback_newvm(vapp_uuid, msg)
4242
4243
4244 def get_vcenter_content(self):
4245 """
4246 Get the vsphere content object
4247 """
4248 try:
4249 vm_vcenter_info = self.get_vm_vcenter_info()
4250 except Exception as exp:
4251 self.logger.error("Error occurred while getting vCenter infromationn"\
4252 " for VM : {}".format(exp))
4253 raise vimconn.vimconnException(message=exp)
4254
4255 context = None
4256 if hasattr(ssl, '_create_unverified_context'):
4257 context = ssl._create_unverified_context()
4258
4259 vcenter_conect = SmartConnect(
4260 host=vm_vcenter_info["vm_vcenter_ip"],
4261 user=vm_vcenter_info["vm_vcenter_user"],
4262 pwd=vm_vcenter_info["vm_vcenter_password"],
4263 port=int(vm_vcenter_info["vm_vcenter_port"]),
4264 sslContext=context
4265 )
4266 atexit.register(Disconnect, vcenter_conect)
4267 content = vcenter_conect.RetrieveContent()
4268 return vcenter_conect, content
4269
4270
4271 def get_vm_moref_id(self, vapp_uuid):
4272 """
4273 Get the moref_id of given VM
4274 """
4275 try:
4276 if vapp_uuid:
4277 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4278 if vm_details and "vm_vcenter_info" in vm_details:
4279 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4280
4281 return vm_moref_id
4282
4283 except Exception as exp:
4284 self.logger.error("Error occurred while getting VM moref ID "\
4285 " for VM : {}".format(exp))
4286 return None
4287
4288
    def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
        """
        Method to get vApp template details
        Args :
            catalogs - list of VDC catalogs
            image_id - Catalog ID to find
            template_name : template name in catalog (overwritten from the
                            catalog lookup below)
        Returns:
            parsed_respond : dict of vApp tempalte details; contains
            'vm_vcenter_info' (with 'vm_moref_id') when the template carries
            a VCloudExtension section, empty otherwise
        """
        parsed_response = {}

        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")

        try:
            catalog = self.get_catalog_obj(image_id, catalogs)
            if catalog:
                # resolve the template name from the catalog id, then find the
                # single matching catalog item
                template_name = self.get_catalogbyid(image_id, catalogs)
                catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
                if len(catalog_items) == 1:
                    response = Http.get(catalog_items[0].get_href(),
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                    catalogItem = XmlElementTree.fromstring(response.content)
                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                    vapp_tempalte_href = entity.get("href")
                    #get vapp details and parse moref id

                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                                  'vmw': 'http://www.vmware.com/schema/ovf',
                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
                                  }

                    if vca.vcloud_session and vca.vcloud_session.organization:
                        response = Http.get(url=vapp_tempalte_href,
                                            headers=vca.vcloud_session.get_vcloud_headers(),
                                            verify=vca.verify,
                                            logger=vca.logger
                                            )

                        if response.status_code != requests.codes.ok:
                            self.logger.debug("REST API call {} failed. Return status code {}".format(
                                vapp_tempalte_href, response.status_code))

                        else:
                            # drill into Children/VCloudExtension/VmVimInfo to
                            # extract the vCenter managed-object reference
                            xmlroot_respond = XmlElementTree.fromstring(response.content)
                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
                            if children_section is not None:
                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                                if vCloud_extension_section is not None:
                                    vm_vcenter_info = {}
                                    vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                                    vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                                    if vmext is not None:
                                        vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                                    parsed_response["vm_vcenter_info"]= vm_vcenter_info

        except Exception as exp :
            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))

        return parsed_response
4357
4358
4359 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
4360 """
4361 Method to delete vApp
4362 Args :
4363 vapp_uuid - vApp UUID
4364 msg - Error message to be logged
4365 exp_type : Exception type
4366 Returns:
4367 None
4368 """
4369 if vapp_uuid:
4370 status = self.delete_vminstance(vapp_uuid)
4371 else:
4372 msg = "No vApp ID"
4373 self.logger.error(msg)
4374 if exp_type == "Genric":
4375 raise vimconn.vimconnException(msg)
4376 elif exp_type == "NotFound":
4377 raise vimconn.vimconnNotFoundException(message=msg)
4378
4379 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4380 """
4381 Method to attach SRIOV adapters to VM
4382
4383 Args:
4384 vapp_uuid - uuid of vApp/VM
4385 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
4386 vmname_andid - vmname
4387
4388 Returns:
4389 The status of add SRIOV adapter task , vm object and
4390 vcenter_conect object
4391 """
4392 vm_obj = None
4393 vcenter_conect, content = self.get_vcenter_content()
4394 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4395
4396 if vm_moref_id:
4397 try:
4398 no_of_sriov_devices = len(sriov_nets)
4399 if no_of_sriov_devices > 0:
4400 #Get VM and its host
4401 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4402 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4403 if host_obj and vm_obj:
4404 #get SRIOV devies from host on which vapp is currently installed
4405 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4406 no_of_sriov_devices,
4407 )
4408
4409 if len(avilable_sriov_devices) == 0:
4410 #find other hosts with active pci devices
4411 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4412 content,
4413 no_of_sriov_devices,
4414 )
4415
4416 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4417 #Migrate vm to the host where SRIOV devices are available
4418 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4419 new_host_obj))
4420 task = self.relocate_vm(new_host_obj, vm_obj)
4421 if task is not None:
4422 result = self.wait_for_vcenter_task(task, vcenter_conect)
4423 self.logger.info("Migrate VM status: {}".format(result))
4424 host_obj = new_host_obj
4425 else:
4426 self.logger.info("Fail to migrate VM : {}".format(result))
4427 raise vimconn.vimconnNotFoundException(
4428 "Fail to migrate VM : {} to host {}".format(
4429 vmname_andid,
4430 new_host_obj)
4431 )
4432
4433 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4434 #Add SRIOV devices one by one
4435 for sriov_net in sriov_nets:
4436 network_name = sriov_net.get('net_id')
4437 dvs_portgr_name = self.create_dvPort_group(network_name)
4438 if sriov_net.get('type') == "VF":
4439 #add vlan ID ,Modify portgroup for vlan ID
4440 self.configure_vlanID(content, vcenter_conect, network_name)
4441
4442 task = self.add_sriov_to_vm(content,
4443 vm_obj,
4444 host_obj,
4445 network_name,
4446 avilable_sriov_devices[0]
4447 )
4448 if task:
4449 status= self.wait_for_vcenter_task(task, vcenter_conect)
4450 if status:
4451 self.logger.info("Added SRIOV {} to VM {}".format(
4452 no_of_sriov_devices,
4453 str(vm_obj)))
4454 else:
4455 self.logger.error("Fail to add SRIOV {} to VM {}".format(
4456 no_of_sriov_devices,
4457 str(vm_obj)))
4458 raise vimconn.vimconnUnexpectedResponse(
4459 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
4460 )
4461 return True, vm_obj, vcenter_conect
4462 else:
4463 self.logger.error("Currently there is no host with"\
4464 " {} number of avaialble SRIOV "\
4465 "VFs required for VM {}".format(
4466 no_of_sriov_devices,
4467 vmname_andid)
4468 )
4469 raise vimconn.vimconnNotFoundException(
4470 "Currently there is no host with {} "\
4471 "number of avaialble SRIOV devices required for VM {}".format(
4472 no_of_sriov_devices,
4473 vmname_andid))
4474 else:
4475 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
4476
4477 except vmodl.MethodFault as error:
4478 self.logger.error("Error occurred while adding SRIOV {} ",error)
4479 return None, vm_obj, vcenter_conect
4480
4481
4482 def get_sriov_devices(self,host, no_of_vfs):
4483 """
4484 Method to get the details of SRIOV devices on given host
4485 Args:
4486 host - vSphere host object
4487 no_of_vfs - number of VFs needed on host
4488
4489 Returns:
4490 array of SRIOV devices
4491 """
4492 sriovInfo=[]
4493 if host:
4494 for device in host.config.pciPassthruInfo:
4495 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4496 if device.numVirtualFunction >= no_of_vfs:
4497 sriovInfo.append(device)
4498 break
4499 return sriovInfo
4500
4501
4502 def get_host_and_sriov_devices(self, content, no_of_vfs):
4503 """
4504 Method to get the details of SRIOV devices infromation on all hosts
4505
4506 Args:
4507 content - vSphere host object
4508 no_of_vfs - number of pci VFs needed on host
4509
4510 Returns:
4511 array of SRIOV devices and host object
4512 """
4513 host_obj = None
4514 sriov_device_objs = None
4515 try:
4516 if content:
4517 container = content.viewManager.CreateContainerView(content.rootFolder,
4518 [vim.HostSystem], True)
4519 for host in container.view:
4520 devices = self.get_sriov_devices(host, no_of_vfs)
4521 if devices:
4522 host_obj = host
4523 sriov_device_objs = devices
4524 break
4525 except Exception as exp:
4526 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4527
4528 return host_obj,sriov_device_objs
4529
4530
    def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
        """
        Method to add SRIOV adapter to vm

        Builds a VirtualSriovEthernetCard device spec backed by the given
        SRIOV physical function and submits a ReconfigVM task for the VM.

        Args:
            host_obj - vSphere host object
            vm_obj - vSphere vm object
            content - vCenter content object
            network_name - name of distributed virtaul portgroup
            sriov_device - SRIOV device info

        Returns:
            task object for the VM reconfiguration, or None on failure
        """
        devices = []
        vnic_label = "sriov nic"
        try:
            # input may be the portgroup key; the network backing below
            # needs the portgroup's display name
            dvs_portgr = self.get_dvport_group(network_name)
            network_name = dvs_portgr.name
            nic = vim.vm.device.VirtualDeviceSpec()
            # VM device
            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
            nic.device = vim.vm.device.VirtualSriovEthernetCard()
            nic.device.addressType = 'assigned'
            #nic.device.key = 13016
            nic.device.deviceInfo = vim.Description()
            nic.device.deviceInfo.label = vnic_label
            nic.device.deviceInfo.summary = network_name
            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()

            # attach the adapter to the (freshly created) portgroup's network
            nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
            nic.device.backing.deviceName = network_name
            nic.device.backing.useAutoDetect = False
            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
            nic.device.connectable.startConnected = True
            nic.device.connectable.allowGuestControl = True

            # bind the virtual NIC to the SRIOV physical function by PCI id
            nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
            nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
            nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id

            devices.append(nic)
            vmconf = vim.vm.ConfigSpec(deviceChange=devices)
            # asynchronous: caller is expected to wait on the returned task
            task = vm_obj.ReconfigVM_Task(vmconf)
            return task
        except Exception as exp:
            self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
            return None
4579
4580
4581 def create_dvPort_group(self, network_name):
4582 """
4583 Method to create disributed virtual portgroup
4584
4585 Args:
4586 network_name - name of network/portgroup
4587
4588 Returns:
4589 portgroup key
4590 """
4591 try:
4592 new_network_name = [network_name, '-', str(uuid.uuid4())]
4593 network_name=''.join(new_network_name)
4594 vcenter_conect, content = self.get_vcenter_content()
4595
4596 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4597 if dv_switch:
4598 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4599 dv_pg_spec.name = network_name
4600
4601 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4602 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4603 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4604 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4605 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4606 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4607
4608 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4609 self.wait_for_vcenter_task(task, vcenter_conect)
4610
4611 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4612 if dvPort_group:
4613 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
4614 return dvPort_group.key
4615 else:
4616 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
4617
4618 except Exception as exp:
4619 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
4620 " : {}".format(network_name, exp))
4621 return None
4622
4623 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4624 """
4625 Method to reconfigure disributed virtual portgroup
4626
4627 Args:
4628 dvPort_group_name - name of disributed virtual portgroup
4629 content - vCenter content object
4630 config_info - disributed virtual portgroup configuration
4631
4632 Returns:
4633 task object
4634 """
4635 try:
4636 dvPort_group = self.get_dvport_group(dvPort_group_name)
4637 if dvPort_group:
4638 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4639 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4640 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4641 if "vlanID" in config_info:
4642 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4643 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4644
4645 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4646 return task
4647 else:
4648 return None
4649 except Exception as exp:
4650 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
4651 " : {}".format(dvPort_group_name, exp))
4652 return None
4653
4654
4655 def destroy_dvport_group(self , dvPort_group_name):
4656 """
4657 Method to destroy disributed virtual portgroup
4658
4659 Args:
4660 network_name - name of network/portgroup
4661
4662 Returns:
4663 True if portgroup successfully got deleted else false
4664 """
4665 vcenter_conect, content = self.get_vcenter_content()
4666 try:
4667 status = None
4668 dvPort_group = self.get_dvport_group(dvPort_group_name)
4669 if dvPort_group:
4670 task = dvPort_group.Destroy_Task()
4671 status = self.wait_for_vcenter_task(task, vcenter_conect)
4672 return status
4673 except vmodl.MethodFault as exp:
4674 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
4675 exp, dvPort_group_name))
4676 return None
4677
4678
4679 def get_dvport_group(self, dvPort_group_name):
4680 """
4681 Method to get disributed virtual portgroup
4682
4683 Args:
4684 network_name - name of network/portgroup
4685
4686 Returns:
4687 portgroup object
4688 """
4689 vcenter_conect, content = self.get_vcenter_content()
4690 dvPort_group = None
4691 try:
4692 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4693 for item in container.view:
4694 if item.key == dvPort_group_name:
4695 dvPort_group = item
4696 break
4697 return dvPort_group
4698 except vmodl.MethodFault as exp:
4699 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4700 exp, dvPort_group_name))
4701 return None
4702
4703 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4704 """
4705 Method to get disributed virtual portgroup vlanID
4706
4707 Args:
4708 network_name - name of network/portgroup
4709
4710 Returns:
4711 vlan ID
4712 """
4713 vlanId = None
4714 try:
4715 dvPort_group = self.get_dvport_group(dvPort_group_name)
4716 if dvPort_group:
4717 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4718 except vmodl.MethodFault as exp:
4719 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4720 exp, dvPort_group_name))
4721 return vlanId
4722
4723
4724 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4725 """
4726 Method to configure vlanID in disributed virtual portgroup vlanID
4727
4728 Args:
4729 network_name - name of network/portgroup
4730
4731 Returns:
4732 None
4733 """
4734 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4735 if vlanID == 0:
4736 #configure vlanID
4737 vlanID = self.genrate_vlanID(dvPort_group_name)
4738 config = {"vlanID":vlanID}
4739 task = self.reconfig_portgroup(content, dvPort_group_name,
4740 config_info=config)
4741 if task:
4742 status= self.wait_for_vcenter_task(task, vcenter_conect)
4743 if status:
4744 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4745 dvPort_group_name,vlanID))
4746 else:
4747 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
4748 dvPort_group_name, vlanID))
4749
4750
4751 def genrate_vlanID(self, network_name):
4752 """
4753 Method to get unused vlanID
4754 Args:
4755 network_name - name of network/portgroup
4756 Returns:
4757 vlanID
4758 """
4759 vlan_id = None
4760 used_ids = []
4761 if self.config.get('vlanID_range') == None:
4762 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4763 "at config value before creating sriov network with vlan tag")
4764 if "used_vlanIDs" not in self.persistent_info:
4765 self.persistent_info["used_vlanIDs"] = {}
4766 else:
4767 used_ids = self.persistent_info["used_vlanIDs"].values()
4768
4769 for vlanID_range in self.config.get('vlanID_range'):
4770 start_vlanid , end_vlanid = vlanID_range.split("-")
4771 if start_vlanid > end_vlanid:
4772 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4773 vlanID_range))
4774
4775 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4776 if id not in used_ids:
4777 vlan_id = id
4778 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4779 return vlan_id
4780 if vlan_id is None:
4781 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4782
4783
4784 def get_obj(self, content, vimtype, name):
4785 """
4786 Get the vsphere object associated with a given text name
4787 """
4788 obj = None
4789 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4790 for item in container.view:
4791 if item.name == name:
4792 obj = item
4793 break
4794 return obj
4795
4796
4797 def insert_media_to_vm(self, vapp, image_id):
4798 """
4799 Method to insert media CD-ROM (ISO image) from catalog to vm.
4800 vapp - vapp object to get vm id
4801 Image_id - image id for cdrom to be inerted to vm
4802 """
4803 # create connection object
4804 vca = self.connect()
4805 try:
4806 # fetching catalog details
4807 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
4808 response = Http.get(url=rest_url,
4809 headers=vca.vcloud_session.get_vcloud_headers(),
4810 verify=vca.verify,
4811 logger=vca.logger)
4812
4813 if response.status_code != 200:
4814 self.logger.error("REST call {} failed reason : {}"\
4815 "status code : {}".format(url_rest_call,
4816 response.content,
4817 response.status_code))
4818 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
4819 "catalog details")
4820 # searching iso name and id
4821 iso_name,media_id = self.get_media_details(vca, response.content)
4822
4823 if iso_name and media_id:
4824 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
4825 <ns6:MediaInsertOrEjectParams
4826 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
4827 <ns6:Media
4828 type="application/vnd.vmware.vcloud.media+xml"
4829 name="{}.iso"
4830 id="urn:vcloud:media:{}"
4831 href="https://{}/api/media/{}"/>
4832 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
4833 vca.host,media_id)
4834
4835 for vms in vapp._get_vms():
4836 vm_id = (vms.id).split(':')[-1]
4837
4838 headers = vca.vcloud_session.get_vcloud_headers()
4839 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
4840 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
4841
4842 response = Http.post(url=rest_url,
4843 headers=headers,
4844 data=data,
4845 verify=vca.verify,
4846 logger=vca.logger)
4847
4848 if response.status_code != 202:
4849 self.logger.error("Failed to insert CD-ROM to vm")
4850 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
4851 "ISO image to vm")
4852 else:
4853 task = taskType.parseString(response.content, True)
4854 if isinstance(task, GenericTask):
4855 vca.block_until_completed(task)
4856 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
4857 " image to vm {}".format(vm_id))
4858 except Exception as exp:
4859 self.logger.error("insert_media_to_vm() : exception occurred "\
4860 "while inserting media CD-ROM")
4861 raise vimconn.vimconnException(message=exp)
4862
4863
    def get_media_details(self, vca, content):
        """
        Method to get catalog item details

        Parses the catalog XML for CatalogItem hrefs, fetches each item and
        returns the first entity whose href points at a media object.

        vca - connection object
        content - Catalog details (XML string/bytes)
        Return - Media name, media id of the first media entity found;
                 (False, False) when 'content' is empty; None (implicit) when
                 the catalog has items but none of them reference media
        """
        cataloghref_list = []
        try:
            if content:
                vm_list_xmlroot = XmlElementTree.fromstring(content)
                # collect hrefs of all CatalogItem elements
                for child in vm_list_xmlroot.iter():
                    if 'CatalogItem' in child.tag:
                        cataloghref_list.append(child.attrib.get('href'))
                # NOTE(review): always true - cataloghref_list is a list, never
                # None; a truthiness check was presumably intended
                if cataloghref_list is not None:
                    for href in cataloghref_list:
                        if href:
                            response = Http.get(url=href,
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                            if response.status_code != 200:
                                self.logger.error("REST call {} failed reason : {}"\
                                             "status code : {}".format(href,
                                                           response.content,
                                                           response.status_code))
                                raise vimconn.vimconnException("get_media_details : Failed to get "\
                                                         "catalogitem details")
                            list_xmlroot = XmlElementTree.fromstring(response.content)
                            for child in list_xmlroot.iter():
                                if 'Entity' in child.tag:
                                    # media entities carry '/api/media/...' hrefs;
                                    # the id is the last path segment
                                    if 'media' in child.attrib.get('href'):
                                        name = child.attrib.get('name')
                                        media_id = child.attrib.get('href').split('/').pop()
                                        return name,media_id
            else:
                # empty catalog content: signal "not found" to the caller
                self.logger.debug("Media name and id not found")
                return False,False
        except Exception as exp:
            self.logger.error("get_media_details : exception occurred "\
                                      "getting media details")
            raise vimconn.vimconnException(message=exp)
4906