1.Created new file for VIO connector as vimconn_vio.py that extends the existing...
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
# Connector type used when logging in to a standalone vCloud Director.
STANDALONE = 'standalone'

# Keys used inside the in-memory flavor dictionaries.
FLAVOR_RAM_KEY = 'ram'
FLAVOR_VCPUS_KEY = 'vcpus'
FLAVOR_DISK_KEY = 'disk'

# IP profile applied when the caller does not supply one.
DEFAULT_IP_PROFILE = {'dhcp_count': 50,
                      'dhcp_enabled': True,
                      'ip_version': "IPv4"
                      }

# Polling interval and upper bound (seconds) for task wait loops.
INTERVAL_TIME = 5
MAX_WAIT_TIME = 1800

# pyvcloud API version used when opening VCA sessions.
VCAVERSION = '5.9'

__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
__date__ = "$12-Jan-2017 11:09:29$"
__version__ = '0.1'

# vCloud Director vApp status codes, for reference:
#   -1: "Could not be created",   0: "Unresolved",
#    1: "Resolved",               2: "Deployed",
#    3: "Suspended",              4: "Powered on",
#    5: "Waiting for user input", 6: "Unknown state",
#    7: "Unrecognized state",     8: "Powered off",
#    9: "Inconsistent state",    10: "Children do not all have the same status",
#   11: "Upload initiated, OVF descriptor pending",
#   12: "Upload initiated, copying contents",
#   13: "Upload initiated, disk contents pending",
#   14: "Upload has been quarantined",
#   15: "Upload quarantine period has expired"

# Mapping of vCD vApp status codes to MANO status strings.
vcdStatusCode2manoFormat = {4: 'ACTIVE',
                            7: 'PAUSED',
                            3: 'SUSPENDED',
                            8: 'INACTIVE',
                            12: 'BUILD',
                            -1: 'ERROR',
                            14: 'DELETED'}

# Mapping of network status strings to MANO status strings (identity map).
netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
                        'ERROR': 'ERROR', 'DELETED': 'DELETED'
                        }
120
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
128 Constructor create vmware connector to vCloud director.
129
130 By default construct doesn't validate connection state. So client can create object with None arguments.
131 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
132
133 a) It initialize organization UUID
134 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
135
136 Args:
137 uuid - is organization uuid.
138 name - is organization name that must be presented in vCloud director.
139 tenant_id - is VDC uuid it must be presented in vCloud director
140 tenant_name - is VDC name.
141 url - is hostname or ip address of vCloud director
142 url_admin - same as above.
143 user - is user that administrator for organization. Caller must make sure that
144 username has right privileges.
145
146 password - is password for a user.
147
148 VMware connector also requires PVDC administrative privileges and separate account.
149 This variables must be passed via config argument dict contains keys
150
151 dict['admin_username']
152 dict['admin_password']
153 config - Provide NSX and vCenter information
154
155 Returns:
156 Nothing.
157 """
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
217 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
233 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 if index == 'tenant_id':
243 return self.tenant_id
244 if index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 if index == 'tenant_id':
269 self.tenant_id = value
270 if index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
289 def connect_as_admin(self):
290 """ Method connect as pvdc admin user to vCloud director.
291 There are certain action that can be done only by provider vdc admin user.
292 Organization creation / provider network creation etc.
293
294 Returns:
295 The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
296 """
297
298 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
299
300 vca_admin = VCA(host=self.url,
301 username=self.admin_user,
302 service_type=STANDALONE,
303 version=VCAVERSION,
304 verify=False,
305 log=False)
306 result = vca_admin.login(password=self.admin_password, org='System')
307 if not result:
308 raise vimconn.vimconnConnectionException(
309 "Can't connect to a vCloud director as: {}".format(self.admin_user))
310 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
311 if result is True:
312 self.logger.info(
313 "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
314
315 return vca_admin
316
317 def connect(self):
318 """ Method connect as normal user to vCloud director.
319
320 Returns:
321 The return vca object that letter can be used to connect to vCloud director as admin for VDC
322 """
323
324 try:
325 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
326 self.user,
327 self.org_name))
328 vca = VCA(host=self.url,
329 username=self.user,
330 service_type=STANDALONE,
331 version=VCAVERSION,
332 verify=False,
333 log=False)
334
335 result = vca.login(password=self.passwd, org=self.org_name)
336 if not result:
337 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
338 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
339 if result is True:
340 self.logger.info(
341 "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
342
343 except:
344 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
345 "{} as user: {}".format(self.org_name, self.user))
346
347 return vca
348
349 def init_organization(self):
350 """ Method initialize organization UUID and VDC parameters.
351
352 At bare minimum client must provide organization name that present in vCloud director and VDC.
353
354 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
355 The Org - UUID will be initialized at the run time if data center present in vCloud director.
356
357 Returns:
358 The return vca object that letter can be used to connect to vcloud direct as admin
359 """
360 try:
361 if self.org_uuid is None:
362 org_dict = self.get_org_list()
363 for org in org_dict:
364 # we set org UUID at the init phase but we can do it only when we have valid credential.
365 if org_dict[org] == self.org_name:
366 self.org_uuid = org
367 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
368 break
369 else:
370 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
371
372 # if well good we require for org details
373 org_details_dict = self.get_org(org_uuid=self.org_uuid)
374
375 # we have two case if we want to initialize VDC ID or VDC name at run time
376 # tenant_name provided but no tenant id
377 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
378 vdcs_dict = org_details_dict['vdcs']
379 for vdc in vdcs_dict:
380 if vdcs_dict[vdc] == self.tenant_name:
381 self.tenant_id = vdc
382 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
383 self.org_name))
384 break
385 else:
386 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
387 # case two we have tenant_id but we don't have tenant name so we find and set it.
388 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
389 vdcs_dict = org_details_dict['vdcs']
390 for vdc in vdcs_dict:
391 if vdc == self.tenant_id:
392 self.tenant_name = vdcs_dict[vdc]
393 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
394 self.org_name))
395 break
396 else:
397 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
398 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
399 except:
400 self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
401 self.logger.debug(traceback.format_exc())
402 self.org_uuid = None
403
404 def new_tenant(self, tenant_name=None, tenant_description=None):
405 """ Method adds a new tenant to VIM with this name.
406 This action requires access to create VDC action in vCloud director.
407
408 Args:
409 tenant_name is tenant_name to be created.
410 tenant_description not used for this call
411
412 Return:
413 returns the tenant identifier in UUID format.
414 If action is failed method will throw vimconn.vimconnException method
415 """
416 vdc_task = self.create_vdc(vdc_name=tenant_name)
417 if vdc_task is not None:
418 vdc_uuid, value = vdc_task.popitem()
419 self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
420 return vdc_uuid
421 else:
422 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
423
424 def delete_tenant(self, tenant_id=None):
425 """Delete a tenant from VIM"""
426 'Returns the tenant identifier'
427 raise vimconn.vimconnNotImplemented("Should have implemented this")
428
429 def get_tenant_list(self, filter_dict={}):
430 """Obtain tenants of VIM
431 filter_dict can contain the following keys:
432 name: filter by tenant name
433 id: filter by tenant uuid/id
434 <other VIM specific>
435 Returns the tenant list of dictionaries:
436 [{'name':'<name>, 'id':'<id>, ...}, ...]
437
438 """
439 org_dict = self.get_org(self.org_uuid)
440 vdcs_dict = org_dict['vdcs']
441
442 vdclist = []
443 try:
444 for k in vdcs_dict:
445 entry = {'name': vdcs_dict[k], 'id': k}
446 # if caller didn't specify dictionary we return all tenants.
447 if filter_dict is not None and filter_dict:
448 filtered_entry = entry.copy()
449 filtered_dict = set(entry.keys()) - set(filter_dict)
450 for unwanted_key in filtered_dict: del entry[unwanted_key]
451 if filter_dict == entry:
452 vdclist.append(filtered_entry)
453 else:
454 vdclist.append(entry)
455 except:
456 self.logger.debug("Error in get_tenant_list()")
457 self.logger.debug(traceback.format_exc())
458 raise vimconn.vimconnException("Incorrect state. {}")
459
460 return vdclist
461
462 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
463 """Adds a tenant network to VIM
464 net_name is the name
465 net_type can be 'bridge','data'.'ptp'.
466 ip_profile is a dict containing the IP parameters of the network
467 shared is a boolean
468 Returns the network identifier"""
469
470 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
471 .format(net_name, net_type, ip_profile, shared))
472
473 isshared = 'false'
474 if shared:
475 isshared = 'true'
476
477 # ############# Stub code for SRIOV #################
478 # if net_type == "data" or net_type == "ptp":
479 # if self.config.get('dv_switch_name') == None:
480 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
481 # network_uuid = self.create_dvPort_group(net_name)
482
483 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
484 ip_profile=ip_profile, isshared=isshared)
485 if network_uuid is not None:
486 return network_uuid
487 else:
488 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
489
490 def get_vcd_network_list(self):
491 """ Method available organization for a logged in tenant
492
493 Returns:
494 The return vca object that letter can be used to connect to vcloud direct as admin
495 """
496
497 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
498 vca = self.connect()
499 if not vca:
500 raise vimconn.vimconnConnectionException("self.connect() is failed.")
501
502 if not self.tenant_name:
503 raise vimconn.vimconnConnectionException("Tenant name is empty.")
504
505 vdc = vca.get_vdc(self.tenant_name)
506 if vdc is None:
507 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
508
509 vdc_uuid = vdc.get_id().split(":")[3]
510 networks = vca.get_networks(vdc.get_name())
511 network_list = []
512 try:
513 for network in networks:
514 filter_dict = {}
515 netid = network.get_id().split(":")
516 if len(netid) != 4:
517 continue
518
519 filter_dict["name"] = network.get_name()
520 filter_dict["id"] = netid[3]
521 filter_dict["shared"] = network.get_IsShared()
522 filter_dict["tenant_id"] = vdc_uuid
523 if network.get_status() == 1:
524 filter_dict["admin_state_up"] = True
525 else:
526 filter_dict["admin_state_up"] = False
527 filter_dict["status"] = "ACTIVE"
528 filter_dict["type"] = "bridge"
529 network_list.append(filter_dict)
530 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
531 except:
532 self.logger.debug("Error in get_vcd_network_list")
533 self.logger.debug(traceback.format_exc())
534 pass
535
536 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
537 return network_list
538
539 def get_network_list(self, filter_dict={}):
540 """Obtain tenant networks of VIM
541 Filter_dict can be:
542 name: network name OR/AND
543 id: network uuid OR/AND
544 shared: boolean OR/AND
545 tenant_id: tenant OR/AND
546 admin_state_up: boolean
547 status: 'ACTIVE'
548
549 [{key : value , key : value}]
550
551 Returns the network list of dictionaries:
552 [{<the fields at Filter_dict plus some VIM specific>}, ...]
553 List can be empty
554 """
555
556 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
557 vca = self.connect()
558 if not vca:
559 raise vimconn.vimconnConnectionException("self.connect() is failed.")
560
561 if not self.tenant_name:
562 raise vimconn.vimconnConnectionException("Tenant name is empty.")
563
564 vdc = vca.get_vdc(self.tenant_name)
565 if vdc is None:
566 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
567
568 try:
569 vdcid = vdc.get_id().split(":")[3]
570 networks = vca.get_networks(vdc.get_name())
571 network_list = []
572
573 for network in networks:
574 filter_entry = {}
575 net_uuid = network.get_id().split(":")
576 if len(net_uuid) != 4:
577 continue
578 else:
579 net_uuid = net_uuid[3]
580 # create dict entry
581 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
582 vdcid,
583 network.get_name()))
584 filter_entry["name"] = network.get_name()
585 filter_entry["id"] = net_uuid
586 filter_entry["shared"] = network.get_IsShared()
587 filter_entry["tenant_id"] = vdcid
588 if network.get_status() == 1:
589 filter_entry["admin_state_up"] = True
590 else:
591 filter_entry["admin_state_up"] = False
592 filter_entry["status"] = "ACTIVE"
593 filter_entry["type"] = "bridge"
594 filtered_entry = filter_entry.copy()
595
596 if filter_dict is not None and filter_dict:
597 # we remove all the key : value we don't care and match only
598 # respected field
599 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
600 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
601 if filter_dict == filter_entry:
602 network_list.append(filtered_entry)
603 else:
604 network_list.append(filtered_entry)
605 except:
606 self.logger.debug("Error in get_vcd_network_list")
607 self.logger.debug(traceback.format_exc())
608
609 self.logger.debug("Returning {}".format(network_list))
610 return network_list
611
612 def get_network(self, net_id):
613 """Method obtains network details of net_id VIM network
614 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
615
616 vca = self.connect()
617 if not vca:
618 raise vimconn.vimconnConnectionException("self.connect() is failed")
619
620 try:
621 vdc = vca.get_vdc(self.tenant_name)
622 vdc_id = vdc.get_id().split(":")[3]
623
624 networks = vca.get_networks(vdc.get_name())
625 filter_dict = {}
626
627 for network in networks:
628 vdc_network_id = network.get_id().split(":")
629 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
630 filter_dict["name"] = network.get_name()
631 filter_dict["id"] = vdc_network_id[3]
632 filter_dict["shared"] = network.get_IsShared()
633 filter_dict["tenant_id"] = vdc_id
634 if network.get_status() == 1:
635 filter_dict["admin_state_up"] = True
636 else:
637 filter_dict["admin_state_up"] = False
638 filter_dict["status"] = "ACTIVE"
639 filter_dict["type"] = "bridge"
640 self.logger.debug("Returning {}".format(filter_dict))
641 return filter_dict
642 except:
643 self.logger.debug("Error in get_network")
644 self.logger.debug(traceback.format_exc())
645
646 return filter_dict
647
648 def delete_network(self, net_id):
649 """
650 Method Deletes a tenant network from VIM, provide the network id.
651
652 Returns the network identifier or raise an exception
653 """
654
655 vca = self.connect()
656 if not vca:
657 raise vimconn.vimconnConnectionException("self.connect() for tenant {} is failed.".format(self.tenant_name))
658
659 # ############# Stub code for SRIOV #################
660 # dvport_group = self.get_dvport_group(net_id)
661 # if dvport_group:
662 # #delete portgroup
663 # status = self.destroy_dvport_group(net_id)
664 # if status:
665 # # Remove vlanID from persistent info
666 # if net_id in self.persistent_info["used_vlanIDs"]:
667 # del self.persistent_info["used_vlanIDs"][net_id]
668 #
669 # return net_id
670
671 vcd_network = self.get_vcd_network(network_uuid=net_id)
672 if vcd_network is not None and vcd_network:
673 if self.delete_network_action(network_uuid=net_id):
674 return net_id
675 else:
676 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
677
678 def refresh_nets_status(self, net_list):
679 """Get the status of the networks
680 Params: the list of network identifiers
681 Returns a dictionary with:
682 net_id: #VIM id of this network
683 status: #Mandatory. Text with one of:
684 # DELETED (not found at vim)
685 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
686 # OTHER (Vim reported other status not understood)
687 # ERROR (VIM indicates an ERROR status)
688 # ACTIVE, INACTIVE, DOWN (admin down),
689 # BUILD (on building process)
690 #
691 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
692 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
693
694 """
695
696 vca = self.connect()
697 if not vca:
698 raise vimconn.vimconnConnectionException("self.connect() is failed")
699
700 dict_entry = {}
701 try:
702 for net in net_list:
703 errormsg = ''
704 vcd_network = self.get_vcd_network(network_uuid=net)
705 if vcd_network is not None and vcd_network:
706 if vcd_network['status'] == '1':
707 status = 'ACTIVE'
708 else:
709 status = 'DOWN'
710 else:
711 status = 'DELETED'
712 errormsg = 'Network not found.'
713
714 dict_entry[net] = {'status': status, 'error_msg': errormsg,
715 'vim_info': yaml.safe_dump(vcd_network)}
716 except:
717 self.logger.debug("Error in refresh_nets_status")
718 self.logger.debug(traceback.format_exc())
719
720 return dict_entry
721
722 def get_flavor(self, flavor_id):
723 """Obtain flavor details from the VIM
724 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
725 """
726 if flavor_id not in vimconnector.flavorlist:
727 raise vimconn.vimconnNotFoundException("Flavor not found.")
728 return vimconnector.flavorlist[flavor_id]
729
730 def new_flavor(self, flavor_data):
731 """Adds a tenant flavor to VIM
732 flavor_data contains a dictionary with information, keys:
733 name: flavor name
734 ram: memory (cloud type) in MBytes
735 vpcus: cpus (cloud type)
736 extended: EPA parameters
737 - numas: #items requested in same NUMA
738 memory: number of 1G huge pages memory
739 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
740 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
741 - name: interface name
742 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
743 bandwidth: X Gbps; requested guarantee bandwidth
744 vpci: requested virtual PCI address
745 disk: disk size
746 is_public:
747 #TODO to concrete
748 Returns the flavor identifier"""
749
750 # generate a new uuid put to internal dict and return it.
751 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
752 new_flavor=flavor_data
753 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
754 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
755 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
756
757 extended_flv = flavor_data.get("extended")
758 if extended_flv:
759 numas=extended_flv.get("numas")
760 if numas:
761 for numa in numas:
762 #overwrite ram and vcpus
763 ram = numa['memory']*1024
764 if 'paired-threads' in numa:
765 cpu = numa['paired-threads']*2
766 elif 'cores' in numa:
767 cpu = numa['cores']
768 elif 'threads' in numa:
769 cpu = numa['threads']
770
771 new_flavor[FLAVOR_RAM_KEY] = ram
772 new_flavor[FLAVOR_VCPUS_KEY] = cpu
773 new_flavor[FLAVOR_DISK_KEY] = disk
774 # generate a new uuid put to internal dict and return it.
775 flavor_id = uuid.uuid4()
776 vimconnector.flavorlist[str(flavor_id)] = new_flavor
777 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
778
779 return str(flavor_id)
780
781 def delete_flavor(self, flavor_id):
782 """Deletes a tenant flavor from VIM identify by its id
783
784 Returns the used id or raise an exception
785 """
786 if flavor_id not in vimconnector.flavorlist:
787 raise vimconn.vimconnNotFoundException("Flavor not found.")
788
789 vimconnector.flavorlist.pop(flavor_id, None)
790 return flavor_id
791
792 def new_image(self, image_dict):
793 """
794 Adds a tenant image to VIM
795 Returns:
796 200, image-id if the image is created
797 <0, message if there is an error
798 """
799
800 return self.get_image_id_from_path(image_dict['location'])
801
802 def delete_image(self, image_id):
803 """
804
805 :param image_id:
806 :return:
807 """
808
809 raise vimconn.vimconnNotImplemented("Should have implemented this")
810
811 def catalog_exists(self, catalog_name, catalogs):
812 """
813
814 :param catalog_name:
815 :param catalogs:
816 :return:
817 """
818 for catalog in catalogs:
819 if catalog.name == catalog_name:
820 return True
821 return False
822
823 def create_vimcatalog(self, vca=None, catalog_name=None):
824 """ Create new catalog entry in vCloud director.
825
826 Args
827 vca: vCloud director.
828 catalog_name catalog that client wish to create. Note no validation done for a name.
829 Client must make sure that provide valid string representation.
830
831 Return (bool) True if catalog created.
832
833 """
834 try:
835 task = vca.create_catalog(catalog_name, catalog_name)
836 result = vca.block_until_completed(task)
837 if not result:
838 return False
839 catalogs = vca.get_catalogs()
840 except:
841 return False
842 return self.catalog_exists(catalog_name, catalogs)
843
844 # noinspection PyIncorrectDocstring
845 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
846 description='', progress=False, chunk_bytes=128 * 1024):
847 """
848 Uploads a OVF file to a vCloud catalog
849
850 :param chunk_bytes:
851 :param progress:
852 :param description:
853 :param image_name:
854 :param vca:
855 :param catalog_name: (str): The name of the catalog to upload the media.
856 :param media_file_name: (str): The name of the local media file to upload.
857 :return: (bool) True if the media file was successfully uploaded, false otherwise.
858 """
859 os.path.isfile(media_file_name)
860 statinfo = os.stat(media_file_name)
861
862 # find a catalog entry where we upload OVF.
863 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
864 # status change.
865 # if VCD can parse OVF we upload VMDK file
866 try:
867 for catalog in vca.get_catalogs():
868 if catalog_name != catalog.name:
869 continue
870 link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
871 link.get_rel() == 'add', catalog.get_Link())
872 assert len(link) == 1
873 data = """
874 <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
875 """ % (escape(catalog_name), escape(description))
876 headers = vca.vcloud_session.get_vcloud_headers()
877 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
878 response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
879 if response.status_code == requests.codes.created:
880 catalogItem = XmlElementTree.fromstring(response.content)
881 entity = [child for child in catalogItem if
882 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
883 href = entity.get('href')
884 template = href
885 response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
886 verify=vca.verify, logger=self.logger)
887
888 if response.status_code == requests.codes.ok:
889 media = mediaType.parseString(response.content, True)
890 link = filter(lambda link: link.get_rel() == 'upload:default',
891 media.get_Files().get_File()[0].get_Link())[0]
892 headers = vca.vcloud_session.get_vcloud_headers()
893 headers['Content-Type'] = 'Content-Type text/xml'
894 response = Http.put(link.get_href(),
895 data=open(media_file_name, 'rb'),
896 headers=headers,
897 verify=vca.verify, logger=self.logger)
898 if response.status_code != requests.codes.ok:
899 self.logger.debug(
900 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
901 media_file_name))
902 return False
903
904 # TODO fix this with aync block
905 time.sleep(5)
906
907 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
908
909 # uploading VMDK file
910 # check status of OVF upload and upload remaining files.
911 response = Http.get(template,
912 headers=vca.vcloud_session.get_vcloud_headers(),
913 verify=vca.verify,
914 logger=self.logger)
915
916 if response.status_code == requests.codes.ok:
917 media = mediaType.parseString(response.content, True)
918 number_of_files = len(media.get_Files().get_File())
919 for index in xrange(0, number_of_files):
920 links_list = filter(lambda link: link.get_rel() == 'upload:default',
921 media.get_Files().get_File()[index].get_Link())
922 for link in links_list:
923 # we skip ovf since it already uploaded.
924 if 'ovf' in link.get_href():
925 continue
926 # The OVF file and VMDK must be in a same directory
927 head, tail = os.path.split(media_file_name)
928 file_vmdk = head + '/' + link.get_href().split("/")[-1]
929 if not os.path.isfile(file_vmdk):
930 return False
931 statinfo = os.stat(file_vmdk)
932 if statinfo.st_size == 0:
933 return False
934 hrefvmdk = link.get_href()
935
936 if progress:
937 print("Uploading file: {}".format(file_vmdk))
938 if progress:
939 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
940 FileTransferSpeed()]
941 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
942
943 bytes_transferred = 0
944 f = open(file_vmdk, 'rb')
945 while bytes_transferred < statinfo.st_size:
946 my_bytes = f.read(chunk_bytes)
947 if len(my_bytes) <= chunk_bytes:
948 headers = vca.vcloud_session.get_vcloud_headers()
949 headers['Content-Range'] = 'bytes %s-%s/%s' % (
950 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
951 headers['Content-Length'] = str(len(my_bytes))
952 response = Http.put(hrefvmdk,
953 headers=headers,
954 data=my_bytes,
955 verify=vca.verify,
956 logger=None)
957
958 if response.status_code == requests.codes.ok:
959 bytes_transferred += len(my_bytes)
960 if progress:
961 progress_bar.update(bytes_transferred)
962 else:
963 self.logger.debug(
964 'file upload failed with error: [%s] %s' % (response.status_code,
965 response.content))
966
967 f.close()
968 return False
969 f.close()
970 if progress:
971 progress_bar.finish()
972 time.sleep(10)
973 return True
974 else:
975 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
976 format(catalog_name, media_file_name))
977 return False
978 except Exception as exp:
979 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
980 .format(catalog_name,media_file_name, exp))
981 raise vimconn.vimconnException(
982 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
983 .format(catalog_name,media_file_name, exp))
984
985 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
986 return False
987
988 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
989 """Upload media file"""
990 # TODO add named parameters for readability
991
992 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
993 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
994
995 def validate_uuid4(self, uuid_string=None):
996 """ Method validate correct format of UUID.
997
998 Return: true if string represent valid uuid
999 """
1000 try:
1001 val = uuid.UUID(uuid_string, version=4)
1002 except ValueError:
1003 return False
1004 return True
1005
1006 def get_catalogid(self, catalog_name=None, catalogs=None):
1007 """ Method check catalog and return catalog ID in UUID format.
1008
1009 Args
1010 catalog_name: catalog name as string
1011 catalogs: list of catalogs.
1012
1013 Return: catalogs uuid
1014 """
1015
1016 for catalog in catalogs:
1017 if catalog.name == catalog_name:
1018 catalog_id = catalog.get_id().split(":")
1019 return catalog_id[3]
1020 return None
1021
1022 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1023 """ Method check catalog and return catalog name lookup done by catalog UUID.
1024
1025 Args
1026 catalog_name: catalog name as string
1027 catalogs: list of catalogs.
1028
1029 Return: catalogs name or None
1030 """
1031
1032 if not self.validate_uuid4(uuid_string=catalog_uuid):
1033 return None
1034
1035 for catalog in catalogs:
1036 catalog_id = catalog.get_id().split(":")[3]
1037 if catalog_id == catalog_uuid:
1038 return catalog.name
1039 return None
1040
1041 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1042 """ Method check catalog and return catalog name lookup done by catalog UUID.
1043
1044 Args
1045 catalog_name: catalog name as string
1046 catalogs: list of catalogs.
1047
1048 Return: catalogs name or None
1049 """
1050
1051 if not self.validate_uuid4(uuid_string=catalog_uuid):
1052 return None
1053
1054 for catalog in catalogs:
1055 catalog_id = catalog.get_id().split(":")[3]
1056 if catalog_id == catalog_uuid:
1057 return catalog
1058 return None
1059
1060 def get_image_id_from_path(self, path=None, progress=False):
1061 """ Method upload OVF image to vCloud director.
1062
1063 Each OVF image represented as single catalog entry in vcloud director.
1064 The method check for existing catalog entry. The check done by file name without file extension.
1065
1066 if given catalog name already present method will respond with existing catalog uuid otherwise
1067 it will create new catalog entry and upload OVF file to newly created catalog.
1068
1069 If method can't create catalog entry or upload a file it will throw exception.
1070
1071 Method accept boolean flag progress that will output progress bar. It useful method
1072 for standalone upload use case. In case to test large file upload.
1073
1074 Args
1075 path: - valid path to OVF file.
1076 progress - boolean progress bar show progress bar.
1077
1078 Return: if image uploaded correct method will provide image catalog UUID.
1079 """
1080 vca = self.connect()
1081 if not vca:
1082 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1083
1084 if not path:
1085 raise vimconn.vimconnException("Image path can't be None.")
1086
1087 if not os.path.isfile(path):
1088 raise vimconn.vimconnException("Can't read file. File not found.")
1089
1090 if not os.access(path, os.R_OK):
1091 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1092
1093 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1094
1095 dirpath, filename = os.path.split(path)
1096 flname, file_extension = os.path.splitext(path)
1097 if file_extension != '.ovf':
1098 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1099 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1100
1101 catalog_name = os.path.splitext(filename)[0]
1102 catalog_md5_name = hashlib.md5(path).hexdigest()
1103 self.logger.debug("File name {} Catalog Name {} file path {} "
1104 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1105
1106 try:
1107 catalogs = vca.get_catalogs()
1108 except Exception as exp:
1109 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1110 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1111
1112 if len(catalogs) == 0:
1113 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1114 result = self.create_vimcatalog(vca, catalog_md5_name)
1115 if not result:
1116 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1117 result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
1118 media_name=filename, medial_file_name=path, progress=progress)
1119 if not result:
1120 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1121 return self.get_catalogid(catalog_name, vca.get_catalogs())
1122 else:
1123 for catalog in catalogs:
1124 # search for existing catalog if we find same name we return ID
1125 # TODO optimize this
1126 if catalog.name == catalog_md5_name:
1127 self.logger.debug("Found existing catalog entry for {} "
1128 "catalog id {}".format(catalog_name,
1129 self.get_catalogid(catalog_md5_name, catalogs)))
1130 return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
1131
1132 # if we didn't find existing catalog we create a new one and upload image.
1133 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1134 result = self.create_vimcatalog(vca, catalog_md5_name)
1135 if not result:
1136 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1137
1138 result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
1139 media_name=filename, medial_file_name=path, progress=progress)
1140 if not result:
1141 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1142
1143 return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
1144
1145 def get_image_list(self, filter_dict={}):
1146 '''Obtain tenant images from VIM
1147 Filter_dict can be:
1148 name: image name
1149 id: image uuid
1150 checksum: image checksum
1151 location: image path
1152 Returns the image list of dictionaries:
1153 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1154 List can be empty
1155 '''
1156 vca = self.connect()
1157 if not vca:
1158 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1159 try:
1160 image_list = []
1161 catalogs = vca.get_catalogs()
1162 if len(catalogs) == 0:
1163 return image_list
1164 else:
1165 for catalog in catalogs:
1166 catalog_uuid = catalog.get_id().split(":")[3]
1167 name = catalog.name
1168 filtered_dict = {}
1169 if filter_dict.get("name") and filter_dict["name"] != name:
1170 continue
1171 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1172 continue
1173 filtered_dict ["name"] = name
1174 filtered_dict ["id"] = catalog_uuid
1175 image_list.append(filtered_dict)
1176
1177 self.logger.debug("List of already created catalog items: {}".format(image_list))
1178 return image_list
1179 except Exception as exp:
1180 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1181
1182 def get_vappid(self, vdc=None, vapp_name=None):
1183 """ Method takes vdc object and vApp name and returns vapp uuid or None
1184
1185 Args:
1186 vdc: The VDC object.
1187 vapp_name: is application vappp name identifier
1188
1189 Returns:
1190 The return vApp name otherwise None
1191 """
1192 if vdc is None or vapp_name is None:
1193 return None
1194 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1195 try:
1196 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1197 vdc.ResourceEntities.ResourceEntity)
1198 if len(refs) == 1:
1199 return refs[0].href.split("vapp")[1][1:]
1200 except Exception as e:
1201 self.logger.exception(e)
1202 return False
1203 return None
1204
1205 def check_vapp(self, vdc=None, vapp_uuid=None):
1206 """ Method Method returns True or False if vapp deployed in vCloud director
1207
1208 Args:
1209 vca: Connector to VCA
1210 vdc: The VDC object.
1211 vappid: vappid is application identifier
1212
1213 Returns:
1214 The return True if vApp deployed
1215 :param vdc:
1216 :param vapp_uuid:
1217 """
1218 try:
1219 refs = filter(lambda ref:
1220 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1221 vdc.ResourceEntities.ResourceEntity)
1222 for ref in refs:
1223 vappid = ref.href.split("vapp")[1][1:]
1224 # find vapp with respected vapp uuid
1225 if vappid == vapp_uuid:
1226 return True
1227 except Exception as e:
1228 self.logger.exception(e)
1229 return False
1230 return False
1231
1232 def get_namebyvappid(self, vca=None, vdc=None, vapp_uuid=None):
1233 """Method returns vApp name from vCD and lookup done by vapp_id.
1234
1235 Args:
1236 vca: Connector to VCA
1237 vdc: The VDC object.
1238 vapp_uuid: vappid is application identifier
1239
1240 Returns:
1241 The return vApp name otherwise None
1242 """
1243
1244 try:
1245 refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1246 vdc.ResourceEntities.ResourceEntity)
1247 for ref in refs:
1248 # we care only about UUID the rest doesn't matter
1249 vappid = ref.href.split("vapp")[1][1:]
1250 if vappid == vapp_uuid:
1251 response = Http.get(ref.href, headers=vca.vcloud_session.get_vcloud_headers(), verify=vca.verify,
1252 logger=self.logger)
1253 tree = XmlElementTree.fromstring(response.content)
1254 return tree.attrib['name']
1255 except Exception as e:
1256 self.logger.exception(e)
1257 return None
1258 return None
1259
1260 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
1261 cloud_config=None, disk_list=None):
1262 """Adds a VM instance to VIM
1263 Params:
1264 start: indicates if VM must start or boot in pause mode. Ignored
1265 image_id,flavor_id: image and flavor uuid
1266 net_list: list of interfaces, each one is a dictionary with:
1267 name:
1268 net_id: network uuid to connect
1269 vpci: virtual vcpi to assign
1270 model: interface model, virtio, e2000, ...
1271 mac_address:
1272 use: 'data', 'bridge', 'mgmt'
1273 type: 'virtual', 'PF', 'VF', 'VFnotShared'
1274 vim_id: filled/added by this function
1275 cloud_config: can be a text script to be passed directly to cloud-init,
1276 or an object to inject users and ssh keys with format:
1277 key-pairs: [] list of keys to install to the default user
1278 users: [{ name, key-pairs: []}] list of users to add with their key-pair
1279 #TODO ip, security groups
1280 Returns >=0, the instance identifier
1281 <0, error_text
1282 """
1283
1284 self.logger.info("Creating new instance for entry {}".format(name))
1285 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
1286 description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
1287 vca = self.connect()
1288 if not vca:
1289 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1290
1291 #new vm name = vmname + tenant_id + uuid
1292 new_vm_name = [name, '-', str(uuid.uuid4())]
1293 vmname_andid = ''.join(new_vm_name)
1294
1295 # if vm already deployed we return existing uuid
1296 # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
1297 # if vapp_uuid is not None:
1298 # return vapp_uuid
1299
1300 # we check for presence of VDC, Catalog entry and Flavor.
1301 vdc = vca.get_vdc(self.tenant_name)
1302 if vdc is None:
1303 raise vimconn.vimconnNotFoundException(
1304 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1305 catalogs = vca.get_catalogs()
1306 if catalogs is None:
1307 raise vimconn.vimconnNotFoundException(
1308 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1309
1310 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1311 if catalog_hash_name:
1312 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1313 else:
1314 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1315 "(Failed retrieve catalog information {})".format(name, image_id))
1316
1317
1318 # Set vCPU and Memory based on flavor.
1319 vm_cpus = None
1320 vm_memory = None
1321 vm_disk = None
1322
1323 if flavor_id is not None:
1324 if flavor_id not in vimconnector.flavorlist:
1325 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1326 "Failed retrieve flavor information "
1327 "flavor id {}".format(name, flavor_id))
1328 else:
1329 try:
1330 flavor = vimconnector.flavorlist[flavor_id]
1331 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1332 vm_memory = flavor[FLAVOR_RAM_KEY]
1333 vm_disk = flavor[FLAVOR_DISK_KEY]
1334 extended = flavor.get("extended", None)
1335 if extended:
1336 numas=extended.get("numas", None)
1337
1338 except Exception as exp:
1339 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1340
1341 # image upload creates template name as catalog name space Template.
1342 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1343 power_on = 'false'
1344 if start:
1345 power_on = 'true'
1346
1347 # client must provide at least one entry in net_list if not we report error
1348 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1349 #If no mgmt, then the 1st NN in netlist is considered as primary net.
1350 primary_net = None
1351 primary_netname = None
1352 network_mode = 'bridged'
1353 if net_list is not None and len(net_list) > 0:
1354 for net in net_list:
1355 if 'use' in net and net['use'] == 'mgmt':
1356 primary_net = net
1357 if primary_net is None:
1358 primary_net = net_list[0]
1359
1360 try:
1361 primary_net_id = primary_net['net_id']
1362 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1363 if 'name' in network_dict:
1364 primary_netname = network_dict['name']
1365
1366 except KeyError:
1367 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1368 else:
1369 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
1370
1371 # use: 'data', 'bridge', 'mgmt'
1372 # create vApp. Set vcpu and ram based on flavor id.
1373 try:
1374 vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1375 self.get_catalogbyid(image_id, catalogs),
1376 network_name=None, # None while creating vapp
1377 network_mode=network_mode,
1378 vm_name=vmname_andid,
1379 vm_cpus=vm_cpus, # can be None if flavor is None
1380 vm_memory=vm_memory) # can be None if flavor is None
1381
1382 if vapptask is None or vapptask is False:
1383 raise vimconn.vimconnUnexpectedResponse(
1384 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1385 if type(vapptask) is VappTask:
1386 vca.block_until_completed(vapptask)
1387
1388 except Exception as exp:
1389 raise vimconn.vimconnUnexpectedResponse(
1390 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1391
1392 # we should have now vapp in undeployed state.
1393 try:
1394 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
1395 vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
1396 except Exception as exp:
1397 raise vimconn.vimconnUnexpectedResponse(
1398 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1399 .format(vmname_andid, exp))
1400
1401 if vapp is None:
1402 raise vimconn.vimconnUnexpectedResponse(
1403 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1404 vmname_andid))
1405
1406 #Add PCI passthrough/SRIOV configrations
1407 vm_obj = None
1408 pci_devices_info = []
1409 sriov_net_info = []
1410 reserve_memory = False
1411
1412 for net in net_list:
1413 if net["type"]=="PF":
1414 pci_devices_info.append(net)
1415 elif (net["type"]=="VF" or net["type"]=="VFnotShared") and 'net_id'in net:
1416 sriov_net_info.append(net)
1417
1418 #Add PCI
1419 if len(pci_devices_info) > 0:
1420 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1421 vmname_andid ))
1422 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1423 pci_devices_info,
1424 vmname_andid)
1425 if PCI_devices_status:
1426 self.logger.info("Added PCI devives {} to VM {}".format(
1427 pci_devices_info,
1428 vmname_andid)
1429 )
1430 reserve_memory = True
1431 else:
1432 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1433 pci_devices_info,
1434 vmname_andid)
1435 )
1436 # Modify vm disk
1437 if vm_disk:
1438 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1439 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1440 if result :
1441 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1442
1443 #Add new or existing disks to vApp
1444 if disk_list:
1445 added_existing_disk = False
1446 for disk in disk_list:
1447 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1448 image_id = disk['image_id']
1449 # Adding CD-ROM to VM
1450 # will revisit code once specification ready to support this feature
1451 self.insert_media_to_vm(vapp, image_id)
1452 elif "image_id" in disk and disk["image_id"] is not None:
1453 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1454 disk["image_id"] , vapp_uuid))
1455 self.add_existing_disk(catalogs=catalogs,
1456 image_id=disk["image_id"],
1457 size = disk["size"],
1458 template_name=templateName,
1459 vapp_uuid=vapp_uuid
1460 )
1461 added_existing_disk = True
1462 else:
1463 #Wait till added existing disk gets reflected into vCD database/API
1464 if added_existing_disk:
1465 time.sleep(5)
1466 added_existing_disk = False
1467 self.add_new_disk(vca, vapp_uuid, disk['size'])
1468
1469 if numas:
1470 # Assigning numa affinity setting
1471 for numa in numas:
1472 if 'paired-threads-id' in numa:
1473 paired_threads_id = numa['paired-threads-id']
1474 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1475
1476 # add NICs & connect to networks in netlist
1477 try:
1478 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1479 nicIndex = 0
1480 primary_nic_index = 0
1481 for net in net_list:
1482 # openmano uses network id in UUID format.
1483 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
1484 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1485 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1486
1487 if 'net_id' not in net:
1488 continue
1489
1490 interface_net_id = net['net_id']
1491 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1492 interface_network_mode = net['use']
1493
1494 if interface_network_mode == 'mgmt':
1495 primary_nic_index = nicIndex
1496
1497 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1498 - DHCP (The IP address is obtained from a DHCP service.)
1499 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1500 - NONE (No IP addressing mode specified.)"""
1501
1502 if primary_netname is not None:
1503 nets = filter(lambda n: n.name == interface_net_name, vca.get_networks(self.tenant_name))
1504 if len(nets) == 1:
1505 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
1506 task = vapp.connect_to_network(nets[0].name, nets[0].href)
1507 if type(task) is GenericTask:
1508 vca.block_until_completed(task)
1509 # connect network to VM - with all DHCP by default
1510
1511 type_list = ['PF','VF','VFnotShared']
1512 if 'type' in net and net['type'] not in type_list:
1513 # fetching nic type from vnf
1514 if 'model' in net:
1515 nic_type = net['model']
1516 self.logger.info("new_vminstance(): adding network adapter "\
1517 "to a network {}".format(nets[0].name))
1518 self.add_network_adapter_to_vms(vapp, nets[0].name,
1519 primary_nic_index,
1520 nicIndex,
1521 net,
1522 nic_type=nic_type)
1523 else:
1524 self.logger.info("new_vminstance(): adding network adapter "\
1525 "to a network {}".format(nets[0].name))
1526 self.add_network_adapter_to_vms(vapp, nets[0].name,
1527 primary_nic_index,
1528 nicIndex,
1529 net)
1530 nicIndex += 1
1531
1532 # cloud-init for ssh-key injection
1533 if cloud_config:
1534 self.cloud_init(vapp,cloud_config)
1535
1536 # deploy and power on vm
1537 self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
1538 deploytask = vapp.deploy(powerOn=False)
1539 if type(deploytask) is GenericTask:
1540 vca.block_until_completed(deploytask)
1541
1542 # ############# Stub code for SRIOV #################
1543 #Add SRIOV
1544 # if len(sriov_net_info) > 0:
1545 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1546 # vmname_andid ))
1547 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1548 # sriov_net_info,
1549 # vmname_andid)
1550 # if sriov_status:
1551 # self.logger.info("Added SRIOV {} to VM {}".format(
1552 # sriov_net_info,
1553 # vmname_andid)
1554 # )
1555 # reserve_memory = True
1556 # else:
1557 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1558 # sriov_net_info,
1559 # vmname_andid)
1560 # )
1561
1562 # If VM has PCI devices or SRIOV reserve memory for VM
1563 if reserve_memory:
1564 memReserve = vm_obj.config.hardware.memoryMB
1565 spec = vim.vm.ConfigSpec()
1566 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1567 task = vm_obj.ReconfigVM_Task(spec=spec)
1568 if task:
1569 result = self.wait_for_vcenter_task(task, vcenter_conect)
1570 self.logger.info("Reserved memmoery {} MB for "\
1571 "VM VM status: {}".format(str(memReserve),result))
1572 else:
1573 self.logger.info("Fail to reserved memmoery {} to VM {}".format(
1574 str(memReserve),str(vm_obj)))
1575
1576 self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
1577 poweron_task = vapp.poweron()
1578 if type(poweron_task) is GenericTask:
1579 vca.block_until_completed(poweron_task)
1580
1581 except Exception as exp :
1582 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
1583 self.logger.debug("new_vminstance(): Failed create new vm instance {}".format(name, exp))
1584 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {}".format(name, exp))
1585
1586 # check if vApp deployed and if that the case return vApp UUID otherwise -1
1587 wait_time = 0
1588 vapp_uuid = None
1589 while wait_time <= MAX_WAIT_TIME:
1590 try:
1591 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
1592 except Exception as exp:
1593 raise vimconn.vimconnUnexpectedResponse(
1594 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1595 .format(vmname_andid, exp))
1596
1597 if vapp and vapp.me.deployed:
1598 vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
1599 break
1600 else:
1601 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1602 time.sleep(INTERVAL_TIME)
1603
1604 wait_time +=INTERVAL_TIME
1605
1606 if vapp_uuid is not None:
1607 return vapp_uuid
1608 else:
1609 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
1610
1611 ##
1612 ##
1613 ## based on current discussion
1614 ##
1615 ##
1616 ## server:
1617 # created: '2016-09-08T11:51:58'
1618 # description: simple-instance.linux1.1
1619 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1620 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1621 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1622 # status: ACTIVE
1623 # error_msg:
1624 # interfaces: …
1625 #
1626 def get_vminstance(self, vim_vm_uuid=None):
1627 """Returns the VM instance information from VIM"""
1628
1629 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1630 vca = self.connect()
1631 if not vca:
1632 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1633
1634 vdc = vca.get_vdc(self.tenant_name)
1635 if vdc is None:
1636 raise vimconn.vimconnConnectionException(
1637 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1638
1639 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1640 if not vm_info_dict:
1641 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1642 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1643
1644 status_key = vm_info_dict['status']
1645 error = ''
1646 try:
1647 vm_dict = {'created': vm_info_dict['created'],
1648 'description': vm_info_dict['name'],
1649 'status': vcdStatusCode2manoFormat[int(status_key)],
1650 'hostId': vm_info_dict['vmuuid'],
1651 'error_msg': error,
1652 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1653
1654 if 'interfaces' in vm_info_dict:
1655 vm_dict['interfaces'] = vm_info_dict['interfaces']
1656 else:
1657 vm_dict['interfaces'] = []
1658 except KeyError:
1659 vm_dict = {'created': '',
1660 'description': '',
1661 'status': vcdStatusCode2manoFormat[int(-1)],
1662 'hostId': vm_info_dict['vmuuid'],
1663 'error_msg': "Inconsistency state",
1664 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1665
1666 return vm_dict
1667
1668 def delete_vminstance(self, vm__vim_uuid):
1669 """Method poweroff and remove VM instance from vcloud director network.
1670
1671 Args:
1672 vm__vim_uuid: VM UUID
1673
1674 Returns:
1675 Returns the instance identifier
1676 """
1677
1678 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1679 vca = self.connect()
1680 if not vca:
1681 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1682
1683 vdc = vca.get_vdc(self.tenant_name)
1684 if vdc is None:
1685 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1686 self.tenant_name))
1687 raise vimconn.vimconnException(
1688 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1689
1690 try:
1691 vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
1692 if vapp_name is None:
1693 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1694 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1695 else:
1696 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1697
1698 # Delete vApp and wait for status change if task executed and vApp is None.
1699 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1700
1701 if vapp:
1702 if vapp.me.deployed:
1703 self.logger.info("Powering off vApp {}".format(vapp_name))
1704 #Power off vApp
1705 powered_off = False
1706 wait_time = 0
1707 while wait_time <= MAX_WAIT_TIME:
1708 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1709 if not vapp:
1710 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1711 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1712
1713 power_off_task = vapp.poweroff()
1714 if type(power_off_task) is GenericTask:
1715 result = vca.block_until_completed(power_off_task)
1716 if result:
1717 powered_off = True
1718 break
1719 else:
1720 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1721 time.sleep(INTERVAL_TIME)
1722
1723 wait_time +=INTERVAL_TIME
1724 if not powered_off:
1725 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1726 else:
1727 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1728
1729 #Undeploy vApp
1730 self.logger.info("Undeploy vApp {}".format(vapp_name))
1731 wait_time = 0
1732 undeployed = False
1733 while wait_time <= MAX_WAIT_TIME:
1734 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1735 if not vapp:
1736 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1737 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1738 undeploy_task = vapp.undeploy(action='powerOff')
1739
1740 if type(undeploy_task) is GenericTask:
1741 result = vca.block_until_completed(undeploy_task)
1742 if result:
1743 undeployed = True
1744 break
1745 else:
1746 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1747 time.sleep(INTERVAL_TIME)
1748
1749 wait_time +=INTERVAL_TIME
1750
1751 if not undeployed:
1752 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1753
1754 # delete vapp
1755 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1756 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1757
1758 if vapp is not None:
1759 wait_time = 0
1760 result = False
1761
1762 while wait_time <= MAX_WAIT_TIME:
1763 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1764 if not vapp:
1765 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1766 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1767
1768 delete_task = vapp.delete()
1769
1770 if type(delete_task) is GenericTask:
1771 vca.block_until_completed(delete_task)
1772 result = vca.block_until_completed(delete_task)
1773 if result:
1774 break
1775 else:
1776 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1777 time.sleep(INTERVAL_TIME)
1778
1779 wait_time +=INTERVAL_TIME
1780
1781 if not result:
1782 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1783
1784 except:
1785 self.logger.debug(traceback.format_exc())
1786 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1787
1788 if vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) is None:
1789 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
1790 return vm__vim_uuid
1791 else:
1792 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1793
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
           Params: the list of VM identifiers
           Returns a dictionary with:
                vm_id:          #VIM id of this Virtual Machine
                    status:     #Mandatory. Text with one of:
                                #  DELETED (not found at vim)
                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                #  OTHER (Vim reported other status not understood)
                                #  ERROR (VIM indicates an ERROR status)
                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                                #  CREATING (on building process), ERROR
                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                                #
                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                    interfaces:
                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
                        vim_net_id:       #network id where this interface is connected
                        vim_interface_id: #interface/port VIM id
                        ip_address:       #null, or text with IPv4, IPv6 address
        """

        self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))

        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")

        vdc = vca.get_vdc(self.tenant_name)
        if vdc is None:
            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))

        vms_dict = {}
        # NSX edge ids are fetched lazily (only when some interface lacks an IP)
        # and cached here so NSX Manager is queried at most once per refresh
        nsx_edge_list = []
        for vmuuid in vm_list:
            vmname = self.get_namebyvappid(vca, vdc, vmuuid)
            # VMs whose vApp name cannot be resolved are silently skipped
            if vmname is not None:

                try:
                    the_vapp = vca.get_vapp(vdc, vmname)
                    vm_info = the_vapp.get_vms_details()
                    vm_status = vm_info[0]['status']
                    # merge PCI-passthrough device details into the reported vim_info
                    vm_pci_details = self.get_vm_pci_details(vmuuid)
                    vm_info[0].update(vm_pci_details)

                    # NOTE(review): 'error_msg' is filled with the mapped status
                    # string, not an actual error message — confirm intended
                    vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                               'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                               'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}

                    # get networks
                    vm_app_networks = the_vapp.get_vms_network_info()
                    for vapp_network in vm_app_networks:
                        for vm_network in vapp_network:
                            if vm_network['name'] == vmname:
                                #Assign IP Address based on MAC Address in NSX DHCP lease info
                                if vm_network['ip'] is None:
                                    if not nsx_edge_list:
                                        nsx_edge_list = self.get_edge_details()
                                        if nsx_edge_list is None:
                                            raise vimconn.vimconnException("refresh_vms_status:"\
                                                                           "Failed to get edge details from NSX Manager")
                                    if vm_network['mac'] is not None:
                                        vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])

                                # vCD exposes no per-port id, so the network id is
                                # reused as the interface id
                                vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
                                interface = {"mac_address": vm_network['mac'],
                                             "vim_net_id": vm_net_id,
                                             "vim_interface_id": vm_net_id,
                                             'ip_address': vm_network['ip']}
                                # interface['vim_info'] = yaml.safe_dump(vm_network)
                                vm_dict["interfaces"].append(interface)
                    # add a vm to vm dict
                    vms_dict.setdefault(vmuuid, vm_dict)
                except Exception as exp:
                    # best-effort: a failing VM is logged and skipped, not fatal
                    # for the rest of the batch
                    self.logger.debug("Error in response {}".format(exp))
                    self.logger.debug(traceback.format_exc())

        return vms_dict
1874
1875
    def get_edge_details(self):
        """Get the NSX edge list from NSX Manager
           Returns list of NSX edge ids, or None when the REST call to the
           NSX Manager does not return HTTP 200
           Raises vimconnException (wrapped by the outer handler) when NSX
           answers successfully but reports no edges
        """
        edge_list = []
        rheaders = {'Content-Type': 'application/xml'}
        nsx_api_url = '/api/4.0/edges'

        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))

        try:
            # NOTE: TLS certificate verification is disabled for the NSX endpoint
            resp = requests.get(self.nsx_manager + nsx_api_url,
                                auth = (self.nsx_user, self.nsx_password),
                                verify = False, headers = rheaders)
            if resp.status_code == requests.codes.ok:
                paged_Edge_List = XmlElementTree.fromstring(resp.text)
                for edge_pages in paged_Edge_List:
                    if edge_pages.tag == 'edgePage':
                        for edge_summary in edge_pages:
                            # pagingInfo/totalCount == '0' means NSX has no edges at all
                            if edge_summary.tag == 'pagingInfo':
                                for element in edge_summary:
                                    if element.tag == 'totalCount' and element.text == '0':
                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
                                                                       .format(self.nsx_manager))

                            # collect the <id> of every edgeSummary entry
                            if edge_summary.tag == 'edgeSummary':
                                for element in edge_summary:
                                    if element.tag == 'id':
                                        edge_list.append(element.text)
                    else:
                        # any top-level element other than edgePage is unexpected
                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
                                                       .format(self.nsx_manager))

                if not edge_list:
                    raise vimconn.vimconnException("get_edge_details: "\
                                                   "No NSX edge details found: {}"
                                                   .format(self.nsx_manager))
                else:
                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
                    return edge_list
            else:
                # non-200 from NSX Manager: report failure via None, not exception
                self.logger.debug("get_edge_details: "
                                  "Failed to get NSX edge details from NSX Manager: {}"
                                  .format(resp.content))
                return None

        except Exception as exp:
            # NOTE: the vimconnExceptions raised above are caught here as well
            # and re-raised wrapped in a new vimconnException
            self.logger.debug("get_edge_details: "\
                              "Failed to get NSX edge details from NSX Manager: {}"
                              .format(exp))
            raise vimconn.vimconnException("get_edge_details: "\
                                           "Failed to get NSX edge details from NSX Manager: {}"
                                           .format(exp))
1929
1930
1931 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
1932 """Get IP address details from NSX edges, using the MAC address
1933 PARAMS: nsx_edges : List of NSX edges
1934 mac_address : Find IP address corresponding to this MAC address
1935 Returns: IP address corrresponding to the provided MAC address
1936 """
1937
1938 ip_addr = None
1939 rheaders = {'Content-Type': 'application/xml'}
1940
1941 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
1942
1943 try:
1944 for edge in nsx_edges:
1945 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
1946
1947 resp = requests.get(self.nsx_manager + nsx_api_url,
1948 auth = (self.nsx_user, self.nsx_password),
1949 verify = False, headers = rheaders)
1950
1951 if resp.status_code == requests.codes.ok:
1952 dhcp_leases = XmlElementTree.fromstring(resp.text)
1953 for child in dhcp_leases:
1954 if child.tag == 'dhcpLeaseInfo':
1955 dhcpLeaseInfo = child
1956 for leaseInfo in dhcpLeaseInfo:
1957 for elem in leaseInfo:
1958 if (elem.tag)=='macAddress':
1959 edge_mac_addr = elem.text
1960 if (elem.tag)=='ipAddress':
1961 ip_addr = elem.text
1962 if edge_mac_addr is not None:
1963 if edge_mac_addr == mac_address:
1964 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
1965 .format(ip_addr, mac_address,edge))
1966 return ip_addr
1967 else:
1968 self.logger.debug("get_ipaddr_from_NSXedge: "\
1969 "Error occurred while getting DHCP lease info from NSX Manager: {}"
1970 .format(resp.content))
1971
1972 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
1973 return None
1974
1975 except XmlElementTree.ParseError as Err:
1976 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
1977
1978
    def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
        """Send and action over a VM instance from VIM
           Params: vm__vim_uuid - VIM id of the VM
                   action_dict  - dict whose key selects the action: one of
                                  start, rebuild, pause, resume, shutoff,
                                  shutdown, forceOff, reboot
           Returns the vm_id if the action was successfully sent to the VIM
           Raises vimconnException on invalid input, unknown action, or any
           failure while performing the action"""

        self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
        if vm__vim_uuid is None or action_dict is None:
            raise vimconn.vimconnException("Invalid request. VM id or action is None.")

        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")

        vdc = vca.get_vdc(self.tenant_name)
        if vdc is None:
            return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)

        vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
        if vapp_name is None:
            self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
            raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
        else:
            self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))

        try:
            the_vapp = vca.get_vapp(vdc, vapp_name)
            # TODO fix all status
            if "start" in action_dict:
                # power on only makes sense from Suspended/Powered off states
                vm_info = the_vapp.get_vms_details()
                vm_status = vm_info[0]['status']
                self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
                if vm_status == "Suspended" or vm_status == "Powered off":
                    power_on_task = the_vapp.poweron()
                    result = vca.block_until_completed(power_on_task)
                    self.instance_actions_result("start", result, vapp_name)
            elif "rebuild" in action_dict:
                # redeploy the vApp powered on
                self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
                rebuild_task = the_vapp.deploy(powerOn=True)
                result = vca.block_until_completed(rebuild_task)
                self.instance_actions_result("rebuild", result, vapp_name)
            elif "pause" in action_dict:
                # pause maps to an undeploy with suspend
                self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
                pause_task = the_vapp.undeploy(action='suspend')
                result = vca.block_until_completed(pause_task)
                self.instance_actions_result("pause", result, vapp_name)
            elif "resume" in action_dict:
                self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
                power_task = the_vapp.poweron()
                result = vca.block_until_completed(power_task)
                self.instance_actions_result("resume", result, vapp_name)
            elif "shutoff" in action_dict or "shutdown" in action_dict:
                # both map to the same vCD operation; only the logged/reported
                # action name differs. NOTE: dict.items()[0] is Python-2 only.
                action_name , value = action_dict.items()[0]
                self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
                power_off_task = the_vapp.undeploy(action='powerOff')
                result = vca.block_until_completed(power_off_task)
                if action_name == "shutdown":
                    self.instance_actions_result("shutdown", result, vapp_name)
                else:
                    self.instance_actions_result("shutoff", result, vapp_name)
            elif "forceOff" in action_dict:
                # NOTE(review): unlike the other branches, the task is not awaited
                # with block_until_completed — the raw task is passed as 'result';
                # confirm whether this is intended
                result = the_vapp.undeploy(action='force')
                self.instance_actions_result("forceOff", result, vapp_name)
            elif "reboot" in action_dict:
                # NOTE(review): fire-and-forget — the reboot task is neither
                # awaited nor reported via instance_actions_result
                self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
                reboot_task = the_vapp.reboot()
            else:
                raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
            return vm__vim_uuid
        except Exception as exp :
            self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
            raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2049
2050 def instance_actions_result(self, action, result, vapp_name):
2051 if result:
2052 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2053 else:
2054 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2055
2056 def get_vminstance_console(self, vm_id, console_type="vnc"):
2057 """
2058 Get a console for the virtual machine
2059 Params:
2060 vm_id: uuid of the VM
2061 console_type, can be:
2062 "novnc" (by default), "xvpvnc" for VNC types,
2063 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2064 Returns dict with the console parameters:
2065 protocol: ssh, ftp, http, https, ...
2066 server: usually ip address
2067 port: the http, ssh, ... port
2068 suffix: extra text, e.g. the http path and query string
2069 """
2070 raise vimconn.vimconnNotImplemented("Should have implemented this")
2071
2072 # NOT USED METHODS in current version
2073
2074 def host_vim2gui(self, host, server_dict):
2075 """Transform host dictionary from VIM format to GUI format,
2076 and append to the server_dict
2077 """
2078 raise vimconn.vimconnNotImplemented("Should have implemented this")
2079
2080 def get_hosts_info(self):
2081 """Get the information of deployed hosts
2082 Returns the hosts content"""
2083 raise vimconn.vimconnNotImplemented("Should have implemented this")
2084
2085 def get_hosts(self, vim_tenant):
2086 """Get the hosts and deployed instances
2087 Returns the hosts content"""
2088 raise vimconn.vimconnNotImplemented("Should have implemented this")
2089
2090 def get_processor_rankings(self):
2091 """Get the processor rankings in the VIM database"""
2092 raise vimconn.vimconnNotImplemented("Should have implemented this")
2093
2094 def new_host(self, host_data):
2095 """Adds a new host to VIM"""
2096 '''Returns status code of the VIM response'''
2097 raise vimconn.vimconnNotImplemented("Should have implemented this")
2098
2099 def new_external_port(self, port_data):
2100 """Adds a external port to VIM"""
2101 '''Returns the port identifier'''
2102 raise vimconn.vimconnNotImplemented("Should have implemented this")
2103
2104 def new_external_network(self, net_name, net_type):
2105 """Adds a external network to VIM (shared)"""
2106 '''Returns the network identifier'''
2107 raise vimconn.vimconnNotImplemented("Should have implemented this")
2108
2109 def connect_port_network(self, port_id, network_id, admin=False):
2110 """Connects a external port to a network"""
2111 '''Returns status code of the VIM response'''
2112 raise vimconn.vimconnNotImplemented("Should have implemented this")
2113
2114 def new_vminstancefromJSON(self, vm_data):
2115 """Adds a VM instance to VIM"""
2116 '''Returns the instance identifier'''
2117 raise vimconn.vimconnNotImplemented("Should have implemented this")
2118
2119 def get_network_name_by_id(self, network_uuid=None):
2120 """Method gets vcloud director network named based on supplied uuid.
2121
2122 Args:
2123 network_uuid: network_id
2124
2125 Returns:
2126 The return network name.
2127 """
2128
2129 vca = self.connect()
2130 if not vca:
2131 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2132
2133 if not network_uuid:
2134 return None
2135
2136 try:
2137 org_dict = self.get_org(self.org_uuid)
2138 if 'networks' in org_dict:
2139 org_network_dict = org_dict['networks']
2140 for net_uuid in org_network_dict:
2141 if net_uuid == network_uuid:
2142 return org_network_dict[net_uuid]
2143 except:
2144 self.logger.debug("Exception in get_network_name_by_id")
2145 self.logger.debug(traceback.format_exc())
2146
2147 return None
2148
2149 def get_network_id_by_name(self, network_name=None):
2150 """Method gets vcloud director network uuid based on supplied name.
2151
2152 Args:
2153 network_name: network_name
2154 Returns:
2155 The return network uuid.
2156 network_uuid: network_id
2157 """
2158
2159 vca = self.connect()
2160 if not vca:
2161 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2162
2163 if not network_name:
2164 self.logger.debug("get_network_id_by_name() : Network name is empty")
2165 return None
2166
2167 try:
2168 org_dict = self.get_org(self.org_uuid)
2169 if org_dict and 'networks' in org_dict:
2170 org_network_dict = org_dict['networks']
2171 for net_uuid,net_name in org_network_dict.iteritems():
2172 if net_name == network_name:
2173 return net_uuid
2174
2175 except KeyError as exp:
2176 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2177
2178 return None
2179
2180 def list_org_action(self):
2181 """
2182 Method leverages vCloud director and query for available organization for particular user
2183
2184 Args:
2185 vca - is active VCA connection.
2186 vdc_name - is a vdc name that will be used to query vms action
2187
2188 Returns:
2189 The return XML respond
2190 """
2191
2192 vca = self.connect()
2193 if not vca:
2194 raise vimconn.vimconnConnectionException("self.connect() is failed")
2195
2196 url_list = [vca.host, '/api/org']
2197 vm_list_rest_call = ''.join(url_list)
2198
2199 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2200 response = Http.get(url=vm_list_rest_call,
2201 headers=vca.vcloud_session.get_vcloud_headers(),
2202 verify=vca.verify,
2203 logger=vca.logger)
2204 if response.status_code == requests.codes.ok:
2205 return response.content
2206
2207 return None
2208
2209 def get_org_action(self, org_uuid=None):
2210 """
2211 Method leverages vCloud director and retrieve available object fdr organization.
2212
2213 Args:
2214 vca - is active VCA connection.
2215 vdc_name - is a vdc name that will be used to query vms action
2216
2217 Returns:
2218 The return XML respond
2219 """
2220
2221 vca = self.connect()
2222 if not vca:
2223 raise vimconn.vimconnConnectionException("self.connect() is failed")
2224
2225 if org_uuid is None:
2226 return None
2227
2228 url_list = [vca.host, '/api/org/', org_uuid]
2229 vm_list_rest_call = ''.join(url_list)
2230
2231 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2232 response = Http.get(url=vm_list_rest_call,
2233 headers=vca.vcloud_session.get_vcloud_headers(),
2234 verify=vca.verify,
2235 logger=vca.logger)
2236 if response.status_code == requests.codes.ok:
2237 return response.content
2238
2239 return None
2240
2241 def get_org(self, org_uuid=None):
2242 """
2243 Method retrieves available organization in vCloud Director
2244
2245 Args:
2246 org_uuid - is a organization uuid.
2247
2248 Returns:
2249 The return dictionary with following key
2250 "network" - for network list under the org
2251 "catalogs" - for network list under the org
2252 "vdcs" - for vdc list under org
2253 """
2254
2255 org_dict = {}
2256 vca = self.connect()
2257 if not vca:
2258 raise vimconn.vimconnConnectionException("self.connect() is failed")
2259
2260 if org_uuid is None:
2261 return org_dict
2262
2263 content = self.get_org_action(org_uuid=org_uuid)
2264 try:
2265 vdc_list = {}
2266 network_list = {}
2267 catalog_list = {}
2268 vm_list_xmlroot = XmlElementTree.fromstring(content)
2269 for child in vm_list_xmlroot:
2270 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2271 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2272 org_dict['vdcs'] = vdc_list
2273 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2274 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2275 org_dict['networks'] = network_list
2276 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2277 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2278 org_dict['catalogs'] = catalog_list
2279 except:
2280 pass
2281
2282 return org_dict
2283
2284 def get_org_list(self):
2285 """
2286 Method retrieves available organization in vCloud Director
2287
2288 Args:
2289 vca - is active VCA connection.
2290
2291 Returns:
2292 The return dictionary and key for each entry VDC UUID
2293 """
2294
2295 org_dict = {}
2296 vca = self.connect()
2297 if not vca:
2298 raise vimconn.vimconnConnectionException("self.connect() is failed")
2299
2300 content = self.list_org_action()
2301 try:
2302 vm_list_xmlroot = XmlElementTree.fromstring(content)
2303 for vm_xml in vm_list_xmlroot:
2304 if vm_xml.tag.split("}")[1] == 'Org':
2305 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2306 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2307 except:
2308 pass
2309
2310 return org_dict
2311
2312 def vms_view_action(self, vdc_name=None):
2313 """ Method leverages vCloud director vms query call
2314
2315 Args:
2316 vca - is active VCA connection.
2317 vdc_name - is a vdc name that will be used to query vms action
2318
2319 Returns:
2320 The return XML respond
2321 """
2322 vca = self.connect()
2323 if vdc_name is None:
2324 return None
2325
2326 url_list = [vca.host, '/api/vms/query']
2327 vm_list_rest_call = ''.join(url_list)
2328
2329 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2330 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2331 vca.vcloud_session.organization.Link)
2332 if len(refs) == 1:
2333 response = Http.get(url=vm_list_rest_call,
2334 headers=vca.vcloud_session.get_vcloud_headers(),
2335 verify=vca.verify,
2336 logger=vca.logger)
2337 if response.status_code == requests.codes.ok:
2338 return response.content
2339
2340 return None
2341
2342 def get_vapp_list(self, vdc_name=None):
2343 """
2344 Method retrieves vApp list deployed vCloud director and returns a dictionary
2345 contains a list of all vapp deployed for queried VDC.
2346 The key for a dictionary is vApp UUID
2347
2348
2349 Args:
2350 vca - is active VCA connection.
2351 vdc_name - is a vdc name that will be used to query vms action
2352
2353 Returns:
2354 The return dictionary and key for each entry vapp UUID
2355 """
2356
2357 vapp_dict = {}
2358 if vdc_name is None:
2359 return vapp_dict
2360
2361 content = self.vms_view_action(vdc_name=vdc_name)
2362 try:
2363 vm_list_xmlroot = XmlElementTree.fromstring(content)
2364 for vm_xml in vm_list_xmlroot:
2365 if vm_xml.tag.split("}")[1] == 'VMRecord':
2366 if vm_xml.attrib['isVAppTemplate'] == 'true':
2367 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2368 if 'vappTemplate-' in rawuuid[0]:
2369 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2370 # vm and use raw UUID as key
2371 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2372 except:
2373 pass
2374
2375 return vapp_dict
2376
2377 def get_vm_list(self, vdc_name=None):
2378 """
2379 Method retrieves VM's list deployed vCloud director. It returns a dictionary
2380 contains a list of all VM's deployed for queried VDC.
2381 The key for a dictionary is VM UUID
2382
2383
2384 Args:
2385 vca - is active VCA connection.
2386 vdc_name - is a vdc name that will be used to query vms action
2387
2388 Returns:
2389 The return dictionary and key for each entry vapp UUID
2390 """
2391 vm_dict = {}
2392
2393 if vdc_name is None:
2394 return vm_dict
2395
2396 content = self.vms_view_action(vdc_name=vdc_name)
2397 try:
2398 vm_list_xmlroot = XmlElementTree.fromstring(content)
2399 for vm_xml in vm_list_xmlroot:
2400 if vm_xml.tag.split("}")[1] == 'VMRecord':
2401 if vm_xml.attrib['isVAppTemplate'] == 'false':
2402 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2403 if 'vm-' in rawuuid[0]:
2404 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2405 # vm and use raw UUID as key
2406 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2407 except:
2408 pass
2409
2410 return vm_dict
2411
2412 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2413 """
2414 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
2415 contains a list of all VM's deployed for queried VDC.
2416 The key for a dictionary is VM UUID
2417
2418
2419 Args:
2420 vca - is active VCA connection.
2421 vdc_name - is a vdc name that will be used to query vms action
2422
2423 Returns:
2424 The return dictionary and key for each entry vapp UUID
2425 """
2426 vm_dict = {}
2427 vca = self.connect()
2428 if not vca:
2429 raise vimconn.vimconnConnectionException("self.connect() is failed")
2430
2431 if vdc_name is None:
2432 return vm_dict
2433
2434 content = self.vms_view_action(vdc_name=vdc_name)
2435 try:
2436 vm_list_xmlroot = XmlElementTree.fromstring(content)
2437 for vm_xml in vm_list_xmlroot:
2438 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2439 # lookup done by UUID
2440 if isuuid:
2441 if vapp_name in vm_xml.attrib['container']:
2442 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2443 if 'vm-' in rawuuid[0]:
2444 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2445 break
2446 # lookup done by Name
2447 else:
2448 if vapp_name in vm_xml.attrib['name']:
2449 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2450 if 'vm-' in rawuuid[0]:
2451 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2452 break
2453 except:
2454 pass
2455
2456 return vm_dict
2457
2458 def get_network_action(self, network_uuid=None):
2459 """
2460 Method leverages vCloud director and query network based on network uuid
2461
2462 Args:
2463 vca - is active VCA connection.
2464 network_uuid - is a network uuid
2465
2466 Returns:
2467 The return XML respond
2468 """
2469
2470 vca = self.connect()
2471 if not vca:
2472 raise vimconn.vimconnConnectionException("self.connect() is failed")
2473
2474 if network_uuid is None:
2475 return None
2476
2477 url_list = [vca.host, '/api/network/', network_uuid]
2478 vm_list_rest_call = ''.join(url_list)
2479
2480 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2481 response = Http.get(url=vm_list_rest_call,
2482 headers=vca.vcloud_session.get_vcloud_headers(),
2483 verify=vca.verify,
2484 logger=vca.logger)
2485 if response.status_code == requests.codes.ok:
2486 return response.content
2487
2488 return None
2489
2490 def get_vcd_network(self, network_uuid=None):
2491 """
2492 Method retrieves available network from vCloud Director
2493
2494 Args:
2495 network_uuid - is VCD network UUID
2496
2497 Each element serialized as key : value pair
2498
2499 Following keys available for access. network_configuration['Gateway'}
2500 <Configuration>
2501 <IpScopes>
2502 <IpScope>
2503 <IsInherited>true</IsInherited>
2504 <Gateway>172.16.252.100</Gateway>
2505 <Netmask>255.255.255.0</Netmask>
2506 <Dns1>172.16.254.201</Dns1>
2507 <Dns2>172.16.254.202</Dns2>
2508 <DnsSuffix>vmwarelab.edu</DnsSuffix>
2509 <IsEnabled>true</IsEnabled>
2510 <IpRanges>
2511 <IpRange>
2512 <StartAddress>172.16.252.1</StartAddress>
2513 <EndAddress>172.16.252.99</EndAddress>
2514 </IpRange>
2515 </IpRanges>
2516 </IpScope>
2517 </IpScopes>
2518 <FenceMode>bridged</FenceMode>
2519
2520 Returns:
2521 The return dictionary and key for each entry vapp UUID
2522 """
2523
2524 network_configuration = {}
2525 if network_uuid is None:
2526 return network_uuid
2527
2528 try:
2529 content = self.get_network_action(network_uuid=network_uuid)
2530 vm_list_xmlroot = XmlElementTree.fromstring(content)
2531
2532 network_configuration['status'] = vm_list_xmlroot.get("status")
2533 network_configuration['name'] = vm_list_xmlroot.get("name")
2534 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
2535
2536 for child in vm_list_xmlroot:
2537 if child.tag.split("}")[1] == 'IsShared':
2538 network_configuration['isShared'] = child.text.strip()
2539 if child.tag.split("}")[1] == 'Configuration':
2540 for configuration in child.iter():
2541 tagKey = configuration.tag.split("}")[1].strip()
2542 if tagKey != "":
2543 network_configuration[tagKey] = configuration.text.strip()
2544 return network_configuration
2545 except Exception as exp :
2546 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
2547 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2548
2549 return network_configuration
2550
2551 def delete_network_action(self, network_uuid=None):
2552 """
2553 Method delete given network from vCloud director
2554
2555 Args:
2556 network_uuid - is a network uuid that client wish to delete
2557
2558 Returns:
2559 The return None or XML respond or false
2560 """
2561
2562 vca = self.connect_as_admin()
2563 if not vca:
2564 raise vimconn.vimconnConnectionException("self.connect() is failed")
2565 if network_uuid is None:
2566 return False
2567
2568 url_list = [vca.host, '/api/admin/network/', network_uuid]
2569 vm_list_rest_call = ''.join(url_list)
2570
2571 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2572 response = Http.delete(url=vm_list_rest_call,
2573 headers=vca.vcloud_session.get_vcloud_headers(),
2574 verify=vca.verify,
2575 logger=vca.logger)
2576
2577 if response.status_code == 202:
2578 return True
2579
2580 return False
2581
2582 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2583 ip_profile=None, isshared='true'):
2584 """
2585 Method create network in vCloud director
2586
2587 Args:
2588 network_name - is network name to be created.
2589 net_type - can be 'bridge','data','ptp','mgmt'.
2590 ip_profile is a dict containing the IP parameters of the network
2591 isshared - is a boolean
2592 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2593 It optional attribute. by default if no parent network indicate the first available will be used.
2594
2595 Returns:
2596 The return network uuid or return None
2597 """
2598
2599 new_network_name = [network_name, '-', str(uuid.uuid4())]
2600 content = self.create_network_rest(network_name=''.join(new_network_name),
2601 ip_profile=ip_profile,
2602 net_type=net_type,
2603 parent_network_uuid=parent_network_uuid,
2604 isshared=isshared)
2605 if content is None:
2606 self.logger.debug("Failed create network {}.".format(network_name))
2607 return None
2608
2609 try:
2610 vm_list_xmlroot = XmlElementTree.fromstring(content)
2611 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2612 if len(vcd_uuid) == 4:
2613 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2614 return vcd_uuid[3]
2615 except:
2616 self.logger.debug("Failed create network {}".format(network_name))
2617 return None
2618
    def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
                            ip_profile=None, isshared='true'):
        """
        Create an Org VDC network in vCloud Director via the admin REST API.

        Args:
            network_name - name of the network to be created.
            net_type - can be 'bridge','data','ptp','mgmt' (not used by the
                       body below; all networks are created as Direct Org VDC
                       networks with fence mode 'bridged').
            ip_profile - dict with the IP parameters of the network; missing
                         keys are filled in from DEFAULT_IP_PROFILE (and a
                         random 192.168.x.0/24 subnet when none is given).
            isshared - 'true'/'false' string inserted into the <IsShared>
                       element (despite the docstring history calling it a
                       boolean).
            parent_network_uuid - optional parent provider vdc network uuid
                       used for mapping; when absent the first available
                       provider network is used.

        Returns:
            The raw XML content of the create-network response on success,
            otherwise None.
        """

        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")
        if network_name is None:
            return None

        # GET the admin view of our org VDC: it carries both the provider vdc
        # reference and the 'add orgVdcNetwork' link we must POST to.
        url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
        vm_list_rest_call = ''.join(url_list)
        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
            response = Http.get(url=vm_list_rest_call,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            provider_network = None     # href of the ProviderVdcReference element
            available_networks = None   # href of the provider network used as parent
            add_vdc_rest_url = None     # POST target for the new org vdc network

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None
            else:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot:
                        if child.tag.split("}")[1] == 'ProviderVdcReference':
                            provider_network = child.attrib.get('href')
                            # application/vnd.vmware.admin.providervdc+xml
                        if child.tag.split("}")[1] == 'Link':
                            if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
                                    and child.attrib.get('rel') == 'add':
                                add_vdc_rest_url = child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response.content))
                    return None

            # find pvdc provided available network
            response = Http.get(url=provider_network,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)
            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None

            # available_networks.split("/")[-1]

            # No explicit parent given: take the first provider network listed
            # under <AvailableNetworks> as the parent for the Direct network.
            if parent_network_uuid is None:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot.iter():
                        if child.tag.split("}")[1] == 'AvailableNetworks':
                            for networks in child.iter():
                                # application/vnd.vmware.admin.network+xml
                                if networks.attrib.get('href') is not None:
                                    available_networks = networks.attrib.get('href')
                                    break
                except:
                    return None

            try:
                # Configure IP profile of the network: every missing/None field
                # is defaulted, mutating the caller-supplied ip_profile dict.
                ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE

                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
                    # pick a random private /24 when the caller gave no subnet
                    subnet_rand = random.randint(0, 255)
                    ip_base = "192.168.{}.".format(subnet_rand)
                    ip_profile['subnet_address'] = ip_base + "0/24"
                else:
                    ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'

                if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
                    ip_profile['gateway_address']=ip_base + "1"
                if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
                    ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
                if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
                    ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
                if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
                    ip_profile['dhcp_start_address']=ip_base + "3"
                if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
                    ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
                if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
                    ip_profile['dns_address']=ip_base + "2"

                gateway_address=ip_profile['gateway_address']
                dhcp_count=int(ip_profile['dhcp_count'])
                # vCD wants a dotted-quad netmask, not a CIDR prefix
                subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])

                if ip_profile['dhcp_enabled']==True:
                    dhcp_enabled='true'
                else:
                    dhcp_enabled='false'
                dhcp_start_address=ip_profile['dhcp_start_address']

                # derive dhcp_end_address from dhcp_start_address & dhcp_count
                end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
                end_ip_int += dhcp_count - 1
                dhcp_end_address = str(netaddr.IPAddress(end_ip_int))

                ip_version=ip_profile['ip_version']
                dns_address=ip_profile['dns_address']
            except KeyError as exp:
                self.logger.debug("Create Network REST: Key error {}".format(exp))
                raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))

            # either use client provided UUID or search for a first available
            # if both are not defined we return none
            # NOTE(review): this overwrites the POST target (add_vdc_rest_url)
            # with the parent network's admin URL - looks suspicious, confirm
            # against the vCD API before changing.
            if parent_network_uuid is not None:
                url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
                add_vdc_rest_url = ''.join(url_list)

            # Creating all networks as Direct Org VDC type networks.
            # Unused in case of Underlay (data/ptp) network interface.
            fence_mode="bridged"
            is_inherited='false'
            # dns_address may carry two servers separated by ';'
            dns_list = dns_address.split(";")
            dns1 = dns_list[0]
            dns2_text = ""
            if len(dns_list) >= 2:
                dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                            <Description>Openmano created</Description>
                            <Configuration>
                                <IpScopes>
                                    <IpScope>
                                        <IsInherited>{1:s}</IsInherited>
                                        <Gateway>{2:s}</Gateway>
                                        <Netmask>{3:s}</Netmask>
                                        <Dns1>{4:s}</Dns1>{5:s}
                                        <IsEnabled>{6:s}</IsEnabled>
                                        <IpRanges>
                                            <IpRange>
                                                <StartAddress>{7:s}</StartAddress>
                                                <EndAddress>{8:s}</EndAddress>
                                            </IpRange>
                                        </IpRanges>
                                    </IpScope>
                                </IpScopes>
                                <ParentNetwork href="{9:s}"/>
                                <FenceMode>{10:s}</FenceMode>
                            </Configuration>
                            <IsShared>{11:s}</IsShared>
                        </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
                                                    subnet_address, dns1, dns2_text, dhcp_enabled,
                                                    dhcp_start_address, dhcp_end_address, available_networks,
                                                    fence_mode, isshared)

            headers = vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
            try:
                response = Http.post(url=add_vdc_rest_url,
                                     headers=headers,
                                     data=data,
                                     verify=vca.verify,
                                     logger=vca.logger)

                if response.status_code != 201:
                    self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
                                      .format(response.status_code,response.content))
                else:
                    network = networkType.parseString(response.content, True)
                    create_nw_task = network.get_Tasks().get_Task()[0]

                    # if we all ok we respond with content after network creation completes
                    # otherwise by default return None
                    if create_nw_task is not None:
                        self.logger.debug("Create Network REST : Waiting for Network creation complete")
                        status = vca.block_until_completed(create_nw_task)
                        if status:
                            return response.content
                        else:
                            self.logger.debug("create_network_rest task failed. Network Create response : {}"
                                              .format(response.content))
            except Exception as exp:
                self.logger.debug("create_network_rest : Exception : {} ".format(exp))

        return None
2816
2817 def convert_cidr_to_netmask(self, cidr_ip=None):
2818 """
2819 Method sets convert CIDR netmask address to normal IP format
2820 Args:
2821 cidr_ip : CIDR IP address
2822 Returns:
2823 netmask : Converted netmask
2824 """
2825 if cidr_ip is not None:
2826 if '/' in cidr_ip:
2827 network, net_bits = cidr_ip.split('/')
2828 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2829 else:
2830 netmask = cidr_ip
2831 return netmask
2832 return None
2833
2834 def get_provider_rest(self, vca=None):
2835 """
2836 Method gets provider vdc view from vcloud director
2837
2838 Args:
2839 network_name - is network name to be created.
2840 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2841 It optional attribute. by default if no parent network indicate the first available will be used.
2842
2843 Returns:
2844 The return xml content of respond or None
2845 """
2846
2847 url_list = [vca.host, '/api/admin']
2848 response = Http.get(url=''.join(url_list),
2849 headers=vca.vcloud_session.get_vcloud_headers(),
2850 verify=vca.verify,
2851 logger=vca.logger)
2852
2853 if response.status_code == requests.codes.ok:
2854 return response.content
2855 return None
2856
2857 def create_vdc(self, vdc_name=None):
2858
2859 vdc_dict = {}
2860
2861 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
2862 if xml_content is not None:
2863 try:
2864 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
2865 for child in task_resp_xmlroot:
2866 if child.tag.split("}")[1] == 'Owner':
2867 vdc_id = child.attrib.get('href').split("/")[-1]
2868 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
2869 return vdc_dict
2870 except:
2871 self.logger.debug("Respond body {}".format(xml_content))
2872
2873 return None
2874
    def create_vdc_from_tmpl_rest(self, vdc_name=None):
        """
        Method create vdc in vCloud director based on VDC template.
        it uses pre-defined template that must be named openmano

        Args:
            vdc_name - name of a new vdc.

        Returns:
            The raw XML content of the instantiate response (2xx), or None
            when no 'openmano' template exists or any step fails.
        """

        self.logger.info("Creating new vdc {}".format(vdc_name))
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vdc_name is None:
            return None

        # list all VDC templates visible to this session
        url_list = [vca.host, '/api/vdcTemplates']
        vm_list_rest_call = ''.join(url_list)
        response = Http.get(url=vm_list_rest_call,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)

        # container url to a template
        vdc_template_ref = None
        try:
            vm_list_xmlroot = XmlElementTree.fromstring(response.content)
            for child in vm_list_xmlroot:
                # application/vnd.vmware.admin.providervdc+xml
                # we need find a template from witch we instantiate VDC
                if child.tag.split("}")[1] == 'VdcTemplate':
                    if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml' and child.attrib.get(
                            'name') == 'openmano':
                        vdc_template_ref = child.attrib.get('href')
        except:
            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
            self.logger.debug("Respond body {}".format(response.content))
            return None

        # if we didn't found required pre defined template we return None
        if vdc_template_ref is None:
            return None

        try:
            # instantiate vdc: POST InstantiateVdcTemplateParams against the org
            url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
            vm_list_rest_call = ''.join(url_list)
            data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                          <Source href="{1:s}"></Source>
                          <Description>opnemano</Description>
                      </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
            headers = vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
            response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
                                 logger=vca.logger)
            # if we all ok we respond with content otherwise by default None
            if response.status_code >= 200 and response.status_code < 300:
                return response.content
            return None
        except:
            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
            self.logger.debug("Respond body {}".format(response.content))

        return None
2942
    def create_vdc_rest(self, vdc_name=None):
        """
        Create a VDC in vCloud Director via the admin REST API (without a
        template), using a fixed ReservationPool allocation model.

        Args:
            vdc_name - name of the vdc to create.

        Returns:
            The raw XML content of the CreateVdcParams response (201), or
            None on any failure.
        """

        self.logger.info("Creating new vdc {}".format(vdc_name))

        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vdc_name is None:
            return None

        # GET the admin view of the organization to find the 'add vdc' link
        url_list = [vca.host, '/api/admin/org/', self.org_uuid]
        vm_list_rest_call = ''.join(url_list)
        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
            response = Http.get(url=vm_list_rest_call,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            provider_vdc_ref = None     # href of a provider vdc to back the new vdc
            add_vdc_rest_url = None     # POST target for CreateVdcParams
            available_networks = None   # NOTE(review): assigned but never used here

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None
            else:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot:
                        # application/vnd.vmware.admin.providervdc+xml
                        if child.tag.split("}")[1] == 'Link':
                            if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
                                    and child.attrib.get('rel') == 'add':
                                add_vdc_rest_url = child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response.content))
                    return None

            # query the admin view for provider vdc references; the last one
            # listed wins (the loop keeps overwriting provider_vdc_ref)
            response = self.get_provider_rest(vca=vca)
            try:
                vm_list_xmlroot = XmlElementTree.fromstring(response)
                for child in vm_list_xmlroot:
                    if child.tag.split("}")[1] == 'ProviderVdcReferences':
                        for sub_child in child:
                            provider_vdc_ref = sub_child.attrib.get('href')
            except:
                self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                self.logger.debug("Respond body {}".format(response))
                return None

            if add_vdc_rest_url is not None and provider_vdc_ref is not None:
                # hard-coded capacity: 2048 MHz CPU / 2048 MB RAM / 20 GB storage
                data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
                        <AllocationModel>ReservationPool</AllocationModel>
                        <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
                        <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
                        </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
                        <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
                        <ProviderVdcReference
                        name="Main Provider"
                        href="{2:s}" />
                        <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
                                                                                                      escape(vdc_name),
                                                                                                      provider_vdc_ref)

                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
                response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
                                     logger=vca.logger)

                # if we all ok we respond with content otherwise by default None
                if response.status_code == 201:
                    return response.content
        return None
3029
    def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
        """
        Retrieve and parse the details of a vApp from vCloud Director.

        Args:
            vapp_uuid - vApp identifier (appended to /api/vApp/vapp-).
            need_admin_access - when True use the admin connection.

        Returns:
            dict with the parsed fields (created, networkname, IP scope
            values, name/status/vmuuid of the first child VM, interfaces,
            acquireTicket/acquireMksTicket links, vm_vcenter_info,
            vm_virtual_hardware). Empty dict on REST/parsing failure;
            None when vapp_uuid is None.
        """

        parsed_respond = {}
        vca = None

        if need_admin_access:
            vca = self.connect_as_admin()
        else:
            vca = self.connect()

        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vapp_uuid is None:
            return None

        url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
        get_vapp_restcall = ''.join(url_list)

        if vca.vcloud_session and vca.vcloud_session.organization:
            response = Http.get(url=get_vapp_restcall,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
                                                                                          response.status_code))
                return parsed_respond

            try:
                xmlroot_respond = XmlElementTree.fromstring(response.content)
                parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']

                # XML namespaces used by the vApp document
                namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                              'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                              'vmw': 'http://www.vmware.com/schema/ovf',
                              'vm': 'http://www.vmware.com/vcloud/v1.5',
                              'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                              "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
                              "xmlns":"http://www.vmware.com/vcloud/v1.5"
                              }

                created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
                if created_section is not None:
                    parsed_respond['created'] = created_section.text

                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
                if network_section is not None and 'networkName' in network_section.attrib:
                    parsed_respond['networkname'] = network_section.attrib['networkName']

                # flatten the IpScope elements (Gateway, Netmask, ...) and the
                # Start/EndAddress of the IpRanges into top-level keys
                ipscopes_section = \
                    xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
                                         namespaces)
                if ipscopes_section is not None:
                    for ipscope in ipscopes_section:
                        for scope in ipscope:
                            tag_key = scope.tag.split("}")[1]
                            if tag_key == 'IpRanges':
                                ip_ranges = scope.getchildren()
                                for ipblock in ip_ranges:
                                    for block in ipblock:
                                        parsed_respond[block.tag.split("}")[1]] = block.text
                            else:
                                parsed_respond[tag_key] = scope.text

                # parse children section for other attrib
                # NOTE: only the first <Children/> entry (first VM) is parsed
                children_section = xmlroot_respond.find('vm:Children/', namespaces)
                if children_section is not None:
                    parsed_respond['name'] = children_section.attrib['name']
                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
                        if "nestedHypervisorEnabled" in children_section.attrib else None
                    parsed_respond['deployed'] = children_section.attrib['deployed']
                    parsed_respond['status'] = children_section.attrib['status']
                    parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
                    nic_list = []
                    for adapters in network_adapter:
                        adapter_key = adapters.tag.split("}")[1]
                        if adapter_key == 'PrimaryNetworkConnectionIndex':
                            parsed_respond['primarynetwork'] = adapters.text
                        if adapter_key == 'NetworkConnection':
                            vnic = {}
                            if 'network' in adapters.attrib:
                                vnic['network'] = adapters.attrib['network']
                            for adapter in adapters:
                                setting_key = adapter.tag.split("}")[1]
                                vnic[setting_key] = adapter.text
                            nic_list.append(vnic)

                    # console ticket links for this VM
                    for link in children_section:
                        if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                            if link.attrib['rel'] == 'screen:acquireTicket':
                                parsed_respond['acquireTicket'] = link.attrib
                            if link.attrib['rel'] == 'screen:acquireMksTicket':
                                parsed_respond['acquireMksTicket'] = link.attrib

                    parsed_respond['interfaces'] = nic_list
                    # vCenter extension data carries the managed-object ref id
                    vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                    if vCloud_extension_section is not None:
                        vm_vcenter_info = {}
                        vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                        vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                        if vmext is not None:
                            vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                        parsed_respond["vm_vcenter_info"]= vm_vcenter_info

                    # hard-disk size and the 'edit disks' link used by
                    # modify_vm_disk_rest()
                    virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
                    vm_virtual_hardware_info = {}
                    if virtual_hardware_section is not None:
                        for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
                            if item.find("rasd:Description",namespaces).text == "Hard disk":
                                disk_size = item.find("rasd:HostResource" ,namespaces
                                                      ).attrib["{"+namespaces['vm']+"}capacity"]

                                vm_virtual_hardware_info["disk_size"]= disk_size
                                break

                        for link in virtual_hardware_section:
                            if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                                if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
                                    vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
                                    break

                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
            except Exception as exp :
                self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
        return parsed_respond
3166
3167 def acuire_console(self, vm_uuid=None):
3168
3169 vca = self.connect()
3170 if not vca:
3171 raise vimconn.vimconnConnectionException("self.connect() is failed")
3172 if vm_uuid is None:
3173 return None
3174
3175 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
3176 vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid)
3177 console_dict = vm_dict['acquireTicket']
3178 console_rest_call = console_dict['href']
3179
3180 response = Http.post(url=console_rest_call,
3181 headers=vca.vcloud_session.get_vcloud_headers(),
3182 verify=vca.verify,
3183 logger=vca.logger)
3184
3185 if response.status_code == requests.codes.ok:
3186 return response.content
3187
3188 return None
3189
3190 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3191 """
3192 Method retrieve vm disk details
3193
3194 Args:
3195 vapp_uuid - is vapp identifier.
3196 flavor_disk - disk size as specified in VNFD (flavor)
3197
3198 Returns:
3199 The return network uuid or return None
3200 """
3201 status = None
3202 try:
3203 #Flavor disk is in GB convert it into MB
3204 flavor_disk = int(flavor_disk) * 1024
3205 vm_details = self.get_vapp_details_rest(vapp_uuid)
3206 if vm_details:
3207 vm_name = vm_details["name"]
3208 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3209
3210 if vm_details and "vm_virtual_hardware" in vm_details:
3211 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3212 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3213
3214 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3215
3216 if flavor_disk > vm_disk:
3217 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3218 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3219 vm_disk, flavor_disk ))
3220 else:
3221 status = True
3222 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3223
3224 return status
3225 except Exception as exp:
3226 self.logger.info("Error occurred while modifing disk size {}".format(exp))
3227
3228
    def modify_vm_disk_rest(self, disk_href , disk_size):
        """
        Modify the VM's hard-disk capacity through the vCD disks edit link.

        Flow: GET the RasdItemsList at disk_href, patch the 'Hard disk'
        item's HostResource capacity attribute, PUT it back and wait for
        the resulting task.

        Args:
            disk_href - vCD API URL to GET and PUT disk data
            disk_size - new disk size in MB

        Returns:
            The completed-task status (True/False) on success, else None.
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if disk_href is None or disk_size is None:
            return None

        if vca.vcloud_session and vca.vcloud_session.organization:
            response = Http.get(url=disk_href,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code != requests.codes.ok:
                self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
                                                                                              response.status_code))
                return None
            try:
                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                # collect the document's namespaces (Python 2 iteritems),
                # dropping the default (prefix-less) one
                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

                # patch the capacity attribute of the 'Hard disk' item in place
                for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                    if item.find("rasd:Description",namespaces).text == "Hard disk":
                        disk_item = item.find("rasd:HostResource" ,namespaces )
                        if disk_item is not None:
                            disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
                            break

                data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
                                                xml_declaration=True)

                # Send PUT request to modify disk size
                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

                response = Http.put(url=disk_href,
                                    data=data,
                                    headers=headers,
                                    verify=vca.verify, logger=self.logger)

                # vCD answers 202 Accepted with an async task
                if response.status_code != 202:
                    self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
                                                                                                  response.status_code))
                else:
                    modify_disk_task = taskType.parseString(response.content, True)
                    if type(modify_disk_task) is GenericTask:
                        status = vca.block_until_completed(modify_disk_task)
                        return status

                return None

            except Exception as exp :
                self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
                return None
3294
3295 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3296 """
3297 Method to attach pci devices to VM
3298
3299 Args:
3300 vapp_uuid - uuid of vApp/VM
3301 pci_devices - pci devices infromation as specified in VNFD (flavor)
3302
3303 Returns:
3304 The status of add pci device task , vm object and
3305 vcenter_conect object
3306 """
3307 vm_obj = None
3308 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3309 vcenter_conect, content = self.get_vcenter_content()
3310 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3311
3312 if vm_moref_id:
3313 try:
3314 no_of_pci_devices = len(pci_devices)
3315 if no_of_pci_devices > 0:
3316 #Get VM and its host
3317 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3318 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3319 if host_obj and vm_obj:
3320 #get PCI devies from host on which vapp is currently installed
3321 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3322
3323 if avilable_pci_devices is None:
3324 #find other hosts with active pci devices
3325 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3326 content,
3327 no_of_pci_devices
3328 )
3329
3330 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3331 #Migrate vm to the host where PCI devices are availble
3332 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3333 task = self.relocate_vm(new_host_obj, vm_obj)
3334 if task is not None:
3335 result = self.wait_for_vcenter_task(task, vcenter_conect)
3336 self.logger.info("Migrate VM status: {}".format(result))
3337 host_obj = new_host_obj
3338 else:
3339 self.logger.info("Fail to migrate VM : {}".format(result))
3340 raise vimconn.vimconnNotFoundException(
3341 "Fail to migrate VM : {} to host {}".format(
3342 vmname_andid,
3343 new_host_obj)
3344 )
3345
3346 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3347 #Add PCI devices one by one
3348 for pci_device in avilable_pci_devices:
3349 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3350 if task:
3351 status= self.wait_for_vcenter_task(task, vcenter_conect)
3352 if status:
3353 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3354 else:
3355 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3356 return True, vm_obj, vcenter_conect
3357 else:
3358 self.logger.error("Currently there is no host with"\
3359 " {} number of avaialble PCI devices required for VM {}".format(
3360 no_of_pci_devices,
3361 vmname_andid)
3362 )
3363 raise vimconn.vimconnNotFoundException(
3364 "Currently there is no host with {} "\
3365 "number of avaialble PCI devices required for VM {}".format(
3366 no_of_pci_devices,
3367 vmname_andid))
3368 else:
3369 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
3370
3371 except vmodl.MethodFault as error:
3372 self.logger.error("Error occurred while adding PCI devices {} ",error)
3373 return None, vm_obj, vcenter_conect
3374
3375 def get_vm_obj(self, content, mob_id):
3376 """
3377 Method to get the vsphere VM object associated with a given morf ID
3378 Args:
3379 vapp_uuid - uuid of vApp/VM
3380 content - vCenter content object
3381 mob_id - mob_id of VM
3382
3383 Returns:
3384 VM and host object
3385 """
3386 vm_obj = None
3387 host_obj = None
3388 try :
3389 container = content.viewManager.CreateContainerView(content.rootFolder,
3390 [vim.VirtualMachine], True
3391 )
3392 for vm in container.view:
3393 mobID = vm._GetMoId()
3394 if mobID == mob_id:
3395 vm_obj = vm
3396 host_obj = vm_obj.runtime.host
3397 break
3398 except Exception as exp:
3399 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3400 return host_obj, vm_obj
3401
    def get_pci_devices(self, host, need_devices):
        """
        Collect passthrough-enabled PCI devices on a host that are not in
        use by any powered-on VM.

        Args:
            host - vSphere host object
            need_devices - number of pci devices needed on host

        Returns:
            list of the first need_devices free PCI device objects, or None
            when the host has fewer free devices (or on error).
        """
        all_devices = []
        all_device_ids = []
        used_devices_ids = []

        try:
            if host:
                # pair each active passthrough entry with its PCI device record
                pciPassthruInfo = host.config.pciPassthruInfo
                pciDevies = host.hardware.pciDevice

            for pci_status in pciPassthruInfo:
                if pci_status.passthruActive:
                    for device in pciDevies:
                        if device.id == pci_status.id:
                            all_device_ids.append(device.id)
                            all_devices.append(device)

            #check if devices are in use
            # NOTE(review): this aliases all_devices (no copy), and the inner
            # loop removes from the list it iterates - harmless only while a
            # backing id matches at most one device; confirm before changing.
            avalible_devices = all_devices
            for vm in host.vm:
                if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                    vm_devices = vm.config.hardware.device
                    for device in vm_devices:
                        if type(device) is vim.vm.device.VirtualPCIPassthrough:
                            if device.backing.id in all_device_ids:
                                for use_device in avalible_devices:
                                    if use_device.id == device.backing.id:
                                        avalible_devices.remove(use_device)
                                used_devices_ids.append(device.backing.id)
                                self.logger.debug("Device {} from devices {}"\
                                                  "is in use".format(device.backing.id,
                                                                     device)
                                                  )
            if len(avalible_devices) < need_devices:
                self.logger.debug("Host {} don't have {} number of active devices".format(host,
                                                                                          need_devices))
                self.logger.debug("found only {} devives {}".format(len(avalible_devices),
                                                                    avalible_devices))
                return None
            else:
                required_devices = avalible_devices[:need_devices]
                self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
                    len(avalible_devices),
                    host,
                    need_devices))
                self.logger.info("Retruning {} devices as {}".format(need_devices,
                                                                     required_devices ))
                return required_devices

        except Exception as exp:
            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))

        return None
3464
3465 def get_host_and_PCIdevices(self, content, need_devices):
3466 """
3467 Method to get the details of pci devices infromation on all hosts
3468
3469 Args:
3470 content - vSphere host object
3471 need_devices - number of pci devices needed on host
3472
3473 Returns:
3474 array of pci devices and host object
3475 """
3476 host_obj = None
3477 pci_device_objs = None
3478 try:
3479 if content:
3480 container = content.viewManager.CreateContainerView(content.rootFolder,
3481 [vim.HostSystem], True)
3482 for host in container.view:
3483 devices = self.get_pci_devices(host, need_devices)
3484 if devices:
3485 host_obj = host
3486 pci_device_objs = devices
3487 break
3488 except Exception as exp:
3489 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3490
3491 return host_obj,pci_device_objs
3492
3493 def relocate_vm(self, dest_host, vm) :
3494 """
3495 Method to get the relocate VM to new host
3496
3497 Args:
3498 dest_host - vSphere host object
3499 vm - vSphere VM object
3500
3501 Returns:
3502 task object
3503 """
3504 task = None
3505 try:
3506 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3507 task = vm.Relocate(relocate_spec)
3508 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3509 except Exception as exp:
3510 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
3511 dest_host, vm, exp))
3512 return task
3513
3514 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3515 """
3516 Waits and provides updates on a vSphere task
3517 """
3518 while task.info.state == vim.TaskInfo.State.running:
3519 time.sleep(2)
3520
3521 if task.info.state == vim.TaskInfo.State.success:
3522 if task.info.result is not None and not hideResult:
3523 self.logger.info('{} completed successfully, result: {}'.format(
3524 actionName,
3525 task.info.result))
3526 else:
3527 self.logger.info('Task {} completed successfully.'.format(actionName))
3528 else:
3529 self.logger.error('{} did not complete successfully: {} '.format(
3530 actionName,
3531 task.info.error)
3532 )
3533
3534 return task.info.result
3535
3536 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3537 """
3538 Method to add pci device in given VM
3539
3540 Args:
3541 host_object - vSphere host object
3542 vm_object - vSphere VM object
3543 host_pci_dev - host_pci_dev must be one of the devices from the
3544 host_object.hardware.pciDevice list
3545 which is configured as a PCI passthrough device
3546
3547 Returns:
3548 task object
3549 """
3550 task = None
3551 if vm_object and host_object and host_pci_dev:
3552 try :
3553 #Add PCI device to VM
3554 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3555 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3556
3557 if host_pci_dev.id not in systemid_by_pciid:
3558 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3559 return None
3560
3561 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3562 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3563 id=host_pci_dev.id,
3564 systemId=systemid_by_pciid[host_pci_dev.id],
3565 vendorId=host_pci_dev.vendorId,
3566 deviceName=host_pci_dev.deviceName)
3567
3568 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3569
3570 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3571 new_device_config.operation = "add"
3572 vmConfigSpec = vim.vm.ConfigSpec()
3573 vmConfigSpec.deviceChange = [new_device_config]
3574
3575 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3576 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3577 host_pci_dev, vm_object, host_object)
3578 )
3579 except Exception as exp:
3580 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
3581 host_pci_dev,
3582 vm_object,
3583 exp))
3584 return task
3585
3586 def get_vm_vcenter_info(self):
3587 """
3588 Method to get details of vCenter and vm
3589
3590 Args:
3591 vapp_uuid - uuid of vApp or VM
3592
3593 Returns:
3594 Moref Id of VM and deails of vCenter
3595 """
3596 vm_vcenter_info = {}
3597
3598 if self.vcenter_ip is not None:
3599 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3600 else:
3601 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3602 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3603 if self.vcenter_port is not None:
3604 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3605 else:
3606 raise vimconn.vimconnException(message="vCenter port is not provided."\
3607 " Please provide vCenter port while attaching datacenter to tenant in --config")
3608 if self.vcenter_user is not None:
3609 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3610 else:
3611 raise vimconn.vimconnException(message="vCenter user is not provided."\
3612 " Please provide vCenter user while attaching datacenter to tenant in --config")
3613
3614 if self.vcenter_password is not None:
3615 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3616 else:
3617 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3618 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3619
3620 return vm_vcenter_info
3621
3622
3623 def get_vm_pci_details(self, vmuuid):
3624 """
3625 Method to get VM PCI device details from vCenter
3626
3627 Args:
3628 vm_obj - vSphere VM object
3629
3630 Returns:
3631 dict of PCI devives attached to VM
3632
3633 """
3634 vm_pci_devices_info = {}
3635 try:
3636 vcenter_conect, content = self.get_vcenter_content()
3637 vm_moref_id = self.get_vm_moref_id(vmuuid)
3638 if vm_moref_id:
3639 #Get VM and its host
3640 if content:
3641 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3642 if host_obj and vm_obj:
3643 vm_pci_devices_info["host_name"]= host_obj.name
3644 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3645 for device in vm_obj.config.hardware.device:
3646 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3647 device_details={'devide_id':device.backing.id,
3648 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3649 }
3650 vm_pci_devices_info[device.deviceInfo.label] = device_details
3651 else:
3652 self.logger.error("Can not connect to vCenter while getting "\
3653 "PCI devices infromationn")
3654 return vm_pci_devices_info
3655 except Exception as exp:
3656 self.logger.error("Error occurred while getting VM infromationn"\
3657 " for VM : {}".format(exp))
3658 raise vimconn.vimconnException(message=exp)
3659
    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
        """
        Method to add network adapter type to vm

        Adds/updates the NetworkConnectionSection of every VM in the vApp via
        the vCloud REST API, optionally forcing a specific NIC adapter model.

        Args :
            vapp - vApp object whose VMs get the new network connection
            network_name - name of network
            primary_nic_index - int value for primary nic index
            nicIndex - int value for nic index
            net - dict describing the network; optional 'floating_ip' and
                  'ip_address' keys select the IP allocation mode
            nic_type - specify model name to which add to vm

        Returns:
            None

        Raises:
            vimconn.vimconnConnectionException when the vCloud connection fails,
            vimconn.vimconnException on REST errors
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")

        try:
            ip_address = None
            floating_ip = False
            if 'floating_ip' in net: floating_ip = net['floating_ip']

            # Stub for ip_address feature
            if 'ip_address' in net: ip_address = net['ip_address']

            # Allocation mode precedence: floating ip -> POOL, fixed ip ->
            # MANUAL, otherwise DHCP.
            if floating_ip:
                allocation_mode = "POOL"
            elif ip_address:
                allocation_mode = "MANUAL"
            else:
                allocation_mode = "DHCP"

            if not nic_type:
                # No explicit adapter model: let vCloud pick the default NIC
                # type. NOTE(review): this branch duplicates the nic_type
                # branch below except for the <NetworkAdapterType> element.
                for vms in vapp._get_vms():
                    vm_id = (vms.id).split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)

                    # Fetch the VM's current NetworkConnectionSection XML.
                    response = Http.get(url=url_rest_call,
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                       "network connection section")

                    data = response.content
                    # NOTE(review): the section is edited with plain string
                    # replacement, so the result depends on the exact newline
                    # formatting vCloud returns -- fragile but long-standing.
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        # First connection on this VM: also set the primary
                        # NIC index.
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                               allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                    else:
                        # Section already has a primary index: only append a
                        # new NetworkConnection entry.
                        new_item = """<NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                </NetworkConnection>""".format(network_name, nicIndex,
                                                               allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))

                    # Write the modified section back and wait for the async task.
                    headers = vca.vcloud_session.get_vcloud_headers()
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                    response = Http.put(url=url_rest_call, headers=headers, data=data,
                                        verify=vca.verify,
                                        logger=vca.logger)
                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {} ".format(url_rest_call,
                                                                     response.content,
                                                                     response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                       "network connection section")
                    else:
                        nic_task = taskType.parseString(response.content, True)
                        if isinstance(nic_task, GenericTask):
                            vca.block_until_completed(nic_task)
                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
                                             "default NIC type".format(vm_id))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
                                              "connect NIC type".format(vm_id))
            else:
                # Explicit adapter model requested: same flow as above, but the
                # NetworkConnection entry also carries a <NetworkAdapterType>.
                for vms in vapp._get_vms():
                    vm_id = (vms.id).split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)

                    response = Http.get(url=url_rest_call,
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                       "network connection section")
                    data = response.content
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                <NetworkAdapterType>{}</NetworkAdapterType>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                               allocation_mode, nic_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                    else:
                        new_item = """<NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                <NetworkAdapterType>{}</NetworkAdapterType>
                                </NetworkConnection>""".format(network_name, nicIndex,
                                                               allocation_mode, nic_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))

                    headers = vca.vcloud_session.get_vcloud_headers()
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                    response = Http.put(url=url_rest_call, headers=headers, data=data,
                                        verify=vca.verify,
                                        logger=vca.logger)

                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                       "network connection section")
                    else:
                        nic_task = taskType.parseString(response.content, True)
                        if isinstance(nic_task, GenericTask):
                            vca.block_until_completed(nic_task)
                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
                                             "conneced to NIC type {}".format(vm_id, nic_type))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
                                              "failed to connect NIC type {}".format(vm_id, nic_type))
        except Exception as exp:
            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
                              "while adding Network adapter")
            raise vimconn.vimconnException(message=exp)
3832
3833
3834 def set_numa_affinity(self, vmuuid, paired_threads_id):
3835 """
3836 Method to assign numa affinity in vm configuration parammeters
3837 Args :
3838 vmuuid - vm uuid
3839 paired_threads_id - one or more virtual processor
3840 numbers
3841 Returns:
3842 return if True
3843 """
3844 try:
3845 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
3846 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
3847 context = None
3848 if hasattr(ssl, '_create_unverified_context'):
3849 context = ssl._create_unverified_context()
3850 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
3851 pwd=self.passwd, port=int(vm_vcenter_port),
3852 sslContext=context)
3853 atexit.register(Disconnect, vcenter_conect)
3854 content = vcenter_conect.RetrieveContent()
3855
3856 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
3857 if vm_obj:
3858 config_spec = vim.vm.ConfigSpec()
3859 config_spec.extraConfig = []
3860 opt = vim.option.OptionValue()
3861 opt.key = 'numa.nodeAffinity'
3862 opt.value = str(paired_threads_id)
3863 config_spec.extraConfig.append(opt)
3864 task = vm_obj.ReconfigVM_Task(config_spec)
3865 if task:
3866 result = self.wait_for_vcenter_task(task, vcenter_conect)
3867 extra_config = vm_obj.config.extraConfig
3868 flag = False
3869 for opts in extra_config:
3870 if 'numa.nodeAffinity' in opts.key:
3871 flag = True
3872 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
3873 "value {} for vm {}".format(opt.value, vm_obj))
3874 if flag:
3875 return
3876 else:
3877 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
3878 except Exception as exp:
3879 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
3880 "for VM {} : {}".format(vm_obj, vm_moref_id))
3881 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
3882 "affinity".format(exp))
3883
3884
3885
    def cloud_init(self, vapp, cloud_config):
        """
        Method to inject ssh-key
        vapp - vapp object
        cloud_config a dictionary with:
                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
                'users': (optional) list of users to be inserted, each item is a dict with:
                    'name': (mandatory) user name,
                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
                'user-data': (optional) string is a text script to be passed directly to cloud-init
                'config-files': (optional). List of files to be transferred. Each item is a dict with:
                    'dest': (mandatory) string with the destination absolute path
                    'encoding': (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    'content' (mandatory): string with the content of the file
                    'permissions': (optional) string with file permissions, typically octal notation '0644'
                    'owner': (optional) file owner, string with the format 'owner:group'
                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk

        Only 'key-pairs' and 'users' are honoured here: the keys are injected
        through a vCloud guest-customization shell script (run at
        precustomization time). 'user-data', 'config-files' and
        'boot-data-drive' are not implemented in this method.
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")

        try:
            if isinstance(cloud_config, dict):
                key_pairs = []
                userdata = []
                if "key-pairs" in cloud_config:
                    key_pairs = cloud_config["key-pairs"]

                if "users" in cloud_config:
                    userdata = cloud_config["users"]

                # NOTE(review): the customization script is rebuilt and
                # re-applied for every (key, user, user_key) combination, and
                # each customize_guest_os() call replaces the previous script,
                # so only the last combination's keys survive. Also nothing
                # runs when either key_pairs or userdata is empty, and
                # user_name is left unbound when a user dict has no 'name' --
                # confirm whether this matches the intended contract.
                for key in key_pairs:
                    for user in userdata:
                        if 'name' in user: user_name = user['name']
                        if 'key-pairs' in user and len(user['key-pairs']) > 0:
                            for user_key in user['key-pairs']:
                                customize_script = """
                        #!/bin/bash
                        echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
                        if [ "$1" = "precustomization" ];then
                            echo performing precustomization tasks   on  `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
                            if [ ! -d /root/.ssh ];then
                                mkdir /root/.ssh
                                chown root:root /root/.ssh
                                chmod 700 /root/.ssh
                                touch /root/.ssh/authorized_keys
                                chown root:root /root/.ssh/authorized_keys
                                chmod 600 /root/.ssh/authorized_keys
                                # make centos with selinux happy
                                which restorecon && restorecon -Rv /root/.ssh
                                echo '{key}' >> /root/.ssh/authorized_keys
                            else
                                touch /root/.ssh/authorized_keys
                                chown root:root /root/.ssh/authorized_keys
                                chmod 600 /root/.ssh/authorized_keys
                                echo '{key}' >> /root/.ssh/authorized_keys
                            fi
                            if [ -d /home/{user_name} ];then
                                if [ ! -d /home/{user_name}/.ssh ];then
                                    mkdir /home/{user_name}/.ssh
                                    chown {user_name}:{user_name} /home/{user_name}/.ssh
                                    chmod 700 /home/{user_name}/.ssh
                                    touch /home/{user_name}/.ssh/authorized_keys
                                    chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
                                    chmod 600 /home/{user_name}/.ssh/authorized_keys
                                    # make centos with selinux happy
                                    which restorecon && restorecon -Rv /home/{user_name}/.ssh
                                    echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
                                else
                                    touch /home/{user_name}/.ssh/authorized_keys
                                    chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
                                    chmod 600 /home/{user_name}/.ssh/authorized_keys
                                    echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
                                fi
                            fi
                        fi""".format(key=key, user_name=user_name, user_key=user_key)

                                # Apply the script to every VM of the vApp and
                                # wait for the customization task to finish.
                                for vm in vapp._get_vms():
                                    vm_name = vm.name
                                    task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
                                    if isinstance(task, GenericTask):
                                        vca.block_until_completed(task)
                                        self.logger.info("cloud_init : customized guest os task "\
                                                         "completed for VM {}".format(vm_name))
                                    else:
                                        self.logger.error("cloud_init : task for customized guest os"\
                                                          "failed for VM {}".format(vm_name))
        except Exception as exp:
            self.logger.error("cloud_init : exception occurred while injecting "\
                              "ssh-key")
            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
                                           "ssh-key".format(exp))
3980
3981
3982 def add_new_disk(self, vca, vapp_uuid, disk_size):
3983 """
3984 Method to create an empty vm disk
3985
3986 Args:
3987 vapp_uuid - is vapp identifier.
3988 disk_size - size of disk to be created in GB
3989
3990 Returns:
3991 None
3992 """
3993 status = False
3994 vm_details = None
3995 try:
3996 #Disk size in GB, convert it into MB
3997 if disk_size is not None:
3998 disk_size_mb = int(disk_size) * 1024
3999 vm_details = self.get_vapp_details_rest(vapp_uuid)
4000
4001 if vm_details and "vm_virtual_hardware" in vm_details:
4002 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4003 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4004 status = self.add_new_disk_rest(vca, disk_href, disk_size_mb)
4005
4006 except Exception as exp:
4007 msg = "Error occurred while creating new disk {}.".format(exp)
4008 self.rollback_newvm(vapp_uuid, msg)
4009
4010 if status:
4011 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4012 else:
4013 #If failed to add disk, delete VM
4014 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4015 self.rollback_newvm(vapp_uuid, msg)
4016
4017
    def add_new_disk_rest(self, vca, disk_href, disk_size_mb):
        """
        Retrives vApp Disks section & add new empty disk

        Args:
            vca: vCloud director connection object
            disk_href: Disk section href to addd disk
            disk_size_mb: Disk size in MB

        Returns: Status of add new disk task (True on success, False otherwise)
        """
        status = False
        if vca.vcloud_session and vca.vcloud_session.organization:
            # Fetch the VM's current RASD items (virtual hardware disk section).
            response = Http.get(url=disk_href,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code != requests.codes.ok:
                self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
                                  .format(disk_href, response.status_code))
                return status
            try:
                #Find but type & max of instance IDs assigned to disks
                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                # NOTE(review): dict.iteritems() is Python 2 only, consistent
                # with the rest of this module.
                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
                instance_id = 0
                # Track the highest InstanceID among existing hard disks; the
                # bus type/subtype are copied from that disk for the new one.
                for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                    if item.find("rasd:Description",namespaces).text == "Hard disk":
                        inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
                        if inst_id > instance_id:
                            instance_id = inst_id
                            disk_item = item.find("rasd:HostResource" ,namespaces)
                            bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
                            bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]

                # New disk gets the next free InstanceID.
                instance_id = instance_id + 1
                new_item = """<Item>
                                <rasd:Description>Hard disk</rasd:Description>
                                <rasd:ElementName>New disk</rasd:ElementName>
                                <rasd:HostResource
                                            xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
                                            vcloud:capacity="{}"
                                            vcloud:busSubType="{}"
                                            vcloud:busType="{}"></rasd:HostResource>
                                <rasd:InstanceID>{}</rasd:InstanceID>
                                <rasd:ResourceType>17</rasd:ResourceType>
                            </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)

                new_data = response.content
                #Add new item at the bottom
                # NOTE(review): spliced via string replace, so this relies on
                # the exact '</Item>\n</RasdItemsList>' formatting from vCloud.
                new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))

                # Send PUT request to modify virtual hardware section with new disk
                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

                response = Http.put(url=disk_href,
                                    data=new_data,
                                    headers=headers,
                                    verify=vca.verify, logger=self.logger)

                if response.status_code != 202:
                    self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
                                      .format(disk_href, response.status_code, response.content))
                else:
                    # 202 Accepted: wait for the asynchronous vCloud task.
                    add_disk_task = taskType.parseString(response.content, True)
                    if type(add_disk_task) is GenericTask:
                        status = vca.block_until_completed(add_disk_task)
                        if not status:
                            self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))

            except Exception as exp:
                self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))

        return status
4094
4095
4096 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
4097 """
4098 Method to add existing disk to vm
4099 Args :
4100 catalogs - List of VDC catalogs
4101 image_id - Catalog ID
4102 template_name - Name of template in catalog
4103 vapp_uuid - UUID of vApp
4104 Returns:
4105 None
4106 """
4107 disk_info = None
4108 vcenter_conect, content = self.get_vcenter_content()
4109 #find moref-id of vm in image
4110 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
4111 image_id=image_id,
4112 )
4113
4114 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
4115 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
4116 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
4117 if catalog_vm_moref_id:
4118 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
4119 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
4120 if catalog_vm_obj:
4121 #find existing disk
4122 disk_info = self.find_disk(catalog_vm_obj)
4123 else:
4124 exp_msg = "No VM with image id {} found".format(image_id)
4125 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4126 else:
4127 exp_msg = "No Image found with image ID {} ".format(image_id)
4128 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4129
4130 if disk_info:
4131 self.logger.info("Existing disk_info : {}".format(disk_info))
4132 #get VM
4133 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4134 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
4135 if vm_obj:
4136 status = self.add_disk(vcenter_conect=vcenter_conect,
4137 vm=vm_obj,
4138 disk_info=disk_info,
4139 size=size,
4140 vapp_uuid=vapp_uuid
4141 )
4142 if status:
4143 self.logger.info("Disk from image id {} added to {}".format(image_id,
4144 vm_obj.config.name)
4145 )
4146 else:
4147 msg = "No disk found with image id {} to add in VM {}".format(
4148 image_id,
4149 vm_obj.config.name)
4150 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4151
4152
4153 def find_disk(self, vm_obj):
4154 """
4155 Method to find details of existing disk in VM
4156 Args :
4157 vm_obj - vCenter object of VM
4158 image_id - Catalog ID
4159 Returns:
4160 disk_info : dict of disk details
4161 """
4162 disk_info = {}
4163 if vm_obj:
4164 try:
4165 devices = vm_obj.config.hardware.device
4166 for device in devices:
4167 if type(device) is vim.vm.device.VirtualDisk:
4168 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4169 disk_info["full_path"] = device.backing.fileName
4170 disk_info["datastore"] = device.backing.datastore
4171 disk_info["capacityKB"] = device.capacityInKB
4172 break
4173 except Exception as exp:
4174 self.logger.error("find_disk() : exception occurred while "\
4175 "getting existing disk details :{}".format(exp))
4176 return disk_info
4177
4178
4179 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4180 """
4181 Method to add existing disk in VM
4182 Args :
4183 vcenter_conect - vCenter content object
4184 vm - vCenter vm object
4185 disk_info : dict of disk details
4186 Returns:
4187 status : status of add disk task
4188 """
4189 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4190 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4191 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4192 if size is not None:
4193 #Convert size from GB to KB
4194 sizeKB = int(size) * 1024 * 1024
4195 #compare size of existing disk and user given size.Assign whicherver is greater
4196 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4197 sizeKB, capacityKB))
4198 if sizeKB > capacityKB:
4199 capacityKB = sizeKB
4200
4201 if datastore and fullpath and capacityKB:
4202 try:
4203 spec = vim.vm.ConfigSpec()
4204 # get all disks on a VM, set unit_number to the next available
4205 unit_number = 0
4206 for dev in vm.config.hardware.device:
4207 if hasattr(dev.backing, 'fileName'):
4208 unit_number = int(dev.unitNumber) + 1
4209 # unit_number 7 reserved for scsi controller
4210 if unit_number == 7:
4211 unit_number += 1
4212 if isinstance(dev, vim.vm.device.VirtualDisk):
4213 #vim.vm.device.VirtualSCSIController
4214 controller_key = dev.controllerKey
4215
4216 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4217 unit_number, controller_key))
4218 # add disk here
4219 dev_changes = []
4220 disk_spec = vim.vm.device.VirtualDeviceSpec()
4221 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4222 disk_spec.device = vim.vm.device.VirtualDisk()
4223 disk_spec.device.backing = \
4224 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4225 disk_spec.device.backing.thinProvisioned = True
4226 disk_spec.device.backing.diskMode = 'persistent'
4227 disk_spec.device.backing.datastore = datastore
4228 disk_spec.device.backing.fileName = fullpath
4229
4230 disk_spec.device.unitNumber = unit_number
4231 disk_spec.device.capacityInKB = capacityKB
4232 disk_spec.device.controllerKey = controller_key
4233 dev_changes.append(disk_spec)
4234 spec.deviceChange = dev_changes
4235 task = vm.ReconfigVM_Task(spec=spec)
4236 status = self.wait_for_vcenter_task(task, vcenter_conect)
4237 return status
4238 except Exception as exp:
4239 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4240 "{} to vm {}".format(exp,
4241 fullpath,
4242 vm.config.name)
4243 self.rollback_newvm(vapp_uuid, exp_msg)
4244 else:
4245 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4246 self.rollback_newvm(vapp_uuid, msg)
4247
4248
4249 def get_vcenter_content(self):
4250 """
4251 Get the vsphere content object
4252 """
4253 try:
4254 vm_vcenter_info = self.get_vm_vcenter_info()
4255 except Exception as exp:
4256 self.logger.error("Error occurred while getting vCenter infromationn"\
4257 " for VM : {}".format(exp))
4258 raise vimconn.vimconnException(message=exp)
4259
4260 context = None
4261 if hasattr(ssl, '_create_unverified_context'):
4262 context = ssl._create_unverified_context()
4263
4264 vcenter_conect = SmartConnect(
4265 host=vm_vcenter_info["vm_vcenter_ip"],
4266 user=vm_vcenter_info["vm_vcenter_user"],
4267 pwd=vm_vcenter_info["vm_vcenter_password"],
4268 port=int(vm_vcenter_info["vm_vcenter_port"]),
4269 sslContext=context
4270 )
4271 atexit.register(Disconnect, vcenter_conect)
4272 content = vcenter_conect.RetrieveContent()
4273 return vcenter_conect, content
4274
4275
4276 def get_vm_moref_id(self, vapp_uuid):
4277 """
4278 Get the moref_id of given VM
4279 """
4280 try:
4281 if vapp_uuid:
4282 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4283 if vm_details and "vm_vcenter_info" in vm_details:
4284 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4285
4286 return vm_moref_id
4287
4288 except Exception as exp:
4289 self.logger.error("Error occurred while getting VM moref ID "\
4290 " for VM : {}".format(exp))
4291 return None
4292
4293
    def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
        """
        Method to get vApp template details

        Resolves the catalog item for the given image, follows its
        vAppTemplate link and extracts the template VM's vCenter moref id from
        the VCloudExtension section (admin access required).

        Args :
            catalogs - list of VDC catalogs
            image_id - Catalog ID to find
            template_name : template name in catalog
        Returns:
            parsed_respond : dict of vApp tempalte details; currently only a
            nested "vm_vcenter_info" dict that may carry "vm_moref_id".
            Empty dict on any failure.
        """
        parsed_response = {}

        # Admin session: the VCloudExtension (vCenter) section is not visible
        # to regular tenant sessions.
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")

        try:
            catalog = self.get_catalog_obj(image_id, catalogs)
            if catalog:
                template_name = self.get_catalogbyid(image_id, catalogs)
                # NOTE(review): relies on Python 2 filter() returning a list
                # (the len() below would fail on a Python 3 filter object),
                # consistent with the rest of this module.
                catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
                if len(catalog_items) == 1:
                    response = Http.get(catalog_items[0].get_href(),
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                    catalogItem = XmlElementTree.fromstring(response.content)
                    # The catalog item links to the actual vAppTemplate entity.
                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                    vapp_tempalte_href = entity.get("href")
                    #get vapp details and parse moref id

                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                                  'vmw': 'http://www.vmware.com/schema/ovf',
                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
                                  }

                    if vca.vcloud_session and vca.vcloud_session.organization:
                        response = Http.get(url=vapp_tempalte_href,
                                            headers=vca.vcloud_session.get_vcloud_headers(),
                                            verify=vca.verify,
                                            logger=vca.logger
                                            )

                        if response.status_code != requests.codes.ok:
                            self.logger.debug("REST API call {} failed. Return status code {}".format(
                                vapp_tempalte_href, response.status_code))

                        else:
                            # Walk Children -> VCloudExtension -> VmVimInfo ->
                            # VmVimObjectRef -> MoRef to find the vCenter id.
                            xmlroot_respond = XmlElementTree.fromstring(response.content)
                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
                            if children_section is not None:
                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                                if vCloud_extension_section is not None:
                                    vm_vcenter_info = {}
                                    vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                                    vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                                    if vmext is not None:
                                        vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                                    parsed_response["vm_vcenter_info"]= vm_vcenter_info

        except Exception as exp :
            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))

        return parsed_response
4362
4363
4364 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
4365 """
4366 Method to delete vApp
4367 Args :
4368 vapp_uuid - vApp UUID
4369 msg - Error message to be logged
4370 exp_type : Exception type
4371 Returns:
4372 None
4373 """
4374 if vapp_uuid:
4375 status = self.delete_vminstance(vapp_uuid)
4376 else:
4377 msg = "No vApp ID"
4378 self.logger.error(msg)
4379 if exp_type == "Genric":
4380 raise vimconn.vimconnException(msg)
4381 elif exp_type == "NotFound":
4382 raise vimconn.vimconnNotFoundException(message=msg)
4383
4384 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4385 """
4386 Method to attach SRIOV adapters to VM
4387
4388 Args:
4389 vapp_uuid - uuid of vApp/VM
4390 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
4391 vmname_andid - vmname
4392
4393 Returns:
4394 The status of add SRIOV adapter task , vm object and
4395 vcenter_conect object
4396 """
4397 vm_obj = None
4398 vcenter_conect, content = self.get_vcenter_content()
4399 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4400
4401 if vm_moref_id:
4402 try:
4403 no_of_sriov_devices = len(sriov_nets)
4404 if no_of_sriov_devices > 0:
4405 #Get VM and its host
4406 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4407 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4408 if host_obj and vm_obj:
4409 #get SRIOV devies from host on which vapp is currently installed
4410 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4411 no_of_sriov_devices,
4412 )
4413
4414 if len(avilable_sriov_devices) == 0:
4415 #find other hosts with active pci devices
4416 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4417 content,
4418 no_of_sriov_devices,
4419 )
4420
4421 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4422 #Migrate vm to the host where SRIOV devices are available
4423 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4424 new_host_obj))
4425 task = self.relocate_vm(new_host_obj, vm_obj)
4426 if task is not None:
4427 result = self.wait_for_vcenter_task(task, vcenter_conect)
4428 self.logger.info("Migrate VM status: {}".format(result))
4429 host_obj = new_host_obj
4430 else:
4431 self.logger.info("Fail to migrate VM : {}".format(result))
4432 raise vimconn.vimconnNotFoundException(
4433 "Fail to migrate VM : {} to host {}".format(
4434 vmname_andid,
4435 new_host_obj)
4436 )
4437
4438 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4439 #Add SRIOV devices one by one
4440 for sriov_net in sriov_nets:
4441 network_name = sriov_net.get('net_id')
4442 dvs_portgr_name = self.create_dvPort_group(network_name)
4443 if sriov_net.get('type') == "VF":
4444 #add vlan ID ,Modify portgroup for vlan ID
4445 self.configure_vlanID(content, vcenter_conect, network_name)
4446
4447 task = self.add_sriov_to_vm(content,
4448 vm_obj,
4449 host_obj,
4450 network_name,
4451 avilable_sriov_devices[0]
4452 )
4453 if task:
4454 status= self.wait_for_vcenter_task(task, vcenter_conect)
4455 if status:
4456 self.logger.info("Added SRIOV {} to VM {}".format(
4457 no_of_sriov_devices,
4458 str(vm_obj)))
4459 else:
4460 self.logger.error("Fail to add SRIOV {} to VM {}".format(
4461 no_of_sriov_devices,
4462 str(vm_obj)))
4463 raise vimconn.vimconnUnexpectedResponse(
4464 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
4465 )
4466 return True, vm_obj, vcenter_conect
4467 else:
4468 self.logger.error("Currently there is no host with"\
4469 " {} number of avaialble SRIOV "\
4470 "VFs required for VM {}".format(
4471 no_of_sriov_devices,
4472 vmname_andid)
4473 )
4474 raise vimconn.vimconnNotFoundException(
4475 "Currently there is no host with {} "\
4476 "number of avaialble SRIOV devices required for VM {}".format(
4477 no_of_sriov_devices,
4478 vmname_andid))
4479 else:
4480 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
4481
4482 except vmodl.MethodFault as error:
4483 self.logger.error("Error occurred while adding SRIOV {} ",error)
4484 return None, vm_obj, vcenter_conect
4485
4486
4487 def get_sriov_devices(self,host, no_of_vfs):
4488 """
4489 Method to get the details of SRIOV devices on given host
4490 Args:
4491 host - vSphere host object
4492 no_of_vfs - number of VFs needed on host
4493
4494 Returns:
4495 array of SRIOV devices
4496 """
4497 sriovInfo=[]
4498 if host:
4499 for device in host.config.pciPassthruInfo:
4500 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4501 if device.numVirtualFunction >= no_of_vfs:
4502 sriovInfo.append(device)
4503 break
4504 return sriovInfo
4505
4506
4507 def get_host_and_sriov_devices(self, content, no_of_vfs):
4508 """
4509 Method to get the details of SRIOV devices infromation on all hosts
4510
4511 Args:
4512 content - vSphere host object
4513 no_of_vfs - number of pci VFs needed on host
4514
4515 Returns:
4516 array of SRIOV devices and host object
4517 """
4518 host_obj = None
4519 sriov_device_objs = None
4520 try:
4521 if content:
4522 container = content.viewManager.CreateContainerView(content.rootFolder,
4523 [vim.HostSystem], True)
4524 for host in container.view:
4525 devices = self.get_sriov_devices(host, no_of_vfs)
4526 if devices:
4527 host_obj = host
4528 sriov_device_objs = devices
4529 break
4530 except Exception as exp:
4531 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4532
4533 return host_obj,sriov_device_objs
4534
4535
    def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
        """
        Method to add SRIOV adapter to vm.

        Builds a VirtualSriovEthernetCard device spec backed by the given
        physical SRIOV function and reconfigures the VM with it.

         Args:
            host_obj - vSphere host object
            vm_obj - vSphere vm object
            content - vCenter content object
            network_name - name of distributed virtaul portgroup
            sriov_device - SRIOV device info

        Returns:
            the ReconfigVM task object, or None on any failure
        """
        devices = []
        vnic_label = "sriov nic"
        try:
            # replace the key by the actual portgroup display name
            dvs_portgr = self.get_dvport_group(network_name)
            network_name = dvs_portgr.name
            nic = vim.vm.device.VirtualDeviceSpec()
            # VM device
            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
            nic.device = vim.vm.device.VirtualSriovEthernetCard()
            nic.device.addressType = 'assigned'
            #nic.device.key = 13016
            nic.device.deviceInfo = vim.Description()
            nic.device.deviceInfo.label = vnic_label
            nic.device.deviceInfo.summary = network_name
            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()

            # standard network backing pointing at the (portgroup) network
            nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
            nic.device.backing.deviceName = network_name
            nic.device.backing.useAutoDetect = False
            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
            nic.device.connectable.startConnected = True
            nic.device.connectable.allowGuestControl = True

            # bind the adapter to the physical SRIOV function on the host
            nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
            nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
            nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id

            devices.append(nic)
            vmconf = vim.vm.ConfigSpec(deviceChange=devices)
            task = vm_obj.ReconfigVM_Task(vmconf)
            return task
        except Exception as exp:
            self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
            return None
4584
4585
4586 def create_dvPort_group(self, network_name):
4587 """
4588 Method to create disributed virtual portgroup
4589
4590 Args:
4591 network_name - name of network/portgroup
4592
4593 Returns:
4594 portgroup key
4595 """
4596 try:
4597 new_network_name = [network_name, '-', str(uuid.uuid4())]
4598 network_name=''.join(new_network_name)
4599 vcenter_conect, content = self.get_vcenter_content()
4600
4601 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4602 if dv_switch:
4603 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4604 dv_pg_spec.name = network_name
4605
4606 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4607 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4608 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4609 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4610 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4611 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4612
4613 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4614 self.wait_for_vcenter_task(task, vcenter_conect)
4615
4616 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4617 if dvPort_group:
4618 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
4619 return dvPort_group.key
4620 else:
4621 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
4622
4623 except Exception as exp:
4624 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
4625 " : {}".format(network_name, exp))
4626 return None
4627
4628 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4629 """
4630 Method to reconfigure disributed virtual portgroup
4631
4632 Args:
4633 dvPort_group_name - name of disributed virtual portgroup
4634 content - vCenter content object
4635 config_info - disributed virtual portgroup configuration
4636
4637 Returns:
4638 task object
4639 """
4640 try:
4641 dvPort_group = self.get_dvport_group(dvPort_group_name)
4642 if dvPort_group:
4643 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4644 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4645 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4646 if "vlanID" in config_info:
4647 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4648 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4649
4650 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4651 return task
4652 else:
4653 return None
4654 except Exception as exp:
4655 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
4656 " : {}".format(dvPort_group_name, exp))
4657 return None
4658
4659
4660 def destroy_dvport_group(self , dvPort_group_name):
4661 """
4662 Method to destroy disributed virtual portgroup
4663
4664 Args:
4665 network_name - name of network/portgroup
4666
4667 Returns:
4668 True if portgroup successfully got deleted else false
4669 """
4670 vcenter_conect, content = self.get_vcenter_content()
4671 try:
4672 status = None
4673 dvPort_group = self.get_dvport_group(dvPort_group_name)
4674 if dvPort_group:
4675 task = dvPort_group.Destroy_Task()
4676 status = self.wait_for_vcenter_task(task, vcenter_conect)
4677 return status
4678 except vmodl.MethodFault as exp:
4679 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
4680 exp, dvPort_group_name))
4681 return None
4682
4683
4684 def get_dvport_group(self, dvPort_group_name):
4685 """
4686 Method to get disributed virtual portgroup
4687
4688 Args:
4689 network_name - name of network/portgroup
4690
4691 Returns:
4692 portgroup object
4693 """
4694 vcenter_conect, content = self.get_vcenter_content()
4695 dvPort_group = None
4696 try:
4697 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4698 for item in container.view:
4699 if item.key == dvPort_group_name:
4700 dvPort_group = item
4701 break
4702 return dvPort_group
4703 except vmodl.MethodFault as exp:
4704 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4705 exp, dvPort_group_name))
4706 return None
4707
4708 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4709 """
4710 Method to get disributed virtual portgroup vlanID
4711
4712 Args:
4713 network_name - name of network/portgroup
4714
4715 Returns:
4716 vlan ID
4717 """
4718 vlanId = None
4719 try:
4720 dvPort_group = self.get_dvport_group(dvPort_group_name)
4721 if dvPort_group:
4722 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4723 except vmodl.MethodFault as exp:
4724 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4725 exp, dvPort_group_name))
4726 return vlanId
4727
4728
4729 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4730 """
4731 Method to configure vlanID in disributed virtual portgroup vlanID
4732
4733 Args:
4734 network_name - name of network/portgroup
4735
4736 Returns:
4737 None
4738 """
4739 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4740 if vlanID == 0:
4741 #configure vlanID
4742 vlanID = self.genrate_vlanID(dvPort_group_name)
4743 config = {"vlanID":vlanID}
4744 task = self.reconfig_portgroup(content, dvPort_group_name,
4745 config_info=config)
4746 if task:
4747 status= self.wait_for_vcenter_task(task, vcenter_conect)
4748 if status:
4749 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4750 dvPort_group_name,vlanID))
4751 else:
4752 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
4753 dvPort_group_name, vlanID))
4754
4755
4756 def genrate_vlanID(self, network_name):
4757 """
4758 Method to get unused vlanID
4759 Args:
4760 network_name - name of network/portgroup
4761 Returns:
4762 vlanID
4763 """
4764 vlan_id = None
4765 used_ids = []
4766 if self.config.get('vlanID_range') == None:
4767 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4768 "at config value before creating sriov network with vlan tag")
4769 if "used_vlanIDs" not in self.persistent_info:
4770 self.persistent_info["used_vlanIDs"] = {}
4771 else:
4772 used_ids = self.persistent_info["used_vlanIDs"].values()
4773
4774 for vlanID_range in self.config.get('vlanID_range'):
4775 start_vlanid , end_vlanid = vlanID_range.split("-")
4776 if start_vlanid > end_vlanid:
4777 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4778 vlanID_range))
4779
4780 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4781 if id not in used_ids:
4782 vlan_id = id
4783 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4784 return vlan_id
4785 if vlan_id is None:
4786 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4787
4788
4789 def get_obj(self, content, vimtype, name):
4790 """
4791 Get the vsphere object associated with a given text name
4792 """
4793 obj = None
4794 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4795 for item in container.view:
4796 if item.name == name:
4797 obj = item
4798 break
4799 return obj
4800
4801
4802 def insert_media_to_vm(self, vapp, image_id):
4803 """
4804 Method to insert media CD-ROM (ISO image) from catalog to vm.
4805 vapp - vapp object to get vm id
4806 Image_id - image id for cdrom to be inerted to vm
4807 """
4808 # create connection object
4809 vca = self.connect()
4810 try:
4811 # fetching catalog details
4812 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
4813 response = Http.get(url=rest_url,
4814 headers=vca.vcloud_session.get_vcloud_headers(),
4815 verify=vca.verify,
4816 logger=vca.logger)
4817
4818 if response.status_code != 200:
4819 self.logger.error("REST call {} failed reason : {}"\
4820 "status code : {}".format(url_rest_call,
4821 response.content,
4822 response.status_code))
4823 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
4824 "catalog details")
4825 # searching iso name and id
4826 iso_name,media_id = self.get_media_details(vca, response.content)
4827
4828 if iso_name and media_id:
4829 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
4830 <ns6:MediaInsertOrEjectParams
4831 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
4832 <ns6:Media
4833 type="application/vnd.vmware.vcloud.media+xml"
4834 name="{}.iso"
4835 id="urn:vcloud:media:{}"
4836 href="https://{}/api/media/{}"/>
4837 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
4838 vca.host,media_id)
4839
4840 for vms in vapp._get_vms():
4841 vm_id = (vms.id).split(':')[-1]
4842
4843 headers = vca.vcloud_session.get_vcloud_headers()
4844 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
4845 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
4846
4847 response = Http.post(url=rest_url,
4848 headers=headers,
4849 data=data,
4850 verify=vca.verify,
4851 logger=vca.logger)
4852
4853 if response.status_code != 202:
4854 self.logger.error("Failed to insert CD-ROM to vm")
4855 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
4856 "ISO image to vm")
4857 else:
4858 task = taskType.parseString(response.content, True)
4859 if isinstance(task, GenericTask):
4860 vca.block_until_completed(task)
4861 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
4862 " image to vm {}".format(vm_id))
4863 except Exception as exp:
4864 self.logger.error("insert_media_to_vm() : exception occurred "\
4865 "while inserting media CD-ROM")
4866 raise vimconn.vimconnException(message=exp)
4867
4868
    def get_media_details(self, vca, content):
        """
        Method to get catalog item details.

            Args:
                vca - connection object
                content - Catalog details (XML body of a catalog listing)

            Returns:
                (media name, media id) for the first catalog item entity that
                links to a media resource, or (False, False) when the first
                inspected entity is not a media link
        """
        cataloghref_list = []
        try:
            if content:
                vm_list_xmlroot = XmlElementTree.fromstring(content)
                # collect the href of every CatalogItem element in the listing
                for child in vm_list_xmlroot.iter():
                    if 'CatalogItem' in child.tag:
                        cataloghref_list.append(child.attrib.get('href'))
                # NOTE(review): always true — the list is initialised to [];
                # an empty list simply skips the loop below
                if cataloghref_list is not None:
                    for href in cataloghref_list:
                        if href:
                            response = Http.get(url=href,
                                                headers=vca.vcloud_session.get_vcloud_headers(),
                                                verify=vca.verify,
                                                logger=vca.logger)
                            if response.status_code != 200:
                                self.logger.error("REST call {} failed reason : {}"\
                                                  "status code : {}".format(href,
                                                                            response.content,
                                                                            response.status_code))
                                raise vimconn.vimconnException("get_media_details : Failed to get "\
                                                               "catalogitem details")
                            list_xmlroot = XmlElementTree.fromstring(response.content)
                            for child in list_xmlroot.iter():
                                if 'Entity' in child.tag:
                                    # a media entity href contains '/media/'
                                    if 'media' in child.attrib.get('href'):
                                        name = child.attrib.get('name')
                                        media_id = child.attrib.get('href').split('/').pop()
                                        return name,media_id
                                    else:
                                        self.logger.debug("Media name and id not found")
                                        return False,False
        except Exception as exp:
            self.logger.error("get_media_details : exception occurred "\
                              "getting media details")
            raise vimconn.vimconnException(message=exp)
4911