Changes in vimconn_vmware.py: Removed multiple connects to reduce instantiation time...
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implements an abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
68 # global variable for vcd connector type
69 STANDALONE = 'standalone'
70
71 # key for flavor dicts
72 FLAVOR_RAM_KEY = 'ram'
73 FLAVOR_VCPUS_KEY = 'vcpus'
74 FLAVOR_DISK_KEY = 'disk'
75 DEFAULT_IP_PROFILE = {'dhcp_count':50,
76 'dhcp_enabled':True,
77 'ip_version':"IPv4"
78 }
79 # global variable for wait time
80 INTERVAL_TIME = 5
81 MAX_WAIT_TIME = 1800
82
83 VCAVERSION = '5.9'
84
85 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
86 __date__ = "$12-Jan-2017 11:09:29$"
87 __version__ = '0.1'
88
89 # -1: "Could not be created",
90 # 0: "Unresolved",
91 # 1: "Resolved",
92 # 2: "Deployed",
93 # 3: "Suspended",
94 # 4: "Powered on",
95 # 5: "Waiting for user input",
96 # 6: "Unknown state",
97 # 7: "Unrecognized state",
98 # 8: "Powered off",
99 # 9: "Inconsistent state",
100 # 10: "Children do not all have the same status",
101 # 11: "Upload initiated, OVF descriptor pending",
102 # 12: "Upload initiated, copying contents",
103 # 13: "Upload initiated , disk contents pending",
104 # 14: "Upload has been quarantined",
105 # 15: "Upload quarantine period has expired"
106
107 # mapping vCD status to MANO
108 vcdStatusCode2manoFormat = {4: 'ACTIVE',
109 7: 'PAUSED',
110 3: 'SUSPENDED',
111 8: 'INACTIVE',
112 12: 'BUILD',
113 -1: 'ERROR',
114 14: 'DELETED'}
115
116 #
117 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
118 'ERROR': 'ERROR', 'DELETED': 'DELETED'
119 }
120
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
128 Constructor creates a VMware connector to vCloud director.
129
130 By default the constructor doesn't validate the connection state, so a client can create the object with None arguments.
131 If the client specifies username, password, host and VDC name, the connector initializes the remaining attributes:
132
133 a) it initializes the organization UUID
134 b) it initializes the tenant_id/VDC ID (this information is derived from the tenant name)
135
136 Args:
137 uuid - organization uuid.
138 name - organization name; it must be present in vCloud director.
139 tenant_id - VDC uuid; it must be present in vCloud director.
140 tenant_name - VDC name.
141 url - hostname or IP address of vCloud director.
142 url_admin - same as above.
143 user - user that administers the organization. The caller must make sure that
144 the username has the right privileges.
145
146 password - password for the user.
147
148 The VMware connector also requires PVDC administrative privileges and a separate account.
149 These credentials must be passed via the config argument, a dict containing the keys
150
151 dict['admin_username']
152 dict['admin_password']
153 config - provides NSX and vCenter information
154
155 Returns:
156 Nothing.
157 """
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
217 # raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
233 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
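# Illustrative sketch (comments only, not executed): how a caller might build the config
# dict this constructor expects. Host names, credentials and org/VDC names below are
# placeholder assumptions, not values from this repository.
#
#   config = {'admin_username': 'pvdc-admin', 'admin_password': 'secret',
#             'nsx_manager': 'https://nsx.example.com', 'nsx_user': 'admin',
#             'nsx_password': 'secret',
#             'vcenter_ip': '10.0.0.5', 'vcenter_port': 443,
#             'vcenter_user': 'administrator', 'vcenter_password': 'secret'}
#   vim = vimconnector(name='vmware-site', tenant_name='MyOrg:MyVDC',
#                      url='https://vcd.example.com', user='org-user', passwd='secret',
#                      config=config)
#   # tenant_name may also be just the VDC name, with the organization passed via
#   # config['orgname'] instead of the "Org:VDC" form.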
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 if index == 'tenant_id':
243 return self.tenant_id
244 if index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 if index == 'tenant_id':
269 self.tenant_id = value
270 if index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
289 def connect_as_admin(self):
290 """ Method connect as pvdc admin user to vCloud director.
291 There are certain action that can be done only by provider vdc admin user.
292 Organization creation / provider network creation etc.
293
294 Returns:
295 The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
296 """
297
298 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
299
300 vca_admin = VCA(host=self.url,
301 username=self.admin_user,
302 service_type=STANDALONE,
303 version=VCAVERSION,
304 verify=False,
305 log=False)
306 result = vca_admin.login(password=self.admin_password, org='System')
307 if not result:
308 raise vimconn.vimconnConnectionException(
309 "Can't connect to a vCloud director as: {}".format(self.admin_user))
310 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
311 if result is True:
312 self.logger.info(
313 "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
314
315 return vca_admin
316
317 def connect(self):
318 """ Method connect as normal user to vCloud director.
319
320 Returns:
321 The return vca object that letter can be used to connect to vCloud director as admin for VDC
322 """
323
324 try:
325 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
326 self.user,
327 self.org_name))
328 vca = VCA(host=self.url,
329 username=self.user,
330 service_type=STANDALONE,
331 version=VCAVERSION,
332 verify=False,
333 log=False)
334
335 result = vca.login(password=self.passwd, org=self.org_name)
336 if not result:
337 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
338 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
339 if result is True:
340 self.logger.info(
341 "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
342
343 except:
344 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
345 "{} as user: {}".format(self.org_name, self.user))
346
347 return vca
348
349 def init_organization(self):
350 """ Method initialize organization UUID and VDC parameters.
351
352 At bare minimum client must provide organization name that present in vCloud director and VDC.
353
354 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
355 The Org - UUID will be initialized at the run time if data center present in vCloud director.
356
357 Returns:
358 The return vca object that letter can be used to connect to vcloud direct as admin
359 """
360 vca = self.connect()
361 if not vca:
362 raise vimconn.vimconnConnectionException("self.connect() is failed.")
363
364 self.vca = vca
365 try:
366 if self.org_uuid is None:
367 org_dict = self.get_org_list()
368 for org in org_dict:
369 # we set org UUID at the init phase but we can do it only when we have valid credential.
370 if org_dict[org] == self.org_name:
371 self.org_uuid = org
372 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
373 break
374 else:
375 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
376
377 # if all went well, request the org details
378 org_details_dict = self.get_org(org_uuid=self.org_uuid)
379
380 # we have two cases when initializing the VDC ID or VDC name at run time
381 # tenant_name provided but no tenant id
382 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
383 vdcs_dict = org_details_dict['vdcs']
384 for vdc in vdcs_dict:
385 if vdcs_dict[vdc] == self.tenant_name:
386 self.tenant_id = vdc
387 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
388 self.org_name))
389 break
390 else:
391 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
392 # case two: we have the tenant_id but not the tenant name, so we find and set it.
393 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
394 vdcs_dict = org_details_dict['vdcs']
395 for vdc in vdcs_dict:
396 if vdc == self.tenant_id:
397 self.tenant_name = vdcs_dict[vdc]
398 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
399 self.org_name))
400 break
401 else:
402 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
403 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
404 except:
405 self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
406 self.logger.debug(traceback.format_exc())
407 self.org_uuid = None
408
409 def new_tenant(self, tenant_name=None, tenant_description=None):
410 """ Method adds a new tenant to VIM with this name.
411 This action requires access to create VDC action in vCloud director.
412
413 Args:
414 tenant_name is the name of the tenant to be created.
415 tenant_description not used for this call
416
417 Return:
418 returns the tenant identifier in UUID format.
419 If the action fails the method raises vimconn.vimconnException
420 """
421 vdc_task = self.create_vdc(vdc_name=tenant_name)
422 if vdc_task is not None:
423 vdc_uuid, value = vdc_task.popitem()
424 self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
425 return vdc_uuid
426 else:
427 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
428
429 def delete_tenant(self, tenant_id=None):
430 """Delete a tenant from VIM"""
431 'Returns the tenant identifier'
432 raise vimconn.vimconnNotImplemented("Should have implemented this")
433
434 def get_tenant_list(self, filter_dict={}):
435 """Obtain tenants of VIM
436 filter_dict can contain the following keys:
437 name: filter by tenant name
438 id: filter by tenant uuid/id
439 <other VIM specific>
440 Returns the tenant list of dictionaries:
441 [{'name':'<name>, 'id':'<id>, ...}, ...]
442
443 """
444 org_dict = self.get_org(self.org_uuid)
445 vdcs_dict = org_dict['vdcs']
446
447 vdclist = []
448 try:
449 for k in vdcs_dict:
450 entry = {'name': vdcs_dict[k], 'id': k}
451 # if caller didn't specify dictionary we return all tenants.
452 if filter_dict is not None and filter_dict:
453 filtered_entry = entry.copy()
454 filtered_dict = set(entry.keys()) - set(filter_dict)
455 for unwanted_key in filtered_dict: del entry[unwanted_key]
456 if filter_dict == entry:
457 vdclist.append(filtered_entry)
458 else:
459 vdclist.append(entry)
460 except:
461 self.logger.debug("Error in get_tenant_list()")
462 self.logger.debug(traceback.format_exc())
463 raise vimconn.vimconnException("Incorrect state. {}")
464
465 return vdclist
466
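# Illustrative usage sketch (names are placeholders, not part of the module):
# get_tenant_list() matches the keys given in filter_dict against each
# {'name': <vdc name>, 'id': <vdc uuid>} entry and returns the full matching entries.
#
#   all_vdcs = vim.get_tenant_list()
#   only_one = vim.get_tenant_list(filter_dict={'name': 'MyVDC'})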
467 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
468 """Adds a tenant network to VIM
469 net_name is the name
470 net_type can be 'bridge', 'data' or 'ptp'.
471 ip_profile is a dict containing the IP parameters of the network
472 shared is a boolean
473 Returns the network identifier"""
474
475 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
476 .format(net_name, net_type, ip_profile, shared))
477
478 isshared = 'false'
479 if shared:
480 isshared = 'true'
481
482 # ############# Stub code for SRIOV #################
483 # if net_type == "data" or net_type == "ptp":
484 # if self.config.get('dv_switch_name') == None:
485 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
486 # network_uuid = self.create_dvPort_group(net_name)
487
488 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
489 ip_profile=ip_profile, isshared=isshared)
490 if network_uuid is not None:
491 return network_uuid
492 else:
493 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
494
495 def get_vcd_network_list(self):
496 """ Method available organization for a logged in tenant
497
498 Returns:
499 The return vca object that letter can be used to connect to vcloud direct as admin
500 """
501
502 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
503
504 if not self.tenant_name:
505 raise vimconn.vimconnConnectionException("Tenant name is empty.")
506
507 vdc = self.get_vdc_details()
508 if vdc is None:
509 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
510
511 vdc_uuid = vdc.get_id().split(":")[3]
512 networks = self.vca.get_networks(vdc.get_name())
513 network_list = []
514 try:
515 for network in networks:
516 filter_dict = {}
517 netid = network.get_id().split(":")
518 if len(netid) != 4:
519 continue
520
521 filter_dict["name"] = network.get_name()
522 filter_dict["id"] = netid[3]
523 filter_dict["shared"] = network.get_IsShared()
524 filter_dict["tenant_id"] = vdc_uuid
525 if network.get_status() == 1:
526 filter_dict["admin_state_up"] = True
527 else:
528 filter_dict["admin_state_up"] = False
529 filter_dict["status"] = "ACTIVE"
530 filter_dict["type"] = "bridge"
531 network_list.append(filter_dict)
532 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
533 except:
534 self.logger.debug("Error in get_vcd_network_list")
535 self.logger.debug(traceback.format_exc())
536 pass
537
538 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
539 return network_list
540
541 def get_network_list(self, filter_dict={}):
542 """Obtain tenant networks of VIM
543 Filter_dict can be:
544 name: network name OR/AND
545 id: network uuid OR/AND
546 shared: boolean OR/AND
547 tenant_id: tenant OR/AND
548 admin_state_up: boolean
549 status: 'ACTIVE'
550
551 [{key : value , key : value}]
552
553 Returns the network list of dictionaries:
554 [{<the fields at Filter_dict plus some VIM specific>}, ...]
555 List can be empty
556 """
557
558 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
559
560 if not self.tenant_name:
561 raise vimconn.vimconnConnectionException("Tenant name is empty.")
562
563 vdc = self.get_vdc_details()
564 if vdc is None:
565 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
566
567 try:
568 vdcid = vdc.get_id().split(":")[3]
569 networks = self.vca.get_networks(vdc.get_name())
570 network_list = []
571
572 for network in networks:
573 filter_entry = {}
574 net_uuid = network.get_id().split(":")
575 if len(net_uuid) != 4:
576 continue
577 else:
578 net_uuid = net_uuid[3]
579 # create dict entry
580 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
581 vdcid,
582 network.get_name()))
583 filter_entry["name"] = network.get_name()
584 filter_entry["id"] = net_uuid
585 filter_entry["shared"] = network.get_IsShared()
586 filter_entry["tenant_id"] = vdcid
587 if network.get_status() == 1:
588 filter_entry["admin_state_up"] = True
589 else:
590 filter_entry["admin_state_up"] = False
591 filter_entry["status"] = "ACTIVE"
592 filter_entry["type"] = "bridge"
593 filtered_entry = filter_entry.copy()
594
595 if filter_dict is not None and filter_dict:
596 # we remove all the key : value we don't care and match only
597 # respected field
598 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
599 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
600 if filter_dict == filter_entry:
601 network_list.append(filtered_entry)
602 else:
603 network_list.append(filtered_entry)
604 except:
605 self.logger.debug("Error in get_vcd_network_list")
606 self.logger.debug(traceback.format_exc())
607
608 self.logger.debug("Returning {}".format(network_list))
609 return network_list
610
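# Illustrative usage sketch (the network name is a placeholder): get_network_list()
# builds one entry per org VDC network and keeps only the entries whose fields named in
# filter_dict match.
#
#   nets = vim.get_network_list(filter_dict={'name': 'mgmt-net'})
#   # each returned entry carries: name, id, shared, tenant_id, admin_state_up, status, type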
611 def get_network(self, net_id):
612 """Method obtains network details of net_id VIM network
613 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
614
615 try:
616 vdc = self.get_vdc_details()
617 vdc_id = vdc.get_id().split(":")[3]
618
619 networks = self.vca.get_networks(vdc.get_name())
620 filter_dict = {}
621
622 for network in networks:
623 vdc_network_id = network.get_id().split(":")
624 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
625 filter_dict["name"] = network.get_name()
626 filter_dict["id"] = vdc_network_id[3]
627 filter_dict["shared"] = network.get_IsShared()
628 filter_dict["tenant_id"] = vdc_id
629 if network.get_status() == 1:
630 filter_dict["admin_state_up"] = True
631 else:
632 filter_dict["admin_state_up"] = False
633 filter_dict["status"] = "ACTIVE"
634 filter_dict["type"] = "bridge"
635 self.logger.debug("Returning {}".format(filter_dict))
636 return filter_dict
637 except:
638 self.logger.debug("Error in get_network")
639 self.logger.debug(traceback.format_exc())
640
641 return filter_dict
642
643 def delete_network(self, net_id):
644 """
645 Method Deletes a tenant network from VIM, provide the network id.
646
647 Returns the network identifier or raise an exception
648 """
649
650 # ############# Stub code for SRIOV #################
651 # dvport_group = self.get_dvport_group(net_id)
652 # if dvport_group:
653 # #delete portgroup
654 # status = self.destroy_dvport_group(net_id)
655 # if status:
656 # # Remove vlanID from persistent info
657 # if net_id in self.persistent_info["used_vlanIDs"]:
658 # del self.persistent_info["used_vlanIDs"][net_id]
659 #
660 # return net_id
661
662 vcd_network = self.get_vcd_network(network_uuid=net_id)
663 if vcd_network is not None and vcd_network:
664 if self.delete_network_action(network_uuid=net_id):
665 return net_id
666 else:
667 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
668
669 def refresh_nets_status(self, net_list):
670 """Get the status of the networks
671 Params: the list of network identifiers
672 Returns a dictionary with:
673 net_id: #VIM id of this network
674 status: #Mandatory. Text with one of:
675 # DELETED (not found at vim)
676 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
677 # OTHER (Vim reported other status not understood)
678 # ERROR (VIM indicates an ERROR status)
679 # ACTIVE, INACTIVE, DOWN (admin down),
680 # BUILD (on building process)
681 #
682 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
683 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
684
685 """
686
687 dict_entry = {}
688 try:
689 for net in net_list:
690 errormsg = ''
691 vcd_network = self.get_vcd_network(network_uuid=net)
692 if vcd_network is not None and vcd_network:
693 if vcd_network['status'] == '1':
694 status = 'ACTIVE'
695 else:
696 status = 'DOWN'
697 else:
698 status = 'DELETED'
699 errormsg = 'Network not found.'
700
701 dict_entry[net] = {'status': status, 'error_msg': errormsg,
702 'vim_info': yaml.safe_dump(vcd_network)}
703 except:
704 self.logger.debug("Error in refresh_nets_status")
705 self.logger.debug(traceback.format_exc())
706
707 return dict_entry
708
709 def get_flavor(self, flavor_id):
710 """Obtain flavor details from the VIM
711 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
712 """
713 if flavor_id not in vimconnector.flavorlist:
714 raise vimconn.vimconnNotFoundException("Flavor not found.")
715 return vimconnector.flavorlist[flavor_id]
716
717 def new_flavor(self, flavor_data):
718 """Adds a tenant flavor to VIM
719 flavor_data contains a dictionary with information, keys:
720 name: flavor name
721 ram: memory (cloud type) in MBytes
722 vcpus: cpus (cloud type)
723 extended: EPA parameters
724 - numas: #items requested in same NUMA
725 memory: number of 1G huge pages memory
726 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
727 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
728 - name: interface name
729 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
730 bandwidth: X Gbps; requested guarantee bandwidth
731 vpci: requested virtual PCI address
732 disk: disk size
733 is_public:
734 #TODO to concrete
735 Returns the flavor identifier"""
736
737 # generate a new uuid put to internal dict and return it.
738 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
739 new_flavor=flavor_data
740 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
741 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
742 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
743
744 if not isinstance(ram, int):
745 raise vimconn.vimconnException("Non-integer value for ram")
746 elif not isinstance(cpu, int):
747 raise vimconn.vimconnException("Non-integer value for cpu")
748 elif not isinstance(disk, int):
749 raise vimconn.vimconnException("Non-integer value for disk")
750
751 extended_flv = flavor_data.get("extended")
752 if extended_flv:
753 numas=extended_flv.get("numas")
754 if numas:
755 for numa in numas:
756 #overwrite ram and vcpus
757 ram = numa['memory']*1024
758 if 'paired-threads' in numa:
759 cpu = numa['paired-threads']*2
760 elif 'cores' in numa:
761 cpu = numa['cores']
762 elif 'threads' in numa:
763 cpu = numa['threads']
764
765 new_flavor[FLAVOR_RAM_KEY] = ram
766 new_flavor[FLAVOR_VCPUS_KEY] = cpu
767 new_flavor[FLAVOR_DISK_KEY] = disk
768 # generate a new uuid put to internal dict and return it.
769 flavor_id = uuid.uuid4()
770 vimconnector.flavorlist[str(flavor_id)] = new_flavor
771 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
772
773 return str(flavor_id)
774
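# Illustrative sketch of the flavor bookkeeping above (values are examples): an EPA
# flavor with one NUMA node overrides ram/vcpus from the numa description.
#
#   flavor_id = vim.new_flavor({'name': 'epa-small', 'ram': 2048, 'vcpus': 2, 'disk': 10,
#                               'extended': {'numas': [{'memory': 4, 'paired-threads': 2}]}})
#   # stored entry: ram = 4 * 1024 = 4096 MB, vcpus = 2 * 2 = 4, disk = 10
#   vim.get_flavor(flavor_id)   # returns the dict kept in vimconnector.flavorlist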
775 def delete_flavor(self, flavor_id):
776 """Deletes a tenant flavor from VIM identify by its id
777
778 Returns the used id or raise an exception
779 """
780 if flavor_id not in vimconnector.flavorlist:
781 raise vimconn.vimconnNotFoundException("Flavor not found.")
782
783 vimconnector.flavorlist.pop(flavor_id, None)
784 return flavor_id
785
786 def new_image(self, image_dict):
787 """
788 Adds a tenant image to VIM
789 Returns:
790 200, image-id if the image is created
791 <0, message if there is an error
792 """
793
794 return self.get_image_id_from_path(image_dict['location'])
795
796 def delete_image(self, image_id):
797 """
798
799 :param image_id:
800 :return:
801 """
802
803 raise vimconn.vimconnNotImplemented("Should have implemented this")
804
805 def catalog_exists(self, catalog_name, catalogs):
806 """
807
808 :param catalog_name:
809 :param catalogs:
810 :return:
811 """
812 for catalog in catalogs:
813 if catalog.name == catalog_name:
814 return True
815 return False
816
817 def create_vimcatalog(self, vca=None, catalog_name=None):
818 """ Create new catalog entry in vCloud director.
819
820 Args
821 vca: vCloud director.
822 catalog_name catalog that client wish to create. Note no validation done for a name.
823 Client must make sure that provide valid string representation.
824
825 Return (bool) True if catalog created.
826
827 """
828 try:
829 task = vca.create_catalog(catalog_name, catalog_name)
830 result = vca.block_until_completed(task)
831 if not result:
832 return False
833 catalogs = vca.get_catalogs()
834 except:
835 return False
836 return self.catalog_exists(catalog_name, catalogs)
837
838 # noinspection PyIncorrectDocstring
839 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
840 description='', progress=False, chunk_bytes=128 * 1024):
841 """
842 Uploads a OVF file to a vCloud catalog
843
844 :param chunk_bytes:
845 :param progress:
846 :param description:
847 :param image_name:
848 :param vca:
849 :param catalog_name: (str): The name of the catalog to upload the media.
850 :param media_file_name: (str): The name of the local media file to upload.
851 :return: (bool) True if the media file was successfully uploaded, false otherwise.
852 """
853 os.path.isfile(media_file_name)
854 statinfo = os.stat(media_file_name)
855
856 # find a catalog entry where we upload OVF.
857 # create the vApp Template and check the status: if vCD is able to read the OVF it will respond with the appropriate
858 # status change.
859 # if vCD can parse the OVF we upload the VMDK file
860 try:
861 for catalog in vca.get_catalogs():
862 if catalog_name != catalog.name:
863 continue
864 link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
865 link.get_rel() == 'add', catalog.get_Link())
866 assert len(link) == 1
867 data = """
868 <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
869 """ % (escape(catalog_name), escape(description))
870 headers = vca.vcloud_session.get_vcloud_headers()
871 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
872 response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
873 if response.status_code == requests.codes.created:
874 catalogItem = XmlElementTree.fromstring(response.content)
875 entity = [child for child in catalogItem if
876 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
877 href = entity.get('href')
878 template = href
879 response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
880 verify=vca.verify, logger=self.logger)
881
882 if response.status_code == requests.codes.ok:
883 media = mediaType.parseString(response.content, True)
884 link = filter(lambda link: link.get_rel() == 'upload:default',
885 media.get_Files().get_File()[0].get_Link())[0]
886 headers = vca.vcloud_session.get_vcloud_headers()
887 headers['Content-Type'] = 'Content-Type text/xml'
888 response = Http.put(link.get_href(),
889 data=open(media_file_name, 'rb'),
890 headers=headers,
891 verify=vca.verify, logger=self.logger)
892 if response.status_code != requests.codes.ok:
893 self.logger.debug(
894 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
895 media_file_name))
896 return False
897
898 # TODO fix this with an async block
899 time.sleep(5)
900
901 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
902
903 # uploading VMDK file
904 # check status of OVF upload and upload remaining files.
905 response = Http.get(template,
906 headers=vca.vcloud_session.get_vcloud_headers(),
907 verify=vca.verify,
908 logger=self.logger)
909
910 if response.status_code == requests.codes.ok:
911 media = mediaType.parseString(response.content, True)
912 number_of_files = len(media.get_Files().get_File())
913 for index in xrange(0, number_of_files):
914 links_list = filter(lambda link: link.get_rel() == 'upload:default',
915 media.get_Files().get_File()[index].get_Link())
916 for link in links_list:
917 # we skip ovf since it already uploaded.
918 if 'ovf' in link.get_href():
919 continue
920 # The OVF file and the VMDK must be in the same directory
921 head, tail = os.path.split(media_file_name)
922 file_vmdk = head + '/' + link.get_href().split("/")[-1]
923 if not os.path.isfile(file_vmdk):
924 return False
925 statinfo = os.stat(file_vmdk)
926 if statinfo.st_size == 0:
927 return False
928 hrefvmdk = link.get_href()
929
930 if progress:
931 print("Uploading file: {}".format(file_vmdk))
932 if progress:
933 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
934 FileTransferSpeed()]
935 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
936
937 bytes_transferred = 0
938 f = open(file_vmdk, 'rb')
939 while bytes_transferred < statinfo.st_size:
940 my_bytes = f.read(chunk_bytes)
941 if len(my_bytes) <= chunk_bytes:
942 headers = vca.vcloud_session.get_vcloud_headers()
943 headers['Content-Range'] = 'bytes %s-%s/%s' % (
944 bytes_transferred, bytes_transferred + len(my_bytes) - 1, statinfo.st_size)
945 headers['Content-Length'] = str(len(my_bytes))
946 response = Http.put(hrefvmdk,
947 headers=headers,
948 data=my_bytes,
949 verify=vca.verify,
950 logger=None)
951
952 if response.status_code == requests.codes.ok:
953 bytes_transferred += len(my_bytes)
954 if progress:
955 progress_bar.update(bytes_transferred)
956 else:
957 self.logger.debug(
958 'file upload failed with error: [%s] %s' % (response.status_code,
959 response.content))
960
961 f.close()
962 return False
963 f.close()
964 if progress:
965 progress_bar.finish()
966 time.sleep(10)
967 return True
968 else:
969 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
970 format(catalog_name, media_file_name))
971 return False
972 except Exception as exp:
973 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
974 .format(catalog_name,media_file_name, exp))
975 raise vimconn.vimconnException(
976 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
977 .format(catalog_name,media_file_name, exp))
978
979 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
980 return False
981
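# A short sketch of the chunked upload arithmetic used in upload_ovf() above (file size
# and chunk size are example numbers): each PUT carries a Content-Range header describing
# the slice of the file being sent, "bytes <first>-<last>/<total>".
#
#   statinfo.st_size = 307200 (300 KiB), chunk_bytes = 131072 (128 KiB)
#   chunk 1: Content-Range: bytes 0-131071/307200        Content-Length: 131072
#   chunk 2: Content-Range: bytes 131072-262143/307200   Content-Length: 131072
#   chunk 3: Content-Range: bytes 262144-307199/307200   Content-Length: 45056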
982 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
983 """Upload media file"""
984 # TODO add named parameters for readability
985
986 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
987 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
988
989 def validate_uuid4(self, uuid_string=None):
990 """ Method validate correct format of UUID.
991
992 Return: true if string represent valid uuid
993 """
994 try:
995 val = uuid.UUID(uuid_string, version=4)
996 except ValueError:
997 return False
998 return True
999
1000 def get_catalogid(self, catalog_name=None, catalogs=None):
1001 """ Method check catalog and return catalog ID in UUID format.
1002
1003 Args
1004 catalog_name: catalog name as string
1005 catalogs: list of catalogs.
1006
1007 Return: catalogs uuid
1008 """
1009
1010 for catalog in catalogs:
1011 if catalog.name == catalog_name:
1012 catalog_id = catalog.get_id().split(":")
1013 return catalog_id[3]
1014 return None
1015
1016 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1017 """ Method check catalog and return catalog name lookup done by catalog UUID.
1018
1019 Args
1020 catalog_name: catalog name as string
1021 catalogs: list of catalogs.
1022
1023 Return: catalogs name or None
1024 """
1025
1026 if not self.validate_uuid4(uuid_string=catalog_uuid):
1027 return None
1028
1029 for catalog in catalogs:
1030 catalog_id = catalog.get_id().split(":")[3]
1031 if catalog_id == catalog_uuid:
1032 return catalog.name
1033 return None
1034
1035 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1036 """ Method check catalog and return catalog name lookup done by catalog UUID.
1037
1038 Args
1039 catalog_name: catalog name as string
1040 catalogs: list of catalogs.
1041
1042 Return: catalogs name or None
1043 """
1044
1045 if not self.validate_uuid4(uuid_string=catalog_uuid):
1046 return None
1047
1048 for catalog in catalogs:
1049 catalog_id = catalog.get_id().split(":")[3]
1050 if catalog_id == catalog_uuid:
1051 return catalog
1052 return None
1053
1054 def get_image_id_from_path(self, path=None, progress=False):
1055 """ Method upload OVF image to vCloud director.
1056
1057 Each OVF image is represented as a single catalog entry in vCloud director.
1058 The method checks for an existing catalog entry; the lookup uses a catalog name derived from the image path (an MD5 hash).
1059
1060 If the catalog entry is already present the method returns the existing catalog uuid, otherwise
1061 it creates a new catalog entry and uploads the OVF file to the newly created catalog.
1062
1063 If the method can't create the catalog entry or upload the file it raises an exception.
1064
1065 The method accepts a boolean flag progress that outputs a progress bar. It is useful
1066 for the standalone upload use case, e.g. to test a large file upload.
1067
1068 Args
1069 path: - valid path to the OVF file.
1070 progress - boolean; show a progress bar.
1071
1072 Return: if the image is uploaded correctly the method returns the image catalog UUID.
1073 """
1074
1075 if not path:
1076 raise vimconn.vimconnException("Image path can't be None.")
1077
1078 if not os.path.isfile(path):
1079 raise vimconn.vimconnException("Can't read file. File not found.")
1080
1081 if not os.access(path, os.R_OK):
1082 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1083
1084 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1085
1086 dirpath, filename = os.path.split(path)
1087 flname, file_extension = os.path.splitext(path)
1088 if file_extension != '.ovf':
1089 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1090 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1091
1092 catalog_name = os.path.splitext(filename)[0]
1093 catalog_md5_name = hashlib.md5(path).hexdigest()
1094 self.logger.debug("File name {} Catalog Name {} file path {} "
1095 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1096
1097 try:
1098 catalogs = self.vca.get_catalogs()
1099 except Exception as exp:
1100 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1101 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1102
1103 if len(catalogs) == 0:
1104 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1105 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1106 if not result:
1107 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1108 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1109 media_name=filename, medial_file_name=path, progress=progress)
1110 if not result:
1111 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1112 return self.get_catalogid(catalog_name, self.vca.get_catalogs())
1113 else:
1114 for catalog in catalogs:
1115 # search for existing catalog if we find same name we return ID
1116 # TODO optimize this
1117 if catalog.name == catalog_md5_name:
1118 self.logger.debug("Found existing catalog entry for {} "
1119 "catalog id {}".format(catalog_name,
1120 self.get_catalogid(catalog_md5_name, catalogs)))
1121 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1122
1123 # if we didn't find existing catalog we create a new one and upload image.
1124 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1125 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1126 if not result:
1127 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1128
1129 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1130 media_name=filename, medial_file_name=path, progress=progress)
1131 if not result:
1132 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1133
1134 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1135
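# Illustrative usage sketch (the path is a placeholder): new_image()/get_image_id_from_path()
# derive the catalog name from an MD5 hash of the OVF path, create the catalog if it does
# not exist, upload the OVF plus its VMDK, and return the catalog UUID.
#
#   image_id = vim.new_image({'location': '/tmp/ubuntu16.ovf'})
#   # equivalent to: vim.get_image_id_from_path('/tmp/ubuntu16.ovf')
#   # catalog name used internally: hashlib.md5('/tmp/ubuntu16.ovf').hexdigest()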
1136 def get_image_list(self, filter_dict={}):
1137 '''Obtain tenant images from VIM
1138 Filter_dict can be:
1139 name: image name
1140 id: image uuid
1141 checksum: image checksum
1142 location: image path
1143 Returns the image list of dictionaries:
1144 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1145 List can be empty
1146 '''
1147
1148 try:
1149 image_list = []
1150 catalogs = self.vca.get_catalogs()
1151 if len(catalogs) == 0:
1152 return image_list
1153 else:
1154 for catalog in catalogs:
1155 catalog_uuid = catalog.get_id().split(":")[3]
1156 name = catalog.name
1157 filtered_dict = {}
1158 if filter_dict.get("name") and filter_dict["name"] != name:
1159 continue
1160 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1161 continue
1162 filtered_dict ["name"] = name
1163 filtered_dict ["id"] = catalog_uuid
1164 image_list.append(filtered_dict)
1165
1166 self.logger.debug("List of already created catalog items: {}".format(image_list))
1167 return image_list
1168 except Exception as exp:
1169 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1170
1171 def get_vappid(self, vdc=None, vapp_name=None):
1172 """ Method takes vdc object and vApp name and returns vapp uuid or None
1173
1174 Args:
1175 vdc: The VDC object.
1176 vapp_name: the vApp name identifier
1177
1178 Returns:
1179 The vApp UUID, otherwise None
1180 """
1181 if vdc is None or vapp_name is None:
1182 return None
1183 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1184 try:
1185 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1186 vdc.ResourceEntities.ResourceEntity)
1187 if len(refs) == 1:
1188 return refs[0].href.split("vapp")[1][1:]
1189 except Exception as e:
1190 self.logger.exception(e)
1191 return False
1192 return None
1193
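# A small sketch of the href parsing used in get_vappid() above and check_vapp() below,
# based on the URL format noted in the docstring (the host is a placeholder):
#
#   href = 'https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf'
#   href.split("vapp")[1][1:]   # -> '30da58a3-e7c7-4d09-8f68-d4c8201169cf'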
1194 def check_vapp(self, vdc=None, vapp_uuid=None):
1195 """ Method Method returns True or False if vapp deployed in vCloud director
1196
1197 Args:
1198 vca: Connector to VCA
1199 vdc: The VDC object.
1200 vappid: vappid is application identifier
1201
1202 Returns:
1203 True if the vApp is deployed
1204 :param vdc:
1205 :param vapp_uuid:
1206 """
1207 try:
1208 refs = filter(lambda ref:
1209 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1210 vdc.ResourceEntities.ResourceEntity)
1211 for ref in refs:
1212 vappid = ref.href.split("vapp")[1][1:]
1213 # find vapp with respected vapp uuid
1214 if vappid == vapp_uuid:
1215 return True
1216 except Exception as e:
1217 self.logger.exception(e)
1218 return False
1219 return False
1220
1221 def get_namebyvappid(self, vdc=None, vapp_uuid=None):
1222 """Method returns vApp name from vCD and lookup done by vapp_id.
1223
1224 Args:
1225 vca: Connector to VCA
1226 vdc: The VDC object.
1227 vapp_uuid: vappid is application identifier
1228
1229 Returns:
1230 The vApp name, otherwise None
1231 """
1232
1233 try:
1234 refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1235 vdc.ResourceEntities.ResourceEntity)
1236 for ref in refs:
1237 # we care only about UUID the rest doesn't matter
1238 vappid = ref.href.split("vapp")[1][1:]
1239 if vappid == vapp_uuid:
1240 response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
1241 logger=self.logger)
1242
1243 #Retry login if session expired & retry sending request
1244 if response.status_code == 403:
1245 response = self.retry_rest('GET', ref.href)
1246
1247 tree = XmlElementTree.fromstring(response.content)
1248 return tree.attrib['name']
1249 except Exception as e:
1250 self.logger.exception(e)
1251 return None
1252 return None
1253
1254 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
1255 cloud_config=None, disk_list=None):
1256 """Adds a VM instance to VIM
1257 Params:
1258 start: indicates if VM must start or boot in pause mode. Ignored
1259 image_id,flavor_id: image and flavor uuid
1260 net_list: list of interfaces, each one is a dictionary with:
1261 name:
1262 net_id: network uuid to connect
1263 vpci: virtual PCI address to assign
1264 model: interface model, virtio, e1000, ...
1265 mac_address:
1266 use: 'data', 'bridge', 'mgmt'
1267 type: 'virtual', 'PF', 'VF', 'VFnotShared'
1268 vim_id: filled/added by this function
1269 cloud_config: can be a text script to be passed directly to cloud-init,
1270 or an object to inject users and ssh keys with format:
1271 key-pairs: [] list of keys to install to the default user
1272 users: [{ name, key-pairs: []}] list of users to add with their key-pair
1273 #TODO ip, security groups
1274 Returns >=0, the instance identifier
1275 <0, error_text
1276 """
1277
1278 self.logger.info("Creating new instance for entry {}".format(name))
1279 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
1280 description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
1281
1282 # new vm name = vmname + '-' + uuid
1283 new_vm_name = [name, '-', str(uuid.uuid4())]
1284 vmname_andid = ''.join(new_vm_name)
1285
1286 # if vm already deployed we return existing uuid
1287 # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
1288 # if vapp_uuid is not None:
1289 # return vapp_uuid
1290
1291 # we check for presence of VDC, Catalog entry and Flavor.
1292 vdc = self.get_vdc_details()
1293 if vdc is None:
1294 raise vimconn.vimconnNotFoundException(
1295 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1296 catalogs = self.vca.get_catalogs()
1297 if catalogs is None:
1298 #Retry once, if failed by refreshing token
1299 self.get_token()
1300 catalogs = self.vca.get_catalogs()
1301 if catalogs is None:
1302 raise vimconn.vimconnNotFoundException(
1303 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1304
1305 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1306 if catalog_hash_name:
1307 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1308 else:
1309 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1310 "(Failed retrieve catalog information {})".format(name, image_id))
1311
1312
1313 # Set vCPU and Memory based on flavor.
1314 vm_cpus = None
1315 vm_memory = None
1316 vm_disk = None
1317
1318 if flavor_id is not None:
1319 if flavor_id not in vimconnector.flavorlist:
1320 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1321 "Failed retrieve flavor information "
1322 "flavor id {}".format(name, flavor_id))
1323 else:
1324 try:
1325 flavor = vimconnector.flavorlist[flavor_id]
1326 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1327 vm_memory = flavor[FLAVOR_RAM_KEY]
1328 vm_disk = flavor[FLAVOR_DISK_KEY]
1329 extended = flavor.get("extended", None)
1330 if extended:
1331 numas=extended.get("numas", None)
1332
1333 except Exception as exp:
1334 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1335
1336 # image upload creates template name as catalog name space Template.
1337 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1338 power_on = 'false'
1339 if start:
1340 power_on = 'true'
1341
1342 # client must provide at least one entry in net_list; if not, we report an error
1343 # If the net type is mgmt, then configure it as the primary net & use its NIC index as the primary NIC
1344 # If there is no mgmt net, then the 1st net in net_list is considered the primary net.
1345 primary_net = None
1346 primary_netname = None
1347 network_mode = 'bridged'
1348 if net_list is not None and len(net_list) > 0:
1349 for net in net_list:
1350 if 'use' in net and net['use'] == 'mgmt':
1351 primary_net = net
1352 if primary_net is None:
1353 primary_net = net_list[0]
1354
1355 try:
1356 primary_net_id = primary_net['net_id']
1357 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1358 if 'name' in network_dict:
1359 primary_netname = network_dict['name']
1360
1361 except KeyError:
1362 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1363 else:
1364 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create vApp {}: network list is empty.".format(name))
1365
1366 # use: 'data', 'bridge', 'mgmt'
1367 # create vApp. Set vcpu and ram based on flavor id.
1368 try:
1369 vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1370 self.get_catalogbyid(image_id, catalogs),
1371 network_name=None, # None while creating vapp
1372 network_mode=network_mode,
1373 vm_name=vmname_andid,
1374 vm_cpus=vm_cpus, # can be None if flavor is None
1375 vm_memory=vm_memory) # can be None if flavor is None
1376
1377 if vapptask is None or vapptask is False:
1378 self.get_token() # Retry getting token
1379 vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1380 self.get_catalogbyid(image_id, catalogs),
1381 network_name=None, # None while creating vapp
1382 network_mode=network_mode,
1383 vm_name=vmname_andid,
1384 vm_cpus=vm_cpus, # can be None if flavor is None
1385 vm_memory=vm_memory) # can be None if flavor is None
1386
1387 if vapptask is None or vapptask is False:
1388 raise vimconn.vimconnUnexpectedResponse(
1389 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1390 if type(vapptask) is VappTask:
1391 self.vca.block_until_completed(vapptask)
1392
1393 except Exception as exp:
1394 raise vimconn.vimconnUnexpectedResponse(
1395 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1396
1397 # we should have now vapp in undeployed state.
1398 try:
1399 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1400
1401 except Exception as exp:
1402 raise vimconn.vimconnUnexpectedResponse(
1403 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1404 .format(vmname_andid, exp))
1405
1406 if vapp_uuid is None:
1407 raise vimconn.vimconnUnexpectedResponse(
1408 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1409 vmname_andid))
1410
1411 #Add PCI passthrough/SRIOV configurations
1412 vm_obj = None
1413 pci_devices_info = []
1414 sriov_net_info = []
1415 reserve_memory = False
1416
1417 for net in net_list:
1418 if net["type"]=="PF":
1419 pci_devices_info.append(net)
1420 elif (net["type"]=="VF" or net["type"]=="VFnotShared") and 'net_id'in net:
1421 sriov_net_info.append(net)
1422
1423 #Add PCI
1424 if len(pci_devices_info) > 0:
1425 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1426 vmname_andid ))
1427 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1428 pci_devices_info,
1429 vmname_andid)
1430 if PCI_devices_status:
1431 self.logger.info("Added PCI devives {} to VM {}".format(
1432 pci_devices_info,
1433 vmname_andid)
1434 )
1435 reserve_memory = True
1436 else:
1437 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1438 pci_devices_info,
1439 vmname_andid)
1440 )
1441
1442 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1443 # Modify vm disk
1444 if vm_disk:
1445 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1446 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1447 if result :
1448 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1449
1450 #Add new or existing disks to vApp
1451 if disk_list:
1452 added_existing_disk = False
1453 for disk in disk_list:
1454 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1455 image_id = disk['image_id']
1456 # Adding CD-ROM to VM
1457 # will revisit code once specification ready to support this feature
1458 self.insert_media_to_vm(vapp, image_id)
1459 elif "image_id" in disk and disk["image_id"] is not None:
1460 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1461 disk["image_id"] , vapp_uuid))
1462 self.add_existing_disk(catalogs=catalogs,
1463 image_id=disk["image_id"],
1464 size = disk["size"],
1465 template_name=templateName,
1466 vapp_uuid=vapp_uuid
1467 )
1468 added_existing_disk = True
1469 else:
1470 #Wait till added existing disk gets reflected into vCD database/API
1471 if added_existing_disk:
1472 time.sleep(5)
1473 added_existing_disk = False
1474 self.add_new_disk(vapp_uuid, disk['size'])
1475
1476 if numas:
1477 # Assigning numa affinity setting
1478 for numa in numas:
1479 if 'paired-threads-id' in numa:
1480 paired_threads_id = numa['paired-threads-id']
1481 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1482
1483 # add NICs & connect to networks in netlist
1484 try:
1485 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1486 nicIndex = 0
1487 primary_nic_index = 0
1488 for net in net_list:
1489 # openmano uses network id in UUID format.
1490 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
1491 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1492 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1493
1494 if 'net_id' not in net:
1495 continue
1496
1497 interface_net_id = net['net_id']
1498 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1499 interface_network_mode = net['use']
1500
1501 if interface_network_mode == 'mgmt':
1502 primary_nic_index = nicIndex
1503
1504 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1505 - DHCP (The IP address is obtained from a DHCP service.)
1506 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1507 - NONE (No IP addressing mode specified.)"""
1508
1509 if primary_netname is not None:
1510 nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
1511 if len(nets) == 1:
1512 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
1513
1514 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1515 task = vapp.connect_to_network(nets[0].name, nets[0].href)
1516 if type(task) is GenericTask:
1517 self.vca.block_until_completed(task)
1518 # connect network to VM - with all DHCP by default
1519
1520 type_list = ['PF','VF','VFnotShared']
1521 if 'type' in net and net['type'] not in type_list:
1522 # fetching nic type from vnf
1523 if 'model' in net:
1524 nic_type = net['model']
1525 self.logger.info("new_vminstance(): adding network adapter "\
1526 "to a network {}".format(nets[0].name))
1527 self.add_network_adapter_to_vms(vapp, nets[0].name,
1528 primary_nic_index,
1529 nicIndex,
1530 net,
1531 nic_type=nic_type)
1532 else:
1533 self.logger.info("new_vminstance(): adding network adapter "\
1534 "to a network {}".format(nets[0].name))
1535 self.add_network_adapter_to_vms(vapp, nets[0].name,
1536 primary_nic_index,
1537 nicIndex,
1538 net)
1539 nicIndex += 1
1540
1541 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1542 # cloud-init for ssh-key injection
1543 if cloud_config:
1544 self.cloud_init(vapp,cloud_config)
1545
1546 # deploy and power on vm
1547 self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
1548 deploytask = vapp.deploy(powerOn=False)
1549 if type(deploytask) is GenericTask:
1550 self.vca.block_until_completed(deploytask)
1551
1552 # ############# Stub code for SRIOV #################
1553 #Add SRIOV
1554 # if len(sriov_net_info) > 0:
1555 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1556 # vmname_andid ))
1557 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1558 # sriov_net_info,
1559 # vmname_andid)
1560 # if sriov_status:
1561 # self.logger.info("Added SRIOV {} to VM {}".format(
1562 # sriov_net_info,
1563 # vmname_andid)
1564 # )
1565 # reserve_memory = True
1566 # else:
1567 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1568 # sriov_net_info,
1569 # vmname_andid)
1570 # )
1571
1572 # If VM has PCI devices or SRIOV reserve memory for VM
1573 if reserve_memory:
1574 memReserve = vm_obj.config.hardware.memoryMB
1575 spec = vim.vm.ConfigSpec()
1576 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1577 task = vm_obj.ReconfigVM_Task(spec=spec)
1578 if task:
1579 result = self.wait_for_vcenter_task(task, vcenter_conect)
1580 self.logger.info("Reserved memory {} MB for "\
1581 "VM. VM status: {}".format(str(memReserve), result))
1582 else:
1583 self.logger.info("Failed to reserve memory {} MB for VM {}".format(
1584 str(memReserve), str(vm_obj)))
1585
1586 self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
1587
1588 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1589 poweron_task = vapp.poweron()
1590 if type(poweron_task) is GenericTask:
1591 self.vca.block_until_completed(poweron_task)
1592
1593 except Exception as exp:
1594 # it might be the case that a specific mandatory entry in the dict is empty, or some other pyvcloud exception
1595 self.logger.debug("new_vminstance(): Failed to create new vm instance {} with exception {}"
1596 .format(name, exp))
1597 raise vimconn.vimconnException("new_vminstance(): Failed to create new vm instance {} with exception {}"
1598 .format(name, exp))
1599
1600 # check if the vApp is deployed and, if so, return its UUID; otherwise raise an exception
1601 wait_time = 0
1602 vapp_uuid = None
1603 while wait_time <= MAX_WAIT_TIME:
1604 try:
1605 vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
1606 except Exception as exp:
1607 raise vimconn.vimconnUnexpectedResponse(
1608 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1609 .format(vmname_andid, exp))
1610
1611 if vapp and vapp.me.deployed:
1612 vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
1613 break
1614 else:
1615 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1616 time.sleep(INTERVAL_TIME)
1617
1618 wait_time +=INTERVAL_TIME
1619
1620 if vapp_uuid is not None:
1621 return vapp_uuid
1622 else:
1623 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create new vm instance {}".format(name))
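# A minimal sketch of the wait-and-poll pattern used above, assuming the module-level
# INTERVAL_TIME / MAX_WAIT_TIME constants; 'poll' is a hypothetical helper name used only
# for illustration, not part of this connector:
#
#   def poll(check, interval=INTERVAL_TIME, timeout=MAX_WAIT_TIME):
#       waited = 0
#       while waited <= timeout:
#           if check():
#               return True
#           time.sleep(interval)
#           waited += interval
#       return False
#
#   # e.g. poll(lambda: self.vca.get_vapp(self.get_vdc_details(), vmname_andid).me.deployed)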
1624
1625 ##
1626 ##
1627 ## based on current discussion
1628 ##
1629 ##
1630 ## server:
1631 # created: '2016-09-08T11:51:58'
1632 # description: simple-instance.linux1.1
1633 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1634 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1635 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1636 # status: ACTIVE
1637 # error_msg:
1638 # interfaces: …
1639 #
1640 def get_vminstance(self, vim_vm_uuid=None):
1641 """Returns the VM instance information from VIM"""
1642
1643 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1644
1645 vdc = self.get_vdc_details()
1646 if vdc is None:
1647 raise vimconn.vimconnConnectionException(
1648 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1649
1650 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1651 if not vm_info_dict:
1652 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1653 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1654
1655 status_key = vm_info_dict['status']
1656 error = ''
1657 try:
1658 vm_dict = {'created': vm_info_dict['created'],
1659 'description': vm_info_dict['name'],
1660 'status': vcdStatusCode2manoFormat[int(status_key)],
1661 'hostId': vm_info_dict['vmuuid'],
1662 'error_msg': error,
1663 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1664
1665 if 'interfaces' in vm_info_dict:
1666 vm_dict['interfaces'] = vm_info_dict['interfaces']
1667 else:
1668 vm_dict['interfaces'] = []
1669 except KeyError:
1670 vm_dict = {'created': '',
1671 'description': '',
1672 'status': vcdStatusCode2manoFormat[int(-1)],
1673 'hostId': vm_info_dict['vmuuid'],
1674 'error_msg': "Inconsistency state",
1675 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1676
1677 return vm_dict
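# Illustrative shape of the dictionary returned above for a powered-on VM; the values are
# placeholders taken from the sample in the comment block below, not real data:
#
#   {'created': '2016-09-08T11:51:58',
#    'description': 'simple-instance.linux1.1',
#    'status': 'ACTIVE',
#    'hostId': 'e836c036-74e7-11e6-b249-0800273e724c',
#    'error_msg': '',
#    'vim_info': '<yaml dump of the raw vApp details>',
#    'interfaces': []}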
1678
1679 def delete_vminstance(self, vm__vim_uuid):
1680 """Method poweroff and remove VM instance from vcloud director network.
1681
1682 Args:
1683 vm__vim_uuid: VM UUID
1684
1685 Returns:
1686 Returns the instance identifier
1687 """
1688
1689 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1690
1691 vdc = self.get_vdc_details()
1692 if vdc is None:
1693 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1694 self.tenant_name))
1695 raise vimconn.vimconnException(
1696 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1697
1698 try:
1699 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1700 if vapp_name is None:
1701 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1702 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1703 else:
1704 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1705
1706 # Delete vApp and wait for status change if task executed and vApp is None.
1707 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1708
1709 if vapp:
1710 if vapp.me.deployed:
1711 self.logger.info("Powering off vApp {}".format(vapp_name))
1712 #Power off vApp
1713 powered_off = False
1714 wait_time = 0
1715 while wait_time <= MAX_WAIT_TIME:
1716 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1717 if not vapp:
1718 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1719 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1720
1721 power_off_task = vapp.poweroff()
1722 if type(power_off_task) is GenericTask:
1723 result = self.vca.block_until_completed(power_off_task)
1724 if result:
1725 powered_off = True
1726 break
1727 else:
1728 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1729 time.sleep(INTERVAL_TIME)
1730
1731 wait_time +=INTERVAL_TIME
1732 if not powered_off:
1733 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1734 else:
1735 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1736
1737 #Undeploy vApp
1738 self.logger.info("Undeploy vApp {}".format(vapp_name))
1739 wait_time = 0
1740 undeployed = False
1741 while wait_time <= MAX_WAIT_TIME:
1742 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1743 if not vapp:
1744 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1745 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1746 undeploy_task = vapp.undeploy(action='powerOff')
1747
1748 if type(undeploy_task) is GenericTask:
1749 result = self.vca.block_until_completed(undeploy_task)
1750 if result:
1751 undeployed = True
1752 break
1753 else:
1754 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1755 time.sleep(INTERVAL_TIME)
1756
1757 wait_time +=INTERVAL_TIME
1758
1759 if not undeployed:
1760 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1761
1762 # delete vapp
1763 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1764 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1765
1766 if vapp is not None:
1767 wait_time = 0
1768 result = False
1769
1770 while wait_time <= MAX_WAIT_TIME:
1771 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1772 if not vapp:
1773 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1774 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1775
1776 delete_task = vapp.delete()
1777
1778 if type(delete_task) is GenericTask:
1780 result = self.vca.block_until_completed(delete_task)
1781 if result:
1782 break
1783 else:
1784 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1785 time.sleep(INTERVAL_TIME)
1786
1787 wait_time +=INTERVAL_TIME
1788
1789 if not result:
1790 self.logger.debug("delete_vminstance(): Failed to delete vApp with uuid {} ".format(vm__vim_uuid))
1791
1792 except:
1793 self.logger.debug(traceback.format_exc())
1794 raise vimconn.vimconnException("delete_vminstance(): Failed to delete vm instance {}".format(vm__vim_uuid))
1795
1796 if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
1797 self.logger.info("Deleted vm instance {} successfully".format(vm__vim_uuid))
1798 return vm__vim_uuid
1799 else:
1800 raise vimconn.vimconnException("delete_vminstance(): Failed to delete vm instance {}".format(vm__vim_uuid))
1801
1802 def refresh_vms_status(self, vm_list):
1803 """Get the status of the virtual machines and their interfaces/ports
1804 Params: the list of VM identifiers
1805 Returns a dictionary with:
1806 vm_id: #VIM id of this Virtual Machine
1807 status: #Mandatory. Text with one of:
1808 # DELETED (not found at vim)
1809 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1810 # OTHER (Vim reported other status not understood)
1811 # ERROR (VIM indicates an ERROR status)
1812 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
1813 # CREATING (on building process), ERROR
1814 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
1815 #
1816 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1817 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1818 interfaces:
1819 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1820 mac_address: #Text format XX:XX:XX:XX:XX:XX
1821 vim_net_id: #network id where this interface is connected
1822 vim_interface_id: #interface/port VIM id
1823 ip_address: #null, or text with IPv4, IPv6 address
1824 """
1825
1826 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
1827
1828 vdc = self.get_vdc_details()
1829 if vdc is None:
1830 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1831
1832 vms_dict = {}
1833 nsx_edge_list = []
1834 for vmuuid in vm_list:
1835 vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
1836 if vmname is not None:
1837
1838 try:
1839 vm_pci_details = self.get_vm_pci_details(vmuuid)
1840 the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
1841 vm_info = the_vapp.get_vms_details()
1842 vm_status = vm_info[0]['status']
1843 vm_info[0].update(vm_pci_details)
1844
1845 vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
1846 'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
1847 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
1848
1849 # get networks
1850 vm_app_networks = the_vapp.get_vms_network_info()
1851 for vapp_network in vm_app_networks:
1852 for vm_network in vapp_network:
1853 if vm_network['name'] == vmname:
1854 #Assign IP Address based on MAC Address in NSX DHCP lease info
1855 if vm_network['ip'] is None:
1856 if not nsx_edge_list:
1857 nsx_edge_list = self.get_edge_details()
1858 if nsx_edge_list is None:
1859 raise vimconn.vimconnException("refresh_vms_status:"\
1860 "Failed to get edge details from NSX Manager")
1861 if vm_network['mac'] is not None:
1862 vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
1863
1864 vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
1865 interface = {"mac_address": vm_network['mac'],
1866 "vim_net_id": vm_net_id,
1867 "vim_interface_id": vm_net_id,
1868 'ip_address': vm_network['ip']}
1869 # interface['vim_info'] = yaml.safe_dump(vm_network)
1870 vm_dict["interfaces"].append(interface)
1871 # add a vm to vm dict
1872 vms_dict.setdefault(vmuuid, vm_dict)
1873 except Exception as exp:
1874 self.logger.debug("Error in response {}".format(exp))
1875 self.logger.debug(traceback.format_exc())
1876
1877 return vms_dict
1878
1879
1880 def get_edge_details(self):
1881 """Get the NSX edge list from NSX Manager
1882 Returns list of NSX edges
1883 """
1884 edge_list = []
1885 rheaders = {'Content-Type': 'application/xml'}
1886 nsx_api_url = '/api/4.0/edges'
1887
1888 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
1889
1890 try:
1891 resp = requests.get(self.nsx_manager + nsx_api_url,
1892 auth = (self.nsx_user, self.nsx_password),
1893 verify = False, headers = rheaders)
1894 if resp.status_code == requests.codes.ok:
1895 paged_Edge_List = XmlElementTree.fromstring(resp.text)
1896 for edge_pages in paged_Edge_List:
1897 if edge_pages.tag == 'edgePage':
1898 for edge_summary in edge_pages:
1899 if edge_summary.tag == 'pagingInfo':
1900 for element in edge_summary:
1901 if element.tag == 'totalCount' and element.text == '0':
1902 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
1903 .format(self.nsx_manager))
1904
1905 if edge_summary.tag == 'edgeSummary':
1906 for element in edge_summary:
1907 if element.tag == 'id':
1908 edge_list.append(element.text)
1909 else:
1910 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
1911 .format(self.nsx_manager))
1912
1913 if not edge_list:
1914 raise vimconn.vimconnException("get_edge_details: "\
1915 "No NSX edge details found: {}"
1916 .format(self.nsx_manager))
1917 else:
1918 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
1919 return edge_list
1920 else:
1921 self.logger.debug("get_edge_details: "
1922 "Failed to get NSX edge details from NSX Manager: {}"
1923 .format(resp.content))
1924 return None
1925
1926 except Exception as exp:
1927 self.logger.debug("get_edge_details: "\
1928 "Failed to get NSX edge details from NSX Manager: {}"
1929 .format(exp))
1930 raise vimconn.vimconnException("get_edge_details: "\
1931 "Failed to get NSX edge details from NSX Manager: {}"
1932 .format(exp))
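# For reference, a minimal, illustrative response shape that the parsing loop above accepts;
# the element names are the ones checked in the code, the outer tag and values are placeholders:
#
#   <pagedEdgeList>
#     <edgePage>
#       <pagingInfo><totalCount>1</totalCount></pagingInfo>
#       <edgeSummary><id>edge-1</id></edgeSummary>
#     </edgePage>
#   </pagedEdgeList>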
1933
1934
1935 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
1936 """Get IP address details from NSX edges, using the MAC address
1937 PARAMS: nsx_edges : List of NSX edges
1938 mac_address : Find IP address corresponding to this MAC address
1939 Returns: IP address corresponding to the provided MAC address
1940 """
1941
1942 ip_addr = None
1943 rheaders = {'Content-Type': 'application/xml'}
1944
1945 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
1946
1947 try:
1948 for edge in nsx_edges:
1949 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
1950
1951 resp = requests.get(self.nsx_manager + nsx_api_url,
1952 auth = (self.nsx_user, self.nsx_password),
1953 verify = False, headers = rheaders)
1954
1955 if resp.status_code == requests.codes.ok:
1956 dhcp_leases = XmlElementTree.fromstring(resp.text)
1957 for child in dhcp_leases:
1958 if child.tag == 'dhcpLeaseInfo':
1959 dhcpLeaseInfo = child
1960 for leaseInfo in dhcpLeaseInfo:
edge_mac_addr = None  # ensure the variable is defined even if this lease has no macAddress element
1961 for elem in leaseInfo:
1962 if (elem.tag)=='macAddress':
1963 edge_mac_addr = elem.text
1964 if (elem.tag)=='ipAddress':
1965 ip_addr = elem.text
1966 if edge_mac_addr is not None:
1967 if edge_mac_addr == mac_address:
1968 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
1969 .format(ip_addr, mac_address,edge))
1970 return ip_addr
1971 else:
1972 self.logger.debug("get_ipaddr_from_NSXedge: "\
1973 "Error occurred while getting DHCP lease info from NSX Manager: {}"
1974 .format(resp.content))
1975
1976 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
1977 return None
1978
1979 except XmlElementTree.ParseError as Err:
1980 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
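# For reference, a minimal, illustrative DHCP lease entry matching the parsing above; element
# names come from the code, the outer tag and values are placeholders:
#
#   <leaseInfoList>
#     <dhcpLeaseInfo>
#       <leaseInfo>
#         <macAddress>00:50:56:aa:bb:cc</macAddress>
#         <ipAddress>192.168.0.10</ipAddress>
#       </leaseInfo>
#     </dhcpLeaseInfo>
#   </leaseInfoList>
#
# A MAC match on any edge returns the corresponding IP address; otherwise None is returned.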
1981
1982
1983 def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
1984 """Send and action over a VM instance from VIM
1985 Returns the vm_id if the action was successfully sent to the VIM"""
1986
1987 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
1988 if vm__vim_uuid is None or action_dict is None:
1989 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
1990
1991 vdc = self.get_vdc_details()
1992 if vdc is None:
1993 return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)
1994
1995 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1996 if vapp_name is None:
1997 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1998 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1999 else:
2000 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2001
2002 try:
2003 the_vapp = self.vca.get_vapp(vdc, vapp_name)
2004 # TODO fix all status
2005 if "start" in action_dict:
2006 vm_info = the_vapp.get_vms_details()
2007 vm_status = vm_info[0]['status']
2008 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2009 if vm_status == "Suspended" or vm_status == "Powered off":
2010 power_on_task = the_vapp.poweron()
2011 result = self.vca.block_until_completed(power_on_task)
2012 self.instance_actions_result("start", result, vapp_name)
2013 elif "rebuild" in action_dict:
2014 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2015 rebuild_task = the_vapp.deploy(powerOn=True)
2016 result = self.vca.block_until_completed(rebuild_task)
2017 self.instance_actions_result("rebuild", result, vapp_name)
2018 elif "pause" in action_dict:
2019 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2020 pause_task = the_vapp.undeploy(action='suspend')
2021 result = self.vca.block_until_completed(pause_task)
2022 self.instance_actions_result("pause", result, vapp_name)
2023 elif "resume" in action_dict:
2024 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2025 power_task = the_vapp.poweron()
2026 result = self.vca.block_until_completed(power_task)
2027 self.instance_actions_result("resume", result, vapp_name)
2028 elif "shutoff" in action_dict or "shutdown" in action_dict:
2029 action_name , value = action_dict.items()[0]
2030 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2031 power_off_task = the_vapp.undeploy(action='powerOff')
2032 result = self.vca.block_until_completed(power_off_task)
2033 if action_name == "shutdown":
2034 self.instance_actions_result("shutdown", result, vapp_name)
2035 else:
2036 self.instance_actions_result("shutoff", result, vapp_name)
2037 elif "forceOff" in action_dict:
2038 result = the_vapp.undeploy(action='force')
2039 self.instance_actions_result("forceOff", result, vapp_name)
2040 elif "reboot" in action_dict:
2041 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2042 reboot_task = the_vapp.reboot()
if type(reboot_task) is GenericTask:
self.vca.block_until_completed(reboot_task)
2043 else:
2044 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2045 return vm__vim_uuid
2046 except Exception as exp :
2047 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2048 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
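# Illustrative usage: action_dict is expected to carry one of the keys handled above; the
# values are not inspected here, so None is enough. 'conn' is a hypothetical vimconnector
# instance used only for this example:
#
#   conn.action_vminstance(vm__vim_uuid, {"start": None})
#   conn.action_vminstance(vm__vim_uuid, {"shutdown": None})
#   conn.action_vminstance(vm__vim_uuid, {"reboot": None})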
2049
2050 def instance_actions_result(self, action, result, vapp_name):
2051 if result:
2052 self.logger.info("action_vminstance: Successfully {} the vApp: {}".format(action, vapp_name))
2053 else:
2054 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2055
2056 def get_vminstance_console(self, vm_id, console_type="vnc"):
2057 """
2058 Get a console for the virtual machine
2059 Params:
2060 vm_id: uuid of the VM
2061 console_type, can be:
2062 "novnc" (by default), "xvpvnc" for VNC types,
2063 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2064 Returns dict with the console parameters:
2065 protocol: ssh, ftp, http, https, ...
2066 server: usually ip address
2067 port: the http, ssh, ... port
2068 suffix: extra text, e.g. the http path and query string
2069 """
2070 raise vimconn.vimconnNotImplemented("Should have implemented this")
2071
2072 # NOT USED METHODS in current version
2073
2074 def host_vim2gui(self, host, server_dict):
2075 """Transform host dictionary from VIM format to GUI format,
2076 and append to the server_dict
2077 """
2078 raise vimconn.vimconnNotImplemented("Should have implemented this")
2079
2080 def get_hosts_info(self):
2081 """Get the information of deployed hosts
2082 Returns the hosts content"""
2083 raise vimconn.vimconnNotImplemented("Should have implemented this")
2084
2085 def get_hosts(self, vim_tenant):
2086 """Get the hosts and deployed instances
2087 Returns the hosts content"""
2088 raise vimconn.vimconnNotImplemented("Should have implemented this")
2089
2090 def get_processor_rankings(self):
2091 """Get the processor rankings in the VIM database"""
2092 raise vimconn.vimconnNotImplemented("Should have implemented this")
2093
2094 def new_host(self, host_data):
2095 """Adds a new host to VIM"""
2096 '''Returns status code of the VIM response'''
2097 raise vimconn.vimconnNotImplemented("Should have implemented this")
2098
2099 def new_external_port(self, port_data):
2100 """Adds a external port to VIM"""
2101 '''Returns the port identifier'''
2102 raise vimconn.vimconnNotImplemented("Should have implemented this")
2103
2104 def new_external_network(self, net_name, net_type):
2105 """Adds a external network to VIM (shared)"""
2106 '''Returns the network identifier'''
2107 raise vimconn.vimconnNotImplemented("Should have implemented this")
2108
2109 def connect_port_network(self, port_id, network_id, admin=False):
2110 """Connects a external port to a network"""
2111 '''Returns status code of the VIM response'''
2112 raise vimconn.vimconnNotImplemented("Should have implemented this")
2113
2114 def new_vminstancefromJSON(self, vm_data):
2115 """Adds a VM instance to VIM"""
2116 '''Returns the instance identifier'''
2117 raise vimconn.vimconnNotImplemented("Should have implemented this")
2118
2119 def get_network_name_by_id(self, network_uuid=None):
2120 """Method gets vcloud director network named based on supplied uuid.
2121
2122 Args:
2123 network_uuid: network_id
2124
2125 Returns:
2126 Returns the network name, or None if not found.
2127 """
2128
2129 if not network_uuid:
2130 return None
2131
2132 try:
2133 org_dict = self.get_org(self.org_uuid)
2134 if 'networks' in org_dict:
2135 org_network_dict = org_dict['networks']
2136 for net_uuid in org_network_dict:
2137 if net_uuid == network_uuid:
2138 return org_network_dict[net_uuid]
2139 except:
2140 self.logger.debug("Exception in get_network_name_by_id")
2141 self.logger.debug(traceback.format_exc())
2142
2143 return None
2144
2145 def get_network_id_by_name(self, network_name=None):
2146 """Method gets vcloud director network uuid based on supplied name.
2147
2148 Args:
2149 network_name: network_name
2150 Returns:
2151 Returns the network uuid, or None if not found.
2153 """
2154
2155 if not network_name:
2156 self.logger.debug("get_network_id_by_name() : Network name is empty")
2157 return None
2158
2159 try:
2160 org_dict = self.get_org(self.org_uuid)
2161 if org_dict and 'networks' in org_dict:
2162 org_network_dict = org_dict['networks']
2163 for net_uuid,net_name in org_network_dict.iteritems():
2164 if net_name == network_name:
2165 return net_uuid
2166
2167 except KeyError as exp:
2168 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2169
2170 return None
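# Illustrative round-trip using the two lookups above; the UUID is a placeholder and 'conn'
# is a hypothetical vimconnector instance:
#
#   net_name = conn.get_network_name_by_id('527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f')
#   net_uuid = conn.get_network_id_by_name(net_name)
#
# Both helpers return None when the organization dictionary has no matching entry.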
2171
2172 def list_org_action(self):
2173 """
2174 Method leverages vCloud Director and queries the organizations available to the current user
2175
2176 Args:
2177 None - this method takes no arguments; it uses the active
2178 vCloud session held by this connector.
2179
2180 Returns:
2181 The XML response content, or None
2182 """
2183
2184 url_list = [self.vca.host, '/api/org']
2185 vm_list_rest_call = ''.join(url_list)
2186
2187 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2188 response = Http.get(url=vm_list_rest_call,
2189 headers=self.vca.vcloud_session.get_vcloud_headers(),
2190 verify=self.vca.verify,
2191 logger=self.vca.logger)
2192
2193 if response.status_code == 403:
2194 response = self.retry_rest('GET', vm_list_rest_call)
2195
2196 if response.status_code == requests.codes.ok:
2197 return response.content
2198
2199 return None
2200
2201 def get_org_action(self, org_uuid=None):
2202 """
2203 Method leverages vCloud Director and retrieves the organization object for the given uuid.
2204
2205 Args:
2206 org_uuid - uuid of the organization to query.
2207
2208 Returns:
2209 The XML response content, or None
2211 """
2212
2213 if org_uuid is None:
2214 return None
2215
2216 url_list = [self.vca.host, '/api/org/', org_uuid]
2217 vm_list_rest_call = ''.join(url_list)
2218
2219 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2220 response = Http.get(url=vm_list_rest_call,
2221 headers=self.vca.vcloud_session.get_vcloud_headers(),
2222 verify=self.vca.verify,
2223 logger=self.vca.logger)
2224
2225 #Retry login if session expired & retry sending request
2226 if response.status_code == 403:
2227 response = self.retry_rest('GET', vm_list_rest_call)
2228
2229 if response.status_code == requests.codes.ok:
2230 return response.content
2231
2232 return None
2233
2234 def get_org(self, org_uuid=None):
2235 """
2236 Method retrieves the details of an organization in vCloud Director
2237
2238 Args:
2239 org_uuid - is a organization uuid.
2240
2241 Returns:
2242 A dictionary with the following keys:
2243 "networks" - network list under the org
2244 "catalogs" - catalog list under the org
2245 "vdcs" - vdc list under the org
2246 """
2247
2248 org_dict = {}
2249
2250 if org_uuid is None:
2251 return org_dict
2252
2253 content = self.get_org_action(org_uuid=org_uuid)
2254 try:
2255 vdc_list = {}
2256 network_list = {}
2257 catalog_list = {}
2258 vm_list_xmlroot = XmlElementTree.fromstring(content)
2259 for child in vm_list_xmlroot:
2260 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2261 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2262 org_dict['vdcs'] = vdc_list
2263 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2264 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2265 org_dict['networks'] = network_list
2266 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2267 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2268 org_dict['catalogs'] = catalog_list
2269 except:
2270 pass
2271
2272 return org_dict
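# Illustrative shape of the dictionary built above; UUIDs and names are placeholders:
#
#   {'vdcs':     {'<vdc-uuid>': 'my-vdc'},
#    'networks': {'<net-uuid>': 'my-org-network'},
#    'catalogs': {'<catalog-uuid>': 'my-catalog'}}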
2273
2274 def get_org_list(self):
2275 """
2276 Method retrieves the organizations available in vCloud Director
2277
2278 Args:
2279 None - uses the active vCloud session held by this connector.
2280
2281 Returns:
2282 A dictionary keyed by organization UUID, with the organization name as value
2283 """
2284
2285 org_dict = {}
2286
2287 content = self.list_org_action()
2288 try:
2289 vm_list_xmlroot = XmlElementTree.fromstring(content)
2290 for vm_xml in vm_list_xmlroot:
2291 if vm_xml.tag.split("}")[1] == 'Org':
2292 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2293 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2294 except:
2295 pass
2296
2297 return org_dict
2298
2299 def vms_view_action(self, vdc_name=None):
2300 """ Method leverages vCloud director vms query call
2301
2302 Args:
2303 vca - is active VCA connection.
2304 vdc_name - is a vdc name that will be used to query vms action
2305
2306 Returns:
2307 The XML response content, or None
2308 """
2309 vca = self.connect()
2310 if vdc_name is None:
2311 return None
2312
2313 url_list = [vca.host, '/api/vms/query']
2314 vm_list_rest_call = ''.join(url_list)
2315
2316 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2317 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2318 vca.vcloud_session.organization.Link)
2319 if len(refs) == 1:
2320 response = Http.get(url=vm_list_rest_call,
2321 headers=vca.vcloud_session.get_vcloud_headers(),
2322 verify=vca.verify,
2323 logger=vca.logger)
2324 if response.status_code == requests.codes.ok:
2325 return response.content
2326
2327 return None
2328
2329 def get_vapp_list(self, vdc_name=None):
2330 """
2331 Method retrieves the list of vApps deployed in vCloud Director and returns a dictionary
2332 containing all vApps deployed for the queried VDC.
2333 The key for a dictionary is vApp UUID
2334
2335
2336 Args:
2337 vca - is active VCA connection.
2338 vdc_name - is a vdc name that will be used to query vms action
2339
2340 Returns:
2341 A dictionary keyed by vApp UUID
2342 """
2343
2344 vapp_dict = {}
2345 if vdc_name is None:
2346 return vapp_dict
2347
2348 content = self.vms_view_action(vdc_name=vdc_name)
2349 try:
2350 vm_list_xmlroot = XmlElementTree.fromstring(content)
2351 for vm_xml in vm_list_xmlroot:
2352 if vm_xml.tag.split("}")[1] == 'VMRecord':
2353 if vm_xml.attrib['isVAppTemplate'] == 'true':
2354 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2355 if 'vappTemplate-' in rawuuid[0]:
2356 # vApp template is in the format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5;
2357 # we remove the 'vappTemplate-' prefix and use the raw UUID as key
2358 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2359 except:
2360 pass
2361
2362 return vapp_dict
2363
2364 def get_vm_list(self, vdc_name=None):
2365 """
2366 Method retrieves the list of VMs deployed in vCloud Director. It returns a dictionary
2367 containing all VMs deployed for the queried VDC.
2368 The key for a dictionary is VM UUID
2369
2370
2371 Args:
2372 vca - is active VCA connection.
2373 vdc_name - is a vdc name that will be used to query vms action
2374
2375 Returns:
2376 A dictionary keyed by VM UUID
2377 """
2378 vm_dict = {}
2379
2380 if vdc_name is None:
2381 return vm_dict
2382
2383 content = self.vms_view_action(vdc_name=vdc_name)
2384 try:
2385 vm_list_xmlroot = XmlElementTree.fromstring(content)
2386 for vm_xml in vm_list_xmlroot:
2387 if vm_xml.tag.split("}")[1] == 'VMRecord':
2388 if vm_xml.attrib['isVAppTemplate'] == 'false':
2389 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2390 if 'vm-' in rawuuid[0]:
2391 # vm is in the format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5;
2392 # we remove the 'vm-' prefix and use the raw UUID as key
2393 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2394 except:
2395 pass
2396
2397 return vm_dict
2398
2399 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2400 """
2401 Method retrieves a VM deployed in vCloud Director. It returns the VM attributes as a dictionary
2402 for the matching VM in the queried VDC.
2403 The key for a dictionary is VM UUID
2404
2405
2406 Args:
2407 vca - is active VCA connection.
2408 vdc_name - is a vdc name that will be used to query vms action
2409
2410 Returns:
2411 A dictionary keyed by VM UUID
2412 """
2413 vm_dict = {}
2414 vca = self.connect()
2415 if not vca:
2416 raise vimconn.vimconnConnectionException("self.connect() failed")
2417
2418 if vdc_name is None:
2419 return vm_dict
2420
2421 content = self.vms_view_action(vdc_name=vdc_name)
2422 try:
2423 vm_list_xmlroot = XmlElementTree.fromstring(content)
2424 for vm_xml in vm_list_xmlroot:
2425 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2426 # lookup done by UUID
2427 if isuuid:
2428 if vapp_name in vm_xml.attrib['container']:
2429 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2430 if 'vm-' in rawuuid[0]:
2431 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2432 break
2433 # lookup done by Name
2434 else:
2435 if vapp_name in vm_xml.attrib['name']:
2436 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2437 if 'vm-' in rawuuid[0]:
2438 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2439 break
2440 except:
2441 pass
2442
2443 return vm_dict
2444
2445 def get_network_action(self, network_uuid=None):
2446 """
2447 Method leverages vCloud Director and queries a network based on its uuid
2448
2449 Args:
2450 vca - is active VCA connection.
2451 network_uuid - is a network uuid
2452
2453 Returns:
2454 The XML response content, or None
2455 """
2456
2457 if network_uuid is None:
2458 return None
2459
2460 url_list = [self.vca.host, '/api/network/', network_uuid]
2461 vm_list_rest_call = ''.join(url_list)
2462
2463 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
2464 response = Http.get(url=vm_list_rest_call,
2465 headers=self.vca.vcloud_session.get_vcloud_headers(),
2466 verify=self.vca.verify,
2467 logger=self.vca.logger)
2468
2469 #Retry login if session expired & retry sending request
2470 if response.status_code == 403:
2471 response = self.retry_rest('GET', vm_list_rest_call)
2472
2473 if response.status_code == requests.codes.ok:
2474 return response.content
2475
2476 return None
2477
2478 def get_vcd_network(self, network_uuid=None):
2479 """
2480 Method retrieves available network from vCloud Director
2481
2482 Args:
2483 network_uuid - is VCD network UUID
2484
2485 Each element serialized as key : value pair
2486
2487 Following keys are available for access, e.g. network_configuration['Gateway']
2488 <Configuration>
2489 <IpScopes>
2490 <IpScope>
2491 <IsInherited>true</IsInherited>
2492 <Gateway>172.16.252.100</Gateway>
2493 <Netmask>255.255.255.0</Netmask>
2494 <Dns1>172.16.254.201</Dns1>
2495 <Dns2>172.16.254.202</Dns2>
2496 <DnsSuffix>vmwarelab.edu</DnsSuffix>
2497 <IsEnabled>true</IsEnabled>
2498 <IpRanges>
2499 <IpRange>
2500 <StartAddress>172.16.252.1</StartAddress>
2501 <EndAddress>172.16.252.99</EndAddress>
2502 </IpRange>
2503 </IpRanges>
2504 </IpScope>
2505 </IpScopes>
2506 <FenceMode>bridged</FenceMode>
2507
2508 Returns:
2509 A dictionary with the network configuration, keyed by configuration element name
2510 """
2511
2512 network_configuration = {}
2513 if network_uuid is None:
2514 return network_uuid
2515
2516 try:
2517 content = self.get_network_action(network_uuid=network_uuid)
2518 vm_list_xmlroot = XmlElementTree.fromstring(content)
2519
2520 network_configuration['status'] = vm_list_xmlroot.get("status")
2521 network_configuration['name'] = vm_list_xmlroot.get("name")
2522 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
2523
2524 for child in vm_list_xmlroot:
2525 if child.tag.split("}")[1] == 'IsShared':
2526 network_configuration['isShared'] = child.text.strip()
2527 if child.tag.split("}")[1] == 'Configuration':
2528 for configuration in child.iter():
2529 tagKey = configuration.tag.split("}")[1].strip()
2530 if tagKey != "":
2531 network_configuration[tagKey] = configuration.text.strip()
2532 return network_configuration
2533 except Exception as exp :
2534 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
2535 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2536
2538
2539 def delete_network_action(self, network_uuid=None):
2540 """
2541 Method deletes the given network from vCloud Director
2542
2543 Args:
2544 network_uuid - is a network uuid that client wish to delete
2545
2546 Returns:
2547 True if the delete request was accepted, otherwise False
2548 """
2549
2550 vca = self.connect_as_admin()
2551 if not vca:
2552 raise vimconn.vimconnConnectionException("self.connect_as_admin() failed")
2553 if network_uuid is None:
2554 return False
2555
2556 url_list = [vca.host, '/api/admin/network/', network_uuid]
2557 vm_list_rest_call = ''.join(url_list)
2558
2559 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2560 response = Http.delete(url=vm_list_rest_call,
2561 headers=vca.vcloud_session.get_vcloud_headers(),
2562 verify=vca.verify,
2563 logger=vca.logger)
2564
2565 if response.status_code == 202:
2566 return True
2567
2568 return False
2569
2570 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2571 ip_profile=None, isshared='true'):
2572 """
2573 Method creates a network in vCloud Director
2574
2575 Args:
2576 network_name - is network name to be created.
2577 net_type - can be 'bridge','data','ptp','mgmt'.
2578 ip_profile is a dict containing the IP parameters of the network
2579 isshared - is a boolean
2580 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2581 It is an optional attribute; by default, if no parent network is indicated, the first available one will be used.
2582
2583 Returns:
2584 The new network uuid, or None on failure
2585 """
2586
2587 new_network_name = [network_name, '-', str(uuid.uuid4())]
2588 content = self.create_network_rest(network_name=''.join(new_network_name),
2589 ip_profile=ip_profile,
2590 net_type=net_type,
2591 parent_network_uuid=parent_network_uuid,
2592 isshared=isshared)
2593 if content is None:
2594 self.logger.debug("Failed create network {}.".format(network_name))
2595 return None
2596
2597 try:
2598 vm_list_xmlroot = XmlElementTree.fromstring(content)
2599 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2600 if len(vcd_uuid) == 4:
2601 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2602 return vcd_uuid[3]
2603 except:
2604 self.logger.debug("Failed create network {}".format(network_name))
2605 return None
2606
2607 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2608 ip_profile=None, isshared='true'):
2609 """
2610 Method creates a network in vCloud Director via the REST API
2611
2612 Args:
2613 network_name - is network name to be created.
2614 net_type - can be 'bridge','data','ptp','mgmt'.
2615 ip_profile is a dict containing the IP parameters of the network
2616 isshared - is a boolean
2617 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2618 It is an optional attribute; by default, if no parent network is indicated, the first available one will be used.
2619
2620 Returns:
2621 The XML response content of the created network, or None on failure
2622 """
2623
2624 vca = self.connect_as_admin()
2625 if not vca:
2626 raise vimconn.vimconnConnectionException("self.connect_as_admin() failed.")
2627 if network_name is None:
2628 return None
2629
2630 url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
2631 vm_list_rest_call = ''.join(url_list)
2632 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2633 response = Http.get(url=vm_list_rest_call,
2634 headers=vca.vcloud_session.get_vcloud_headers(),
2635 verify=vca.verify,
2636 logger=vca.logger)
2637
2638 provider_network = None
2639 available_networks = None
2640 add_vdc_rest_url = None
2641
2642 if response.status_code != requests.codes.ok:
2643 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2644 response.status_code))
2645 return None
2646 else:
2647 try:
2648 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2649 for child in vm_list_xmlroot:
2650 if child.tag.split("}")[1] == 'ProviderVdcReference':
2651 provider_network = child.attrib.get('href')
2652 # application/vnd.vmware.admin.providervdc+xml
2653 if child.tag.split("}")[1] == 'Link':
2654 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
2655 and child.attrib.get('rel') == 'add':
2656 add_vdc_rest_url = child.attrib.get('href')
2657 except:
2658 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2659 self.logger.debug("Respond body {}".format(response.content))
2660 return None
2661
2662 # find pvdc provided available network
2663 response = Http.get(url=provider_network,
2664 headers=vca.vcloud_session.get_vcloud_headers(),
2665 verify=vca.verify,
2666 logger=vca.logger)
2667 if response.status_code != requests.codes.ok:
2668 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2669 response.status_code))
2670 return None
2671
2672 # available_networks.split("/")[-1]
2673
2674 if parent_network_uuid is None:
2675 try:
2676 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2677 for child in vm_list_xmlroot.iter():
2678 if child.tag.split("}")[1] == 'AvailableNetworks':
2679 for networks in child.iter():
2680 # application/vnd.vmware.admin.network+xml
2681 if networks.attrib.get('href') is not None:
2682 available_networks = networks.attrib.get('href')
2683 break
2684 except:
2685 return None
2686
2687 try:
2688 #Configure IP profile of the network
2689 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
2690
2691 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
2692 subnet_rand = random.randint(0, 255)
2693 ip_base = "192.168.{}.".format(subnet_rand)
2694 ip_profile['subnet_address'] = ip_base + "0/24"
2695 else:
2696 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
2697
2698 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
2699 ip_profile['gateway_address']=ip_base + "1"
2700 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
2701 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
2702 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
2703 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
2704 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
2705 ip_profile['dhcp_start_address']=ip_base + "3"
2706 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
2707 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
2708 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
2709 ip_profile['dns_address']=ip_base + "2"
2710
2711 gateway_address=ip_profile['gateway_address']
2712 dhcp_count=int(ip_profile['dhcp_count'])
2713 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
2714
2715 if ip_profile['dhcp_enabled']==True:
2716 dhcp_enabled='true'
2717 else:
2718 dhcp_enabled='false'
2719 dhcp_start_address=ip_profile['dhcp_start_address']
2720
2721 #derive dhcp_end_address from dhcp_start_address & dhcp_count
2722 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
2723 end_ip_int += dhcp_count - 1
2724 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
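# Worked example (illustrative values): dhcp_start_address "192.168.5.3" with dhcp_count 50
# gives end_ip_int = int(netaddr.IPAddress("192.168.5.3")) + 49, i.e. dhcp_end_address "192.168.5.52"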
2725
2726 ip_version=ip_profile['ip_version']
2727 dns_address=ip_profile['dns_address']
2728 except KeyError as exp:
2729 self.logger.debug("Create Network REST: Key error {}".format(exp))
2730 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
2731
2732 # either use client provided UUID or search for a first available
2733 # if both are not defined we return none
2734 if parent_network_uuid is not None:
2735 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
2736 add_vdc_rest_url = ''.join(url_list)
2737
2738 #Creating all networks as Direct Org VDC type networks.
2739 #Unused in case of Underlay (data/ptp) network interface.
2740 fence_mode="bridged"
2741 is_inherited='false'
2742 dns_list = dns_address.split(";")
2743 dns1 = dns_list[0]
2744 dns2_text = ""
2745 if len(dns_list) >= 2:
2746 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
2747 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2748 <Description>Openmano created</Description>
2749 <Configuration>
2750 <IpScopes>
2751 <IpScope>
2752 <IsInherited>{1:s}</IsInherited>
2753 <Gateway>{2:s}</Gateway>
2754 <Netmask>{3:s}</Netmask>
2755 <Dns1>{4:s}</Dns1>{5:s}
2756 <IsEnabled>{6:s}</IsEnabled>
2757 <IpRanges>
2758 <IpRange>
2759 <StartAddress>{7:s}</StartAddress>
2760 <EndAddress>{8:s}</EndAddress>
2761 </IpRange>
2762 </IpRanges>
2763 </IpScope>
2764 </IpScopes>
2765 <ParentNetwork href="{9:s}"/>
2766 <FenceMode>{10:s}</FenceMode>
2767 </Configuration>
2768 <IsShared>{11:s}</IsShared>
2769 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
2770 subnet_address, dns1, dns2_text, dhcp_enabled,
2771 dhcp_start_address, dhcp_end_address, available_networks,
2772 fence_mode, isshared)
2773
2774 headers = vca.vcloud_session.get_vcloud_headers()
2775 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
2776 try:
2777 response = Http.post(url=add_vdc_rest_url,
2778 headers=headers,
2779 data=data,
2780 verify=vca.verify,
2781 logger=vca.logger)
2782
2783 if response.status_code != 201:
2784 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
2785 .format(response.status_code,response.content))
2786 else:
2787 network = networkType.parseString(response.content, True)
2788 create_nw_task = network.get_Tasks().get_Task()[0]
2789
2790 # if all is ok we respond with the content once network creation completes,
2791 # otherwise by default return None
2792 if create_nw_task is not None:
2793 self.logger.debug("Create Network REST : Waiting for Network creation complete")
2794 status = vca.block_until_completed(create_nw_task)
2795 if status:
2796 return response.content
2797 else:
2798 self.logger.debug("create_network_rest task failed. Network Create response : {}"
2799 .format(response.content))
2800 except Exception as exp:
2801 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
2802
2803 return None
2804
2805 def convert_cidr_to_netmask(self, cidr_ip=None):
2806 """
2807 Method converts a CIDR prefix length into a dotted-decimal netmask
2808 Args:
2809 cidr_ip : CIDR IP address
2810 Returns:
2811 netmask : Converted netmask
2812 """
2813 if cidr_ip is not None:
2814 if '/' in cidr_ip:
2815 network, net_bits = cidr_ip.split('/')
2816 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2817 else:
2818 netmask = cidr_ip
2819 return netmask
2820 return None
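# Worked example: "192.168.1.0/24" -> net_bits = 24, (0xffffffff << 8) & 0xffffffff = 0xffffff00,
# which packs to "255.255.255.0"; a plain address without '/' is returned unchanged.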
2821
2822 def get_provider_rest(self, vca=None):
2823 """
2824 Method gets the provider vdc view from vCloud Director
2825
2826 Args:
2827 vca - active admin VCA connection used for the API call.
2828
2829 Returns:
2830 The XML response content, or None
2833 """
2834
2835 url_list = [vca.host, '/api/admin']
2836 response = Http.get(url=''.join(url_list),
2837 headers=vca.vcloud_session.get_vcloud_headers(),
2838 verify=vca.verify,
2839 logger=vca.logger)
2840
2841 if response.status_code == requests.codes.ok:
2842 return response.content
2843 return None
2844
2845 def create_vdc(self, vdc_name=None):
2846
2847 vdc_dict = {}
2848
2849 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
2850 if xml_content is not None:
2851 try:
2852 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
2853 for child in task_resp_xmlroot:
2854 if child.tag.split("}")[1] == 'Owner':
2855 vdc_id = child.attrib.get('href').split("/")[-1]
2856 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
2857 return vdc_dict
2858 except:
2859 self.logger.debug("Respond body {}".format(xml_content))
2860
2861 return None
2862
2863 def create_vdc_from_tmpl_rest(self, vdc_name=None):
2864 """
2865 Method creates a vdc in vCloud Director based on a VDC template.
2866 It uses a pre-defined template that must be named 'openmano'.
2867
2868 Args:
2869 vdc_name - name of a new vdc.
2870
2871 Returns:
2872 The XML response content, or None
2873 """
2874
2875 self.logger.info("Creating new vdc {}".format(vdc_name))
2876 vca = self.connect()
2877 if not vca:
2878 raise vimconn.vimconnConnectionException("self.connect() failed")
2879 if vdc_name is None:
2880 return None
2881
2882 url_list = [vca.host, '/api/vdcTemplates']
2883 vm_list_rest_call = ''.join(url_list)
2884 response = Http.get(url=vm_list_rest_call,
2885 headers=vca.vcloud_session.get_vcloud_headers(),
2886 verify=vca.verify,
2887 logger=vca.logger)
2888
2889 # container url to a template
2890 vdc_template_ref = None
2891 try:
2892 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2893 for child in vm_list_xmlroot:
2894 # application/vnd.vmware.admin.providervdc+xml
2895 # we need to find a template from which we instantiate the VDC
2896 if child.tag.split("}")[1] == 'VdcTemplate':
2897 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml' and child.attrib.get(
2898 'name') == 'openmano':
2899 vdc_template_ref = child.attrib.get('href')
2900 except:
2901 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2902 self.logger.debug("Respond body {}".format(response.content))
2903 return None
2904
2905 # if we didn't find the required pre-defined template we return None
2906 if vdc_template_ref is None:
2907 return None
2908
2909 try:
2910 # instantiate vdc
2911 url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
2912 vm_list_rest_call = ''.join(url_list)
2913 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2914 <Source href="{1:s}"></Source>
2915 <Description>openmano</Description>
2916 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
2917 headers = vca.vcloud_session.get_vcloud_headers()
2918 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
2919 response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
2920 logger=vca.logger)
2921 # if all is ok we respond with the content, otherwise None by default
2922 if response.status_code >= 200 and response.status_code < 300:
2923 return response.content
2924 return None
2925 except:
2926 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2927 self.logger.debug("Respond body {}".format(response.content))
2928
2929 return None
2930
2931 def create_vdc_rest(self, vdc_name=None):
2932 """
2933 Method creates a vdc in vCloud Director via the admin REST API
2934
2935 Args:
2936 vdc_name - name of the vdc to be created.
2937
2938 Returns:
2939 The XML response content of the created vdc, or None
2942 """
2943
2944 self.logger.info("Creating new vdc {}".format(vdc_name))
2945
2946 vca = self.connect_as_admin()
2947 if not vca:
2948 raise vimconn.vimconnConnectionException("self.connect_as_admin() failed")
2949 if vdc_name is None:
2950 return None
2951
2952 url_list = [vca.host, '/api/admin/org/', self.org_uuid]
2953 vm_list_rest_call = ''.join(url_list)
2954 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2955 response = Http.get(url=vm_list_rest_call,
2956 headers=vca.vcloud_session.get_vcloud_headers(),
2957 verify=vca.verify,
2958 logger=vca.logger)
2959
2960 provider_vdc_ref = None
2961 add_vdc_rest_url = None
2962 available_networks = None
2963
2964 if response.status_code != requests.codes.ok:
2965 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2966 response.status_code))
2967 return None
2968 else:
2969 try:
2970 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2971 for child in vm_list_xmlroot:
2972 # application/vnd.vmware.admin.providervdc+xml
2973 if child.tag.split("}")[1] == 'Link':
2974 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
2975 and child.attrib.get('rel') == 'add':
2976 add_vdc_rest_url = child.attrib.get('href')
2977 except:
2978 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2979 self.logger.debug("Respond body {}".format(response.content))
2980 return None
2981
2982 response = self.get_provider_rest(vca=vca)
2983 try:
2984 vm_list_xmlroot = XmlElementTree.fromstring(response)
2985 for child in vm_list_xmlroot:
2986 if child.tag.split("}")[1] == 'ProviderVdcReferences':
2987 for sub_child in child:
2988 provider_vdc_ref = sub_child.attrib.get('href')
2989 except:
2990 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2991 self.logger.debug("Respond body {}".format(response))
2992 return None
2993
2994 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
2995 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
2996 <AllocationModel>ReservationPool</AllocationModel>
2997 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
2998 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
2999 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
3000 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
3001 <ProviderVdcReference
3002 name="Main Provider"
3003 href="{2:s}" />
3004 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
3005 escape(vdc_name),
3006 provider_vdc_ref)
3007
3008 headers = vca.vcloud_session.get_vcloud_headers()
3009 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
3010 response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
3011 logger=vca.logger)
3012
3013 # if all is ok we respond with the content, otherwise None by default
3014 if response.status_code == 201:
3015 return response.content
3016 return None
3017
3018 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
3019 """
3020 Method retrieves vApp details from vCloud Director
3021
3022 Args:
3023 vapp_uuid - is vapp identifier.
3024
3025 Returns:
3026 A dictionary with the parsed vApp details; empty on failure
3027 """
3028
3029 parsed_respond = {}
3030 vca = None
3031
3032 if need_admin_access:
3033 vca = self.connect_as_admin()
3034 else:
3035 vca = self.vca
3036
3037 if not vca:
3038 raise vimconn.vimconnConnectionException("self.connect() failed")
3039 if vapp_uuid is None:
3040 return None
3041
3042 url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
3043 get_vapp_restcall = ''.join(url_list)
3044
3045 if vca.vcloud_session and vca.vcloud_session.organization:
3046 response = Http.get(url=get_vapp_restcall,
3047 headers=vca.vcloud_session.get_vcloud_headers(),
3048 verify=vca.verify,
3049 logger=vca.logger)
3050
3051 if response.status_code == 403:
3052 if need_admin_access == False:
3053 response = self.retry_rest('GET', get_vapp_restcall)
3054
3055 if response.status_code != requests.codes.ok:
3056 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
3057 response.status_code))
3058 return parsed_respond
3059
3060 try:
3061 xmlroot_respond = XmlElementTree.fromstring(response.content)
3062 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
3063
3064 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
3065 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
3066 'vmw': 'http://www.vmware.com/schema/ovf',
3067 'vm': 'http://www.vmware.com/vcloud/v1.5',
3068 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
3069 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
3070 "xmlns":"http://www.vmware.com/vcloud/v1.5"
3071 }
3072
3073 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
3074 if created_section is not None:
3075 parsed_respond['created'] = created_section.text
3076
3077 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
3078 if network_section is not None and 'networkName' in network_section.attrib:
3079 parsed_respond['networkname'] = network_section.attrib['networkName']
3080
3081 ipscopes_section = \
3082 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
3083 namespaces)
3084 if ipscopes_section is not None:
3085 for ipscope in ipscopes_section:
3086 for scope in ipscope:
3087 tag_key = scope.tag.split("}")[1]
3088 if tag_key == 'IpRanges':
3089 ip_ranges = scope.getchildren()
3090 for ipblock in ip_ranges:
3091 for block in ipblock:
3092 parsed_respond[block.tag.split("}")[1]] = block.text
3093 else:
3094 parsed_respond[tag_key] = scope.text
3095
3096 # parse children section for other attrib
3097 children_section = xmlroot_respond.find('vm:Children/', namespaces)
3098 if children_section is not None:
3099 parsed_respond['name'] = children_section.attrib['name']
3100 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
3101 if "nestedHypervisorEnabled" in children_section.attrib else None
3102 parsed_respond['deployed'] = children_section.attrib['deployed']
3103 parsed_respond['status'] = children_section.attrib['status']
3104 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
3105 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
3106 nic_list = []
3107 for adapters in network_adapter:
3108 adapter_key = adapters.tag.split("}")[1]
3109 if adapter_key == 'PrimaryNetworkConnectionIndex':
3110 parsed_respond['primarynetwork'] = adapters.text
3111 if adapter_key == 'NetworkConnection':
3112 vnic = {}
3113 if 'network' in adapters.attrib:
3114 vnic['network'] = adapters.attrib['network']
3115 for adapter in adapters:
3116 setting_key = adapter.tag.split("}")[1]
3117 vnic[setting_key] = adapter.text
3118 nic_list.append(vnic)
3119
3120 for link in children_section:
3121 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3122 if link.attrib['rel'] == 'screen:acquireTicket':
3123 parsed_respond['acquireTicket'] = link.attrib
3124 if link.attrib['rel'] == 'screen:acquireMksTicket':
3125 parsed_respond['acquireMksTicket'] = link.attrib
3126
3127 parsed_respond['interfaces'] = nic_list
3128 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
3129 if vCloud_extension_section is not None:
3130 vm_vcenter_info = {}
3131 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
3132 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
3133 if vmext is not None:
3134 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
3135 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
3136
3137 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
3138 vm_virtual_hardware_info = {}
3139 if virtual_hardware_section is not None:
3140 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
3141 if item.find("rasd:Description",namespaces).text == "Hard disk":
3142 disk_size = item.find("rasd:HostResource" ,namespaces
3143 ).attrib["{"+namespaces['vm']+"}capacity"]
3144
3145 vm_virtual_hardware_info["disk_size"]= disk_size
3146 break
3147
3148 for link in virtual_hardware_section:
3149 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3150 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
3151 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
3152 break
3153
3154 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
3155 except Exception as exp :
3156 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
3157 return parsed_respond
3158
3159 def acuire_console(self, vm_uuid=None):
3160
3161 if vm_uuid is None:
3162 return None
3163
3164 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3165 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
3166 console_dict = vm_dict['acquireTicket']
3167 console_rest_call = console_dict['href']
3168
3169 response = Http.post(url=console_rest_call,
3170 headers=self.vca.vcloud_session.get_vcloud_headers(),
3171 verify=self.vca.verify,
3172 logger=self.vca.logger)
3173 if response.status_code == 403:
3174 response = self.retry_rest('POST', console_rest_call)
3175
3176 if response.status_code == requests.codes.ok:
3177 return response.content
3178
3179 return None
3180
3181 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3182 """
3183 Method to resize the VM disk when the flavor requests a larger size
3184
3185 Args:
3186 vapp_uuid - is vapp identifier.
3187 flavor_disk - disk size as specified in VNFD (flavor)
3188
3189 Returns:
3190 True or the status of the modify-disk task; None on error
3191 """
3192 status = None
3193 try:
3194 #Flavor disk is in GB convert it into MB
3195 flavor_disk = int(flavor_disk) * 1024
3196 vm_details = self.get_vapp_details_rest(vapp_uuid)
3197 if vm_details:
3198 vm_name = vm_details["name"]
3199 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3200
3201 if vm_details and "vm_virtual_hardware" in vm_details:
3202 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3203 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3204
3205 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3206
3207 if flavor_disk > vm_disk:
3208 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3209 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3210 vm_disk, flavor_disk ))
3211 else:
3212 status = True
3213 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3214
3215 return status
3216 except Exception as exp:
3217 self.logger.info("Error occurred while modifing disk size {}".format(exp))
3218
3219
3220 def modify_vm_disk_rest(self, disk_href , disk_size):
3221 """
3222 Method to modify the VM disk size through the vCD REST API
3223
3224 Args:
3225 disk_href - vCD API URL to GET and PUT disk data
3226 disk_size - disk size as specified in VNFD (flavor)
3227
3228 Returns:
3229 Status of the modify-disk task, or None on error
3230 """
3231 if disk_href is None or disk_size is None:
3232 return None
3233
3234 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
3235 response = Http.get(url=disk_href,
3236 headers=self.vca.vcloud_session.get_vcloud_headers(),
3237 verify=self.vca.verify,
3238 logger=self.vca.logger)
3239
3240 if response.status_code == 403:
3241 response = self.retry_rest('GET', disk_href)
3242
3243 if response.status_code != requests.codes.ok:
3244 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
3245 response.status_code))
3246 return None
3247 try:
3248 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
3249 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
3250 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
3251
3252 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
3253 if item.find("rasd:Description",namespaces).text == "Hard disk":
3254 disk_item = item.find("rasd:HostResource" ,namespaces )
3255 if disk_item is not None:
3256 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
3257 break
3258
3259 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
3260 xml_declaration=True)
3261
3262 #Send PUT request to modify disk size
3263 headers = self.vca.vcloud_session.get_vcloud_headers()
3264 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
3265
3266 response = Http.put(url=disk_href,
3267 data=data,
3268 headers=headers,
3269 verify=self.vca.verify, logger=self.logger)
3270
3271 if response.status_code == 403:
3272 add_headers = {'Content-Type': headers['Content-Type']}
3273 response = self.retry_rest('PUT', disk_href, add_headers, data)
3274
3275 if response.status_code != 202:
3276 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
3277 response.status_code))
3278 else:
3279 modify_disk_task = taskType.parseString(response.content, True)
3280 if type(modify_disk_task) is GenericTask:
3281 status = self.vca.block_until_completed(modify_disk_task)
3282 return status
3283
3284 return None
3285
3286 except Exception as exp :
3287 self.logger.info("Error occurred calling rest api for modifying disk size {}".format(exp))
3288 return None
3289
3290 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3291 """
3292 Method to attach pci devices to VM
3293
3294 Args:
3295 vapp_uuid - uuid of vApp/VM
3296 pci_devices - PCI devices information as specified in VNFD (flavor)
3297
3298 Returns:
3299 The status of the add PCI device task, the vm object and
3300 vcenter_conect object
3301 """
3302 vm_obj = None
3303 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3304 vcenter_conect, content = self.get_vcenter_content()
3305 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3306
3307 if vm_moref_id:
3308 try:
3309 no_of_pci_devices = len(pci_devices)
3310 if no_of_pci_devices > 0:
3311 #Get VM and its host
3312 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3313 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3314 if host_obj and vm_obj:
3315 #get PCI devices from host on which vapp is currently installed
3316 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3317
3318 if avilable_pci_devices is None:
3319 #find other hosts with active pci devices
3320 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3321 content,
3322 no_of_pci_devices
3323 )
3324
3325 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3326 #Migrate vm to the host where PCI devices are available
3327 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3328 task = self.relocate_vm(new_host_obj, vm_obj)
3329 if task is not None:
3330 result = self.wait_for_vcenter_task(task, vcenter_conect)
3331 self.logger.info("Migrate VM status: {}".format(result))
3332 host_obj = new_host_obj
3333 else:
3334 self.logger.error("Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj))
3335 raise vimconn.vimconnNotFoundException(
3336 "Fail to migrate VM : {} to host {}".format(
3337 vmname_andid,
3338 new_host_obj)
3339 )
3340
3341 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3342 #Add PCI devices one by one
3343 for pci_device in avilable_pci_devices:
3344 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3345 if task:
3346 status= self.wait_for_vcenter_task(task, vcenter_conect)
3347 if status:
3348 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3349 else:
3350 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3351 return True, vm_obj, vcenter_conect
3352 else:
3353 self.logger.error("Currently there is no host with"\
3354 " {} number of avaialble PCI devices required for VM {}".format(
3355 no_of_pci_devices,
3356 vmname_andid)
3357 )
3358 raise vimconn.vimconnNotFoundException(
3359 "Currently there is no host with {} "\
3360 "number of avaialble PCI devices required for VM {}".format(
3361 no_of_pci_devices,
3362 vmname_andid))
3363 else:
3364 self.logger.debug("No information about PCI devices %s", pci_devices)
3365
3366 except vmodl.MethodFault as error:
3367 self.logger.error("Error occurred while adding PCI devices: %s", error)
3368 return None, vm_obj, vcenter_conect
3369
3370 def get_vm_obj(self, content, mob_id):
3371 """
3372 Method to get the vSphere VM object associated with a given moref ID
3373 Args:
3374 vapp_uuid - uuid of vApp/VM
3375 content - vCenter content object
3376 mob_id - mob_id of VM
3377
3378 Returns:
3379 VM and host object
3380 """
3381 vm_obj = None
3382 host_obj = None
3383 try :
3384 container = content.viewManager.CreateContainerView(content.rootFolder,
3385 [vim.VirtualMachine], True
3386 )
3387 for vm in container.view:
3388 mobID = vm._GetMoId()
3389 if mobID == mob_id:
3390 vm_obj = vm
3391 host_obj = vm_obj.runtime.host
3392 break
3393 except Exception as exp:
3394 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3395 return host_obj, vm_obj
3396
3397 def get_pci_devices(self, host, need_devices):
3398 """
3399 Method to get the details of pci devices on given host
3400 Args:
3401 host - vSphere host object
3402 need_devices - number of pci devices needed on host
3403
3404 Returns:
3405 array of pci devices
3406 """
3407 all_devices = []
3408 all_device_ids = []
3409 used_devices_ids = []
3410
3411 try:
3412 if host:
3413 pciPassthruInfo = host.config.pciPassthruInfo
3414 pciDevies = host.hardware.pciDevice
3415
3416 for pci_status in pciPassthruInfo:
3417 if pci_status.passthruActive:
3418 for device in pciDevies:
3419 if device.id == pci_status.id:
3420 all_device_ids.append(device.id)
3421 all_devices.append(device)
3422
3423 #check if devices are in use
3424 avalible_devices = all_devices
3425 for vm in host.vm:
3426 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3427 vm_devices = vm.config.hardware.device
3428 for device in vm_devices:
3429 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3430 if device.backing.id in all_device_ids:
3431 for use_device in avalible_devices[:]:  # iterate over a copy since items may be removed
3432 if use_device.id == device.backing.id:
3433 avalible_devices.remove(use_device)
3434 used_devices_ids.append(device.backing.id)
3435 self.logger.debug("Device {} from devices {}"\
3436 "is in use".format(device.backing.id,
3437 device)
3438 )
3439 if len(avalible_devices) < need_devices:
3440 self.logger.debug("Host {} don't have {} number of active devices".format(host,
3441 need_devices))
3442 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
3443 avalible_devices))
3444 return None
3445 else:
3446 required_devices = avalible_devices[:need_devices]
3447 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
3448 len(avalible_devices),
3449 host,
3450 need_devices))
3451 self.logger.info("Retruning {} devices as {}".format(need_devices,
3452 required_devices ))
3453 return required_devices
3454
3455 except Exception as exp:
3456 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3457
3458 return None
3459
3460 def get_host_and_PCIdevices(self, content, need_devices):
3461 """
3462 Method to get the details of PCI devices on all hosts
3463 
3464 Args:
3465 content - vCenter content object
3466 need_devices - number of pci devices needed on host
3467
3468 Returns:
3469 array of pci devices and host object
3470 """
3471 host_obj = None
3472 pci_device_objs = None
3473 try:
3474 if content:
3475 container = content.viewManager.CreateContainerView(content.rootFolder,
3476 [vim.HostSystem], True)
3477 for host in container.view:
3478 devices = self.get_pci_devices(host, need_devices)
3479 if devices:
3480 host_obj = host
3481 pci_device_objs = devices
3482 break
3483 except Exception as exp:
3484 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3485
3486 return host_obj,pci_device_objs
3487
3488 def relocate_vm(self, dest_host, vm) :
3489 """
3490 Method to relocate a VM to a new host
3491
3492 Args:
3493 dest_host - vSphere host object
3494 vm - vSphere VM object
3495
3496 Returns:
3497 task object
3498 """
3499 task = None
3500 try:
3501 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3502 task = vm.Relocate(relocate_spec)
3503 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3504 except Exception as exp:
3505 self.logger.error("Error occurred while relocating VM {} to new host {}: {}".format(
3506 vm, dest_host, exp))
3507 return task
3508
3509 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3510 """
3511 Waits and provides updates on a vSphere task
3512 """
3513 while task.info.state == vim.TaskInfo.State.running:
3514 time.sleep(2)
3515
3516 if task.info.state == vim.TaskInfo.State.success:
3517 if task.info.result is not None and not hideResult:
3518 self.logger.info('{} completed successfully, result: {}'.format(
3519 actionName,
3520 task.info.result))
3521 else:
3522 self.logger.info('Task {} completed successfully.'.format(actionName))
3523 else:
3524 self.logger.error('{} did not complete successfully: {} '.format(
3525 actionName,
3526 task.info.error)
3527 )
3528
3529 return task.info.result
3530
3531 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3532 """
3533 Method to add pci device in given VM
3534
3535 Args:
3536 host_object - vSphere host object
3537 vm_object - vSphere VM object
3538 host_pci_dev - host_pci_dev must be one of the devices from the
3539 host_object.hardware.pciDevice list
3540 which is configured as a PCI passthrough device
3541
3542 Returns:
3543 task object
3544 """
3545 task = None
3546 if vm_object and host_object and host_pci_dev:
3547 try :
3548 #Add PCI device to VM
3549 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3550 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3551
3552 if host_pci_dev.id not in systemid_by_pciid:
3553 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3554 return None
3555
3556 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3557 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3558 id=host_pci_dev.id,
3559 systemId=systemid_by_pciid[host_pci_dev.id],
3560 vendorId=host_pci_dev.vendorId,
3561 deviceName=host_pci_dev.deviceName)
3562
3563 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3564
3565 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3566 new_device_config.operation = "add"
3567 vmConfigSpec = vim.vm.ConfigSpec()
3568 vmConfigSpec.deviceChange = [new_device_config]
3569
3570 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3571 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3572 host_pci_dev, vm_object, host_object)
3573 )
3574 except Exception as exp:
3575 self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
3576 host_pci_dev,
3577 vm_object,
3578 exp))
3579 return task
3580
3581 def get_vm_vcenter_info(self):
3582 """
3583 Method to get the vCenter access details configured for this VIM
3584 
3585 Args:
3586 None
3587 
3588 Returns:
3589 dict with the vCenter IP, port, user and password
3590 """
3591 vm_vcenter_info = {}
3592
3593 if self.vcenter_ip is not None:
3594 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3595 else:
3596 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3597 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3598 if self.vcenter_port is not None:
3599 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3600 else:
3601 raise vimconn.vimconnException(message="vCenter port is not provided."\
3602 " Please provide vCenter port while attaching datacenter to tenant in --config")
3603 if self.vcenter_user is not None:
3604 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3605 else:
3606 raise vimconn.vimconnException(message="vCenter user is not provided."\
3607 " Please provide vCenter user while attaching datacenter to tenant in --config")
3608
3609 if self.vcenter_password is not None:
3610 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3611 else:
3612 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3613 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3614
3615 return vm_vcenter_info
3616
3617
3618 def get_vm_pci_details(self, vmuuid):
3619 """
3620 Method to get VM PCI device details from vCenter
3621
3622 Args:
3623 vm_obj - vSphere VM object
3624
3625 Returns:
3626 dict of PCI devives attached to VM
3627
3628 """
3629 vm_pci_devices_info = {}
3630 try:
3631 vcenter_conect, content = self.get_vcenter_content()
3632 vm_moref_id = self.get_vm_moref_id(vmuuid)
3633 if vm_moref_id:
3634 #Get VM and its host
3635 if content:
3636 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3637 if host_obj and vm_obj:
3638 vm_pci_devices_info["host_name"]= host_obj.name
3639 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3640 for device in vm_obj.config.hardware.device:
3641 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3642 device_details={'devide_id':device.backing.id,
3643 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3644 }
3645 vm_pci_devices_info[device.deviceInfo.label] = device_details
3646 else:
3647 self.logger.error("Can not connect to vCenter while getting "\
3648 "PCI devices infromationn")
3649 return vm_pci_devices_info
3650 except Exception as exp:
3651 self.logger.error("Error occurred while getting VM information"\
3652 " for VM : {}".format(exp))
3653 raise vimconn.vimconnException(message=exp)
3654
3655 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
3656 """
3657 Method to add a network adapter of the requested type to a VM
3658 Args :
3659 network_name - name of network
3660 primary_nic_index - int value for primary nic index
3661 nicIndex - int value for nic index
3662 nic_type - NIC adapter model to attach to the VM
3663 Returns:
3664 None
3665 """
3666
3667 try:
3668 ip_address = None
3669 floating_ip = False
3670 if 'floating_ip' in net: floating_ip = net['floating_ip']
3671
3672 # Stub for ip_address feature
3673 if 'ip_address' in net: ip_address = net['ip_address']
3674
3675 if floating_ip:
3676 allocation_mode = "POOL"
3677 elif ip_address:
3678 allocation_mode = "MANUAL"
3679 else:
3680 allocation_mode = "DHCP"
3681
3682 if not nic_type:
3683 for vms in vapp._get_vms():
3684 vm_id = (vms.id).split(':')[-1]
3685
3686 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3687
3688 response = Http.get(url=url_rest_call,
3689 headers=self.vca.vcloud_session.get_vcloud_headers(),
3690 verify=self.vca.verify,
3691 logger=self.vca.logger)
3692
3693 if response.status_code == 403:
3694 response = self.retry_rest('GET', url_rest_call)
3695
3696 if response.status_code != 200:
3697 self.logger.error("REST call {} failed reason : {} "\
3698 "status code : {}".format(url_rest_call,
3699 response.content,
3700 response.status_code))
3701 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3702 "network connection section")
3703
3704 data = response.content
3705 if '<PrimaryNetworkConnectionIndex>' not in data:
3706 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3707 <NetworkConnection network="{}">
3708 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3709 <IsConnected>true</IsConnected>
3710 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3711 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3712 allocation_mode)
3713 # Stub for ip_address feature
3714 if ip_address:
3715 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3716 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3717
3718 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3719 else:
3720 new_item = """<NetworkConnection network="{}">
3721 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3722 <IsConnected>true</IsConnected>
3723 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3724 </NetworkConnection>""".format(network_name, nicIndex,
3725 allocation_mode)
3726 # Stub for ip_address feature
3727 if ip_address:
3728 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3729 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3730
3731 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3732
3733 headers = self.vca.vcloud_session.get_vcloud_headers()
3734 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3735 response = Http.put(url=url_rest_call, headers=headers, data=data,
3736 verify=self.vca.verify,
3737 logger=self.vca.logger)
3738
3739 if response.status_code == 403:
3740 add_headers = {'Content-Type': headers['Content-Type']}
3741 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3742
3743 if response.status_code != 202:
3744 self.logger.error("REST call {} failed reason : {} "\
3745 "status code : {} ".format(url_rest_call,
3746 response.content,
3747 response.status_code))
3748 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3749 "network connection section")
3750 else:
3751 nic_task = taskType.parseString(response.content, True)
3752 if isinstance(nic_task, GenericTask):
3753 self.vca.block_until_completed(nic_task)
3754 self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
3755 "default NIC type".format(vm_id))
3756 else:
3757 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
3758 "connect NIC type".format(vm_id))
3759 else:
3760 for vms in vapp._get_vms():
3761 vm_id = (vms.id).split(':')[-1]
3762
3763 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
3764
3765 response = Http.get(url=url_rest_call,
3766 headers=self.vca.vcloud_session.get_vcloud_headers(),
3767 verify=self.vca.verify,
3768 logger=self.vca.logger)
3769
3770 if response.status_code == 403:
3771 response = self.retry_rest('GET', url_rest_call)
3772
3773 if response.status_code != 200:
3774 self.logger.error("REST call {} failed reason : {} "\
3775 "status code : {}".format(url_rest_call,
3776 response.content,
3777 response.status_code))
3778 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3779 "network connection section")
3780 data = response.content
3781 if '<PrimaryNetworkConnectionIndex>' not in data:
3782 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3783 <NetworkConnection network="{}">
3784 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3785 <IsConnected>true</IsConnected>
3786 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3787 <NetworkAdapterType>{}</NetworkAdapterType>
3788 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3789 allocation_mode, nic_type)
3790 # Stub for ip_address feature
3791 if ip_address:
3792 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3793 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3794
3795 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3796 else:
3797 new_item = """<NetworkConnection network="{}">
3798 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3799 <IsConnected>true</IsConnected>
3800 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3801 <NetworkAdapterType>{}</NetworkAdapterType>
3802 </NetworkConnection>""".format(network_name, nicIndex,
3803 allocation_mode, nic_type)
3804 # Stub for ip_address feature
3805 if ip_address:
3806 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3807 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3808
3809 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3810
3811 headers = self.vca.vcloud_session.get_vcloud_headers()
3812 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3813 response = Http.put(url=url_rest_call, headers=headers, data=data,
3814 verify=self.vca.verify,
3815 logger=self.vca.logger)
3816
3817 if response.status_code == 403:
3818 add_headers = {'Content-Type': headers['Content-Type']}
3819 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
3820
3821 if response.status_code != 202:
3822 self.logger.error("REST call {} failed reason : {} "\
3823 "status code : {}".format(url_rest_call,
3824 response.content,
3825 response.status_code))
3826 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3827 "network connection section")
3828 else:
3829 nic_task = taskType.parseString(response.content, True)
3830 if isinstance(nic_task, GenericTask):
3831 self.vca.block_until_completed(nic_task)
3832 self.logger.info("add_network_adapter_to_vms(): VM {} "\
3833 "conneced to NIC type {}".format(vm_id, nic_type))
3834 else:
3835 self.logger.error("add_network_adapter_to_vms(): VM {} "\
3836 "failed to connect NIC type {}".format(vm_id, nic_type))
3837 except Exception as exp:
3838 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
3839 "while adding Network adapter")
3840 raise vimconn.vimconnException(message=exp)
3841
3842
3843 def set_numa_affinity(self, vmuuid, paired_threads_id):
3844 """
3845 Method to assign NUMA affinity in the VM configuration parameters
3846 Args :
3847 vmuuid - vm uuid
3848 paired_threads_id - one or more virtual processor
3849 numbers
3850 Returns:
3851 None on success; raises vimconnException on failure
3852 """
3853 try:
3854 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
3855 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
3856 context = None
3857 if hasattr(ssl, '_create_unverified_context'):
3858 context = ssl._create_unverified_context()
3859 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
3860 pwd=self.passwd, port=int(vm_vcenter_port),
3861 sslContext=context)
3862 atexit.register(Disconnect, vcenter_conect)
3863 content = vcenter_conect.RetrieveContent()
3864
3865 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
3866 if vm_obj:
3867 config_spec = vim.vm.ConfigSpec()
3868 config_spec.extraConfig = []
3869 opt = vim.option.OptionValue()
3870 opt.key = 'numa.nodeAffinity'
3871 opt.value = str(paired_threads_id)
3872 config_spec.extraConfig.append(opt)
3873 task = vm_obj.ReconfigVM_Task(config_spec)
3874 if task:
3875 result = self.wait_for_vcenter_task(task, vcenter_conect)
3876 extra_config = vm_obj.config.extraConfig
3877 flag = False
3878 for opts in extra_config:
3879 if 'numa.nodeAffinity' in opts.key:
3880 flag = True
3881 self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
3882 "value {} for vm {}".format(opt.value, vm_obj))
3883 if flag:
3884 return
3885 else:
3886 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
3887 except Exception as exp:
3888 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
3889 "for VM {} : {}".format(vm_obj, vm_moref_id))
3890 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
3891 "affinity".format(exp))
3892
3893
3894
3895 def cloud_init(self, vapp, cloud_config):
3896 """
3897 Method to inject ssh-key
3898 vapp - vapp object
3899 cloud_config a dictionary with:
3900 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
3901 'users': (optional) list of users to be inserted, each item is a dict with:
3902 'name': (mandatory) user name,
3903 'key-pairs': (optional) list of strings with the public key to be inserted to the user
3904 'user-data': (optional) string is a text script to be passed directly to cloud-init
3905 'config-files': (optional). List of files to be transferred. Each item is a dict with:
3906 'dest': (mandatory) string with the destination absolute path
3907 'encoding': (optional, by default text). Can be one of:
3908 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
3909 'content' (mandatory): string with the content of the file
3910 'permissions': (optional) string with file permissions, typically octal notation '0644'
3911 'owner': (optional) file owner, string with the format 'owner:group'
3912 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
3913 """
3914
3915 try:
3916 if isinstance(cloud_config, dict):
3917 key_pairs = []
3918 userdata = []
3919 if "key-pairs" in cloud_config:
3920 key_pairs = cloud_config["key-pairs"]
3921
3922 if "users" in cloud_config:
3923 userdata = cloud_config["users"]
3924
3925 for key in key_pairs:
3926 for user in userdata:
3927 if 'name' in user: user_name = user['name']
3928 if 'key-pairs' in user and len(user['key-pairs']) > 0:
3929 for user_key in user['key-pairs']:
3930 customize_script = """
3931 #!/bin/bash
3932 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
3933 if [ "$1" = "precustomization" ];then
3934 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
3935 if [ ! -d /root/.ssh ];then
3936 mkdir /root/.ssh
3937 chown root:root /root/.ssh
3938 chmod 700 /root/.ssh
3939 touch /root/.ssh/authorized_keys
3940 chown root:root /root/.ssh/authorized_keys
3941 chmod 600 /root/.ssh/authorized_keys
3942 # make centos with selinux happy
3943 which restorecon && restorecon -Rv /root/.ssh
3944 echo '{key}' >> /root/.ssh/authorized_keys
3945 else
3946 touch /root/.ssh/authorized_keys
3947 chown root:root /root/.ssh/authorized_keys
3948 chmod 600 /root/.ssh/authorized_keys
3949 echo '{key}' >> /root/.ssh/authorized_keys
3950 fi
3951 if [ -d /home/{user_name} ];then
3952 if [ ! -d /home/{user_name}/.ssh ];then
3953 mkdir /home/{user_name}/.ssh
3954 chown {user_name}:{user_name} /home/{user_name}/.ssh
3955 chmod 700 /home/{user_name}/.ssh
3956 touch /home/{user_name}/.ssh/authorized_keys
3957 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
3958 chmod 600 /home/{user_name}/.ssh/authorized_keys
3959 # make centos with selinux happy
3960 which restorecon && restorecon -Rv /home/{user_name}/.ssh
3961 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
3962 else
3963 touch /home/{user_name}/.ssh/authorized_keys
3964 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
3965 chmod 600 /home/{user_name}/.ssh/authorized_keys
3966 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
3967 fi
3968 fi
3969 fi""".format(key=key, user_name=user_name, user_key=user_key)
3970
3971 for vm in vapp._get_vms():
3972 vm_name = vm.name
3973 task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
3974 if isinstance(task, GenericTask):
3975 self.vca.block_until_completed(task)
3976 self.logger.info("cloud_init : customized guest os task "\
3977 "completed for VM {}".format(vm_name))
3978 else:
3979 self.logger.error("cloud_init : task for customized guest os"\
3980 "failed for VM {}".format(vm_name))
3981 except Exception as exp:
3982 self.logger.error("cloud_init : exception occurred while injecting "\
3983 "ssh-key")
3984 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
3985 "ssh-key".format(exp))
3986
3987
3988 def add_new_disk(self, vapp_uuid, disk_size):
3989 """
3990 Method to create an empty vm disk
3991
3992 Args:
3993 vapp_uuid - is vapp identifier.
3994 disk_size - size of disk to be created in GB
3995
3996 Returns:
3997 None
3998 """
3999 status = False
4000 vm_details = None
4001 try:
4002 #Disk size in GB, convert it into MB
4003 if disk_size is not None:
4004 disk_size_mb = int(disk_size) * 1024
4005 vm_details = self.get_vapp_details_rest(vapp_uuid)
4006
4007 if vm_details and "vm_virtual_hardware" in vm_details:
4008 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4009 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4010 status = self.add_new_disk_rest(disk_href, disk_size_mb)
4011
4012 except Exception as exp:
4013 msg = "Error occurred while creating new disk {}.".format(exp)
4014 self.rollback_newvm(vapp_uuid, msg)
4015
4016 if status:
4017 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4018 else:
4019 #If failed to add disk, delete VM
4020 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4021 self.rollback_newvm(vapp_uuid, msg)
4022
4023
4024 def add_new_disk_rest(self, disk_href, disk_size_mb):
4025 """
4026 Retrieves vApp Disks section & adds a new empty disk
4027 
4028 Args:
4029 disk_href: Disk section href used to add the disk
4030 disk_size_mb: Disk size in MB
4031
4032 Returns: Status of add new disk task
4033 """
4034 status = False
4035 if self.vca.vcloud_session and self.vca.vcloud_session.organization:
4036 response = Http.get(url=disk_href,
4037 headers=self.vca.vcloud_session.get_vcloud_headers(),
4038 verify=self.vca.verify,
4039 logger=self.vca.logger)
4040
4041 if response.status_code == 403:
4042 response = self.retry_rest('GET', disk_href)
4043
4044 if response.status_code != requests.codes.ok:
4045 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
4046 .format(disk_href, response.status_code))
4047 return status
4048 try:
4049 #Find bus type & max of instance IDs assigned to disks
4050 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4051 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4052 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4053 instance_id = 0
4054 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4055 if item.find("rasd:Description",namespaces).text == "Hard disk":
4056 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
4057 if inst_id > instance_id:
4058 instance_id = inst_id
4059 disk_item = item.find("rasd:HostResource" ,namespaces)
4060 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
4061 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
4062
4063 instance_id = instance_id + 1
4064 new_item = """<Item>
4065 <rasd:Description>Hard disk</rasd:Description>
4066 <rasd:ElementName>New disk</rasd:ElementName>
4067 <rasd:HostResource
4068 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
4069 vcloud:capacity="{}"
4070 vcloud:busSubType="{}"
4071 vcloud:busType="{}"></rasd:HostResource>
4072 <rasd:InstanceID>{}</rasd:InstanceID>
4073 <rasd:ResourceType>17</rasd:ResourceType>
4074 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
4075
4076 new_data = response.content
4077 #Add new item at the bottom
4078 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
4079
4080 # Send PUT request to modify virtual hardware section with new disk
4081 headers = self.vca.vcloud_session.get_vcloud_headers()
4082 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4083
4084 response = Http.put(url=disk_href,
4085 data=new_data,
4086 headers=headers,
4087 verify=self.vca.verify, logger=self.logger)
4088
4089 if response.status_code == 403:
4090 add_headers = {'Content-Type': headers['Content-Type']}
4091 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
4092
4093 if response.status_code != 202:
4094 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
4095 .format(disk_href, response.status_code, response.content))
4096 else:
4097 add_disk_task = taskType.parseString(response.content, True)
4098 if type(add_disk_task) is GenericTask:
4099 status = self.vca.block_until_completed(add_disk_task)
4100 if not status:
4101 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
4102
4103 except Exception as exp:
4104 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
4105
4106 return status
4107
4108
4109 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
4110 """
4111 Method to add existing disk to vm
4112 Args :
4113 catalogs - List of VDC catalogs
4114 image_id - Catalog ID
4115 template_name - Name of template in catalog
4116 vapp_uuid - UUID of vApp
4117 Returns:
4118 None
4119 """
4120 disk_info = None
4121 vcenter_conect, content = self.get_vcenter_content()
4122 #find moref-id of vm in image
4123 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
4124 image_id=image_id,
4125 )
4126
4127 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
4128 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
4129 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
4130 if catalog_vm_moref_id:
4131 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
4132 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
4133 if catalog_vm_obj:
4134 #find existing disk
4135 disk_info = self.find_disk(catalog_vm_obj)
4136 else:
4137 exp_msg = "No VM with image id {} found".format(image_id)
4138 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4139 else:
4140 exp_msg = "No Image found with image ID {} ".format(image_id)
4141 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4142
4143 if disk_info:
4144 self.logger.info("Existing disk_info : {}".format(disk_info))
4145 #get VM
4146 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4147 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
4148 if vm_obj:
4149 status = self.add_disk(vcenter_conect=vcenter_conect,
4150 vm=vm_obj,
4151 disk_info=disk_info,
4152 size=size,
4153 vapp_uuid=vapp_uuid
4154 )
4155 if status:
4156 self.logger.info("Disk from image id {} added to {}".format(image_id,
4157 vm_obj.config.name)
4158 )
4159 else:
4160 msg = "No disk found with image id {} to add in VM {}".format(
4161 image_id,
4162 vm_obj.config.name)
4163 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4164
4165
4166 def find_disk(self, vm_obj):
4167 """
4168 Method to find details of existing disk in VM
4169 Args :
4170 vm_obj - vCenter object of VM
4171 image_id - Catalog ID
4172 Returns:
4173 disk_info : dict of disk details
4174 """
4175 disk_info = {}
4176 if vm_obj:
4177 try:
4178 devices = vm_obj.config.hardware.device
4179 for device in devices:
4180 if type(device) is vim.vm.device.VirtualDisk:
4181 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4182 disk_info["full_path"] = device.backing.fileName
4183 disk_info["datastore"] = device.backing.datastore
4184 disk_info["capacityKB"] = device.capacityInKB
4185 break
4186 except Exception as exp:
4187 self.logger.error("find_disk() : exception occurred while "\
4188 "getting existing disk details :{}".format(exp))
4189 return disk_info
4190
4191
4192 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4193 """
4194 Method to add existing disk in VM
4195 Args :
4196 vcenter_conect - vCenter content object
4197 vm - vCenter vm object
4198 disk_info : dict of disk details
4199 Returns:
4200 status : status of add disk task
4201 """
4202 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4203 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4204 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4205 if size is not None:
4206 #Convert size from GB to KB
4207 sizeKB = int(size) * 1024 * 1024
4208 #compare size of existing disk and user given size. Assign whichever is greater
4209 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4210 sizeKB, capacityKB))
4211 if sizeKB > capacityKB:
4212 capacityKB = sizeKB
4213
4214 if datastore and fullpath and capacityKB:
4215 try:
4216 spec = vim.vm.ConfigSpec()
4217 # get all disks on a VM, set unit_number to the next available
4218 unit_number = 0
4219 for dev in vm.config.hardware.device:
4220 if hasattr(dev.backing, 'fileName'):
4221 unit_number = int(dev.unitNumber) + 1
4222 # unit_number 7 reserved for scsi controller
4223 if unit_number == 7:
4224 unit_number += 1
4225 if isinstance(dev, vim.vm.device.VirtualDisk):
4226 #vim.vm.device.VirtualSCSIController
4227 controller_key = dev.controllerKey
4228
4229 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4230 unit_number, controller_key))
4231 # add disk here
4232 dev_changes = []
4233 disk_spec = vim.vm.device.VirtualDeviceSpec()
4234 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4235 disk_spec.device = vim.vm.device.VirtualDisk()
4236 disk_spec.device.backing = \
4237 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4238 disk_spec.device.backing.thinProvisioned = True
4239 disk_spec.device.backing.diskMode = 'persistent'
4240 disk_spec.device.backing.datastore = datastore
4241 disk_spec.device.backing.fileName = fullpath
4242
4243 disk_spec.device.unitNumber = unit_number
4244 disk_spec.device.capacityInKB = capacityKB
4245 disk_spec.device.controllerKey = controller_key
4246 dev_changes.append(disk_spec)
4247 spec.deviceChange = dev_changes
4248 task = vm.ReconfigVM_Task(spec=spec)
4249 status = self.wait_for_vcenter_task(task, vcenter_conect)
4250 return status
4251 except Exception as exp:
4252 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4253 "{} to vm {}".format(exp,
4254 fullpath,
4255 vm.config.name)
4256 self.rollback_newvm(vapp_uuid, exp_msg)
4257 else:
4258 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4259 self.rollback_newvm(vapp_uuid, msg)
4260
4261
4262 def get_vcenter_content(self):
4263 """
4264 Get the vsphere content object
4265 """
4266 try:
4267 vm_vcenter_info = self.get_vm_vcenter_info()
4268 except Exception as exp:
4269 self.logger.error("Error occurred while getting vCenter information"\
4270 " for VM : {}".format(exp))
4271 raise vimconn.vimconnException(message=exp)
4272
4273 context = None
4274 if hasattr(ssl, '_create_unverified_context'):
4275 context = ssl._create_unverified_context()
4276
4277 vcenter_conect = SmartConnect(
4278 host=vm_vcenter_info["vm_vcenter_ip"],
4279 user=vm_vcenter_info["vm_vcenter_user"],
4280 pwd=vm_vcenter_info["vm_vcenter_password"],
4281 port=int(vm_vcenter_info["vm_vcenter_port"]),
4282 sslContext=context
4283 )
4284 atexit.register(Disconnect, vcenter_conect)
4285 content = vcenter_conect.RetrieveContent()
4286 return vcenter_conect, content
4287
4288
4289 def get_vm_moref_id(self, vapp_uuid):
4290 """
4291 Get the moref_id of given VM
4292 """
4293 try:
4294 if vapp_uuid:
4295 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4296 if vm_details and "vm_vcenter_info" in vm_details:
4297 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4298
4299 return vm_moref_id
4300
4301 except Exception as exp:
4302 self.logger.error("Error occurred while getting VM moref ID "\
4303 " for VM : {}".format(exp))
4304 return None
4305
4306
4307 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
4308 """
4309 Method to get vApp template details
4310 Args :
4311 catalogs - list of VDC catalogs
4312 image_id - Catalog ID to find
4313 template_name : template name in catalog
4314 Returns:
4315 parsed_response : dict of vApp template details
4316 """
4317 parsed_response = {}
4318
4319 vca = self.connect_as_admin()
4320 if not vca:
4321 raise vimconn.vimconnConnectionException("self.connect() failed")
4322
4323 try:
4324 catalog = self.get_catalog_obj(image_id, catalogs)
4325 if catalog:
4326 template_name = self.get_catalogbyid(image_id, catalogs)
4327 catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
4328 if len(catalog_items) == 1:
4329 response = Http.get(catalog_items[0].get_href(),
4330 headers=vca.vcloud_session.get_vcloud_headers(),
4331 verify=vca.verify,
4332 logger=vca.logger)
4333 catalogItem = XmlElementTree.fromstring(response.content)
4334 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
4335 vapp_tempalte_href = entity.get("href")
4336 #get vapp details and parse moref id
4337
4338 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4339 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4340 'vmw': 'http://www.vmware.com/schema/ovf',
4341 'vm': 'http://www.vmware.com/vcloud/v1.5',
4342 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4343 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
4344 'xmlns':"http://www.vmware.com/vcloud/v1.5"
4345 }
4346
4347 if vca.vcloud_session and vca.vcloud_session.organization:
4348 response = Http.get(url=vapp_tempalte_href,
4349 headers=vca.vcloud_session.get_vcloud_headers(),
4350 verify=vca.verify,
4351 logger=vca.logger
4352 )
4353
4354 if response.status_code != requests.codes.ok:
4355 self.logger.debug("REST API call {} failed. Return status code {}".format(
4356 vapp_tempalte_href, response.status_code))
4357
4358 else:
4359 xmlroot_respond = XmlElementTree.fromstring(response.content)
4360 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4361 if children_section is not None:
4362 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4363 if vCloud_extension_section is not None:
4364 vm_vcenter_info = {}
4365 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4366 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4367 if vmext is not None:
4368 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4369 parsed_response["vm_vcenter_info"]= vm_vcenter_info
4370
4371 except Exception as exp :
4372 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4373
4374 return parsed_response
4375
4376
4377 def rollback_newvm(self, vapp_uuid, msg, exp_type="Generic"):
4378 """
4379 Method to delete vApp
4380 Args :
4381 vapp_uuid - vApp UUID
4382 msg - Error message to be logged
4383 exp_type : Exception type
4384 Returns:
4385 None
4386 """
4387 if vapp_uuid:
4388 status = self.delete_vminstance(vapp_uuid)
4389 else:
4390 msg = "No vApp ID"
4391 self.logger.error(msg)
4392 if exp_type == "Generic":
4393 raise vimconn.vimconnException(msg)
4394 elif exp_type == "NotFound":
4395 raise vimconn.vimconnNotFoundException(message=msg)
4396
4397 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4398 """
4399 Method to attach SRIOV adapters to VM
4400
4401 Args:
4402 vapp_uuid - uuid of vApp/VM
4403 sriov_nets - SRIOV devices information as specified in VNFD (flavor)
4404 vmname_andid - vmname
4405
4406 Returns:
4407 The status of add SRIOV adapter task , vm object and
4408 vcenter_conect object
4409 """
4410 vm_obj = None
4411 vcenter_conect, content = self.get_vcenter_content()
4412 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4413
4414 if vm_moref_id:
4415 try:
4416 no_of_sriov_devices = len(sriov_nets)
4417 if no_of_sriov_devices > 0:
4418 #Get VM and its host
4419 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4420 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4421 if host_obj and vm_obj:
4422 #get SRIOV devices from host on which vapp is currently installed
4423 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4424 no_of_sriov_devices,
4425 )
4426
4427 if len(avilable_sriov_devices) == 0:
4428 #find other hosts with active pci devices
4429 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4430 content,
4431 no_of_sriov_devices,
4432 )
4433
4434 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4435 #Migrate vm to the host where SRIOV devices are available
4436 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4437 new_host_obj))
4438 task = self.relocate_vm(new_host_obj, vm_obj)
4439 if task is not None:
4440 result = self.wait_for_vcenter_task(task, vcenter_conect)
4441 self.logger.info("Migrate VM status: {}".format(result))
4442 host_obj = new_host_obj
4443 else:
4444 self.logger.error("Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj))
4445 raise vimconn.vimconnNotFoundException(
4446 "Fail to migrate VM : {} to host {}".format(
4447 vmname_andid,
4448 new_host_obj)
4449 )
4450
4451 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4452 #Add SRIOV devices one by one
4453 for sriov_net in sriov_nets:
4454 network_name = sriov_net.get('net_id')
4455 dvs_portgr_name = self.create_dvPort_group(network_name)
4456 if sriov_net.get('type') == "VF":
4457 #add vlan ID ,Modify portgroup for vlan ID
4458 self.configure_vlanID(content, vcenter_conect, network_name)
4459
4460 task = self.add_sriov_to_vm(content,
4461 vm_obj,
4462 host_obj,
4463 network_name,
4464 avilable_sriov_devices[0]
4465 )
4466 if task:
4467 status= self.wait_for_vcenter_task(task, vcenter_conect)
4468 if status:
4469 self.logger.info("Added SRIOV {} to VM {}".format(
4470 no_of_sriov_devices,
4471 str(vm_obj)))
4472 else:
4473 self.logger.error("Fail to add SRIOV {} to VM {}".format(
4474 no_of_sriov_devices,
4475 str(vm_obj)))
4476 raise vimconn.vimconnUnexpectedResponse(
4477 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
4478 )
4479 return True, vm_obj, vcenter_conect
4480 else:
4481 self.logger.error("Currently there is no host with"\
4482 " {} number of avaialble SRIOV "\
4483 "VFs required for VM {}".format(
4484 no_of_sriov_devices,
4485 vmname_andid)
4486 )
4487 raise vimconn.vimconnNotFoundException(
4488 "Currently there is no host with {} "\
4489 "number of avaialble SRIOV devices required for VM {}".format(
4490 no_of_sriov_devices,
4491 vmname_andid))
4492 else:
4493 self.logger.debug("No information about SRIOV devices %s", sriov_nets)
4494
4495 except vmodl.MethodFault as error:
4496 self.logger.error("Error occurred while adding SRIOV: %s", error)
4497 return None, vm_obj, vcenter_conect
4498
4499
4500 def get_sriov_devices(self,host, no_of_vfs):
4501 """
4502 Method to get the details of SRIOV devices on given host
4503 Args:
4504 host - vSphere host object
4505 no_of_vfs - number of VFs needed on host
4506
4507 Returns:
4508 array of SRIOV devices
4509 """
4510 sriovInfo=[]
4511 if host:
4512 for device in host.config.pciPassthruInfo:
4513 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4514 if device.numVirtualFunction >= no_of_vfs:
4515 sriovInfo.append(device)
4516 break
4517 return sriovInfo
4518
4519
4520 def get_host_and_sriov_devices(self, content, no_of_vfs):
4521 """
4522 Method to get the details of SRIOV devices on all hosts
4523 
4524 Args:
4525 content - vCenter content object
4526 no_of_vfs - number of pci VFs needed on host
4527
4528 Returns:
4529 array of SRIOV devices and host object
4530 """
4531 host_obj = None
4532 sriov_device_objs = None
4533 try:
4534 if content:
4535 container = content.viewManager.CreateContainerView(content.rootFolder,
4536 [vim.HostSystem], True)
4537 for host in container.view:
4538 devices = self.get_sriov_devices(host, no_of_vfs)
4539 if devices:
4540 host_obj = host
4541 sriov_device_objs = devices
4542 break
4543 except Exception as exp:
4544 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4545
4546 return host_obj,sriov_device_objs
4547
4548
4549 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
4550 """
4551 Method to add SRIOV adapter to vm
4552
4553 Args:
4554 host_obj - vSphere host object
4555 vm_obj - vSphere vm object
4556 content - vCenter content object
4557 network_name - name of distributed virtual portgroup
4558 sriov_device - SRIOV device info
4559
4560 Returns:
4561 task object
4562 """
4563 devices = []
4564 vnic_label = "sriov nic"
4565 try:
4566 dvs_portgr = self.get_dvport_group(network_name)
4567 network_name = dvs_portgr.name
4568 nic = vim.vm.device.VirtualDeviceSpec()
4569 # VM device
4570 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4571 nic.device = vim.vm.device.VirtualSriovEthernetCard()
4572 nic.device.addressType = 'assigned'
4573 #nic.device.key = 13016
4574 nic.device.deviceInfo = vim.Description()
4575 nic.device.deviceInfo.label = vnic_label
4576 nic.device.deviceInfo.summary = network_name
4577 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
4578
4579 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
4580 nic.device.backing.deviceName = network_name
4581 nic.device.backing.useAutoDetect = False
4582 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
4583 nic.device.connectable.startConnected = True
4584 nic.device.connectable.allowGuestControl = True
4585
4586 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
4587 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
4588 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
4589
4590 devices.append(nic)
4591 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
4592 task = vm_obj.ReconfigVM_Task(vmconf)
4593 return task
4594 except Exception as exp:
4595 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
4596 return None
4597
4598
4599 def create_dvPort_group(self, network_name):
4600 """
4601 Method to create distributed virtual portgroup
4602
4603 Args:
4604 network_name - name of network/portgroup
4605
4606 Returns:
4607 portgroup key
4608 """
4609 try:
4610 new_network_name = [network_name, '-', str(uuid.uuid4())]
4611 network_name=''.join(new_network_name)
4612 vcenter_conect, content = self.get_vcenter_content()
4613
4614 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4615 if dv_switch:
4616 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4617 dv_pg_spec.name = network_name
4618
4619 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4620 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4621 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4622 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4623 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4624 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4625
4626 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4627 self.wait_for_vcenter_task(task, vcenter_conect)
4628
4629 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4630 if dvPort_group:
4631                     self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
4632 return dvPort_group.key
4633 else:
4634                     self.logger.debug("No distributed virtual port group found with name {}".format(network_name))
4635
4636 except Exception as exp:
4637             self.logger.error("Error occurred while creating distributed virtual port group {}"\
4638 " : {}".format(network_name, exp))
4639 return None
4640
4641     def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
4642 """
4643         Method to reconfigure distributed virtual portgroup
4644
4645 Args:
4646             dvPort_group_name - name of distributed virtual portgroup
4647             content - vCenter content object
4648             config_info - distributed virtual portgroup configuration
4649
4650 Returns:
4651 task object
4652 """
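        # Only the vlan setting is changed here; configVersion must echo the portgroup's
        # current value or vCenter rejects the reconfigure spec as stale.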
4653 try:
4654 dvPort_group = self.get_dvport_group(dvPort_group_name)
4655 if dvPort_group:
4656 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4657 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4658 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4659 if "vlanID" in config_info:
4660 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4661 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4662
4663 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4664 return task
4665 else:
4666 return None
4667 except Exception as exp:
4668             self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
4669 " : {}".format(dvPort_group_name, exp))
4670 return None
4671
4672
4673     def destroy_dvport_group(self, dvPort_group_name):
4674 """
4675         Method to destroy distributed virtual portgroup
4676
4677 Args:
4678             dvPort_group_name - name of distributed virtual portgroup
4679
4680 Returns:
4681             True if the portgroup was deleted successfully, else False
4682 """
4683 vcenter_conect, content = self.get_vcenter_content()
4684 try:
4685 status = None
4686 dvPort_group = self.get_dvport_group(dvPort_group_name)
4687 if dvPort_group:
4688 task = dvPort_group.Destroy_Task()
4689 status = self.wait_for_vcenter_task(task, vcenter_conect)
4690 return status
4691 except vmodl.MethodFault as exp:
4692             self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
4693 exp, dvPort_group_name))
4694 return None
4695
4696
4697 def get_dvport_group(self, dvPort_group_name):
4698 """
4699         Method to get distributed virtual portgroup
4700
4701 Args:
4702             dvPort_group_name - name of distributed virtual portgroup
4703
4704 Returns:
4705 portgroup object
4706 """
4707 vcenter_conect, content = self.get_vcenter_content()
4708 dvPort_group = None
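        # Note: the lookup below matches on the portgroup key (as returned by
        # create_dvPort_group), not on its display name.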
4709 try:
4710 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4711 for item in container.view:
4712 if item.key == dvPort_group_name:
4713 dvPort_group = item
4714 break
4715 return dvPort_group
4716 except vmodl.MethodFault as exp:
4717             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
4718 exp, dvPort_group_name))
4719 return None
4720
4721 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4722 """
4723         Method to get distributed virtual portgroup vlan ID
4724
4725 Args:
4726             dvPort_group_name - name of distributed virtual portgroup
4727
4728 Returns:
4729 vlan ID
4730 """
4731 vlanId = None
4732 try:
4733 dvPort_group = self.get_dvport_group(dvPort_group_name)
4734 if dvPort_group:
4735 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4736 except vmodl.MethodFault as exp:
4737             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
4738 exp, dvPort_group_name))
4739 return vlanId
4740
4741
4742 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4743 """
4744         Method to configure vlan ID in a distributed virtual portgroup
4745
4746 Args:
4747             dvPort_group_name - name of distributed virtual portgroup
4748
4749 Returns:
4750 None
4751 """
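        # A vlan ID of 0 means the portgroup is still untagged: pick an unused ID from the
        # configured 'vlanID_range' and apply it through a portgroup reconfigure task.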
4752 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4753 if vlanID == 0:
4754 #configure vlanID
4755 vlanID = self.genrate_vlanID(dvPort_group_name)
4756 config = {"vlanID":vlanID}
4757 task = self.reconfig_portgroup(content, dvPort_group_name,
4758 config_info=config)
4759 if task:
4760                 status = self.wait_for_vcenter_task(task, vcenter_conect)
4761 if status:
4762 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4763 dvPort_group_name,vlanID))
4764 else:
4765                     self.logger.error("Failed to reconfigure portgroup {} for vlan ID {}".format(
4766 dvPort_group_name, vlanID))
4767
4768
4769 def genrate_vlanID(self, network_name):
4770 """
4771 Method to get unused vlanID
4772 Args:
4773 network_name - name of network/portgroup
4774 Returns:
4775 vlanID
4776 """
4777 vlan_id = None
4778 used_ids = []
4779         if self.config.get('vlanID_range') is None:
4780             raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4781                 "in the config before creating an SRIOV network with a vlan tag")
4782 if "used_vlanIDs" not in self.persistent_info:
4783 self.persistent_info["used_vlanIDs"] = {}
4784 else:
4785 used_ids = self.persistent_info["used_vlanIDs"].values()
4786
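        # Walk the configured "start-end" ranges and hand out the first ID not already
        # recorded in self.persistent_info['used_vlanIDs'] for another network.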
4787 for vlanID_range in self.config.get('vlanID_range'):
4788             start_vlanid, end_vlanid = vlanID_range.split("-")
4789             if int(start_vlanid) > int(end_vlanid):
4790 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4791 vlanID_range))
4792
4793 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4794 if id not in used_ids:
4795 vlan_id = id
4796 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4797 return vlan_id
4798 if vlan_id is None:
4799 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4800
4801
4802 def get_obj(self, content, vimtype, name):
4803 """
4804 Get the vsphere object associated with a given text name
4805 """
4806 obj = None
4807 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4808 for item in container.view:
4809 if item.name == name:
4810 obj = item
4811 break
4812 return obj
4813
4814
4815 def insert_media_to_vm(self, vapp, image_id):
4816 """
4817 Method to insert media CD-ROM (ISO image) from catalog to vm.
4818 vapp - vapp object to get vm id
4819         image_id - image id of the CD-ROM (ISO) to be inserted into the vm
4820 """
4821 # create connection object
4822 vca = self.connect()
4823 try:
4824 # fetching catalog details
4825 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
4826 response = Http.get(url=rest_url,
4827 headers=vca.vcloud_session.get_vcloud_headers(),
4828 verify=vca.verify,
4829 logger=vca.logger)
4830
4831 if response.status_code != 200:
4832 self.logger.error("REST call {} failed reason : {}"\
4833                                   " status code : {}".format(rest_url,
4834 response.content,
4835 response.status_code))
4836 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
4837 "catalog details")
4838 # searching iso name and id
4839 iso_name,media_id = self.get_media_details(vca, response.content)
4840
4841 if iso_name and media_id:
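            # Build the MediaInsertOrEjectParams payload and POST it to the VM's
            # insertMedia action; a 202 response means vCD accepted the request and
            # returned a task that is then waited on.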
4842 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
4843 <ns6:MediaInsertOrEjectParams
4844 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
4845 <ns6:Media
4846 type="application/vnd.vmware.vcloud.media+xml"
4847 name="{}.iso"
4848 id="urn:vcloud:media:{}"
4849 href="https://{}/api/media/{}"/>
4850 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
4851 vca.host,media_id)
4852
4853 for vms in vapp._get_vms():
4854 vm_id = (vms.id).split(':')[-1]
4855
4856 headers = vca.vcloud_session.get_vcloud_headers()
4857 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
4858 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
4859
4860 response = Http.post(url=rest_url,
4861 headers=headers,
4862 data=data,
4863 verify=vca.verify,
4864 logger=vca.logger)
4865
4866 if response.status_code != 202:
4867 self.logger.error("Failed to insert CD-ROM to vm")
4868 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
4869                                                     " ISO image to vm")
4870 else:
4871 task = taskType.parseString(response.content, True)
4872 if isinstance(task, GenericTask):
4873 vca.block_until_completed(task)
4874                     self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
4875 " image to vm {}".format(vm_id))
4876 except Exception as exp:
4877 self.logger.error("insert_media_to_vm() : exception occurred "\
4878 "while inserting media CD-ROM")
4879 raise vimconn.vimconnException(message=exp)
4880
4881
4882 def get_media_details(self, vca, content):
4883 """
4884 Method to get catalog item details
4885 vca - connection object
4886 content - Catalog details
4887 Return - Media name, media id
4888 """
4889 cataloghref_list = []
4890 try:
4891 if content:
4892 vm_list_xmlroot = XmlElementTree.fromstring(content)
4893 for child in vm_list_xmlroot.iter():
4894 if 'CatalogItem' in child.tag:
4895 cataloghref_list.append(child.attrib.get('href'))
4896             if cataloghref_list:
4897 for href in cataloghref_list:
4898 if href:
4899 response = Http.get(url=href,
4900 headers=vca.vcloud_session.get_vcloud_headers(),
4901 verify=vca.verify,
4902 logger=vca.logger)
4903 if response.status_code != 200:
4904 self.logger.error("REST call {} failed reason : {}"\
4905                                           " status code : {}".format(href,
4906 response.content,
4907 response.status_code))
4908 raise vimconn.vimconnException("get_media_details : Failed to get "\
4909 "catalogitem details")
4910 list_xmlroot = XmlElementTree.fromstring(response.content)
4911 for child in list_xmlroot.iter():
4912 if 'Entity' in child.tag:
4913 if 'media' in child.attrib.get('href'):
4914 name = child.attrib.get('name')
4915 media_id = child.attrib.get('href').split('/').pop()
4916 return name,media_id
4917 else:
4918 self.logger.debug("Media name and id not found")
4919 return False,False
4920 except Exception as exp:
4921 self.logger.error("get_media_details : exception occurred "\
4922                               "while getting media details")
4923 raise vimconn.vimconnException(message=exp)
4924
4925
4926 def retry_rest(self, api, url, add_headers=None, data=None):
4927 """ Method to get Token & retry respective REST request
4928 Args:
4929 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
4930 url - request url to be used
4931 add_headers - Additional headers (optional)
4932 data - Request payload data to be passed in request
4933 Returns:
4934 response - Response of request
4935 """
4936 response = None
4937
4938 #Get token
4939 self.get_token()
4940
4941         headers = self.vca.vcloud_session.get_vcloud_headers()
4942
4943 if add_headers:
4944 headers.update(add_headers)
4945
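        # Re-issue the original request with the refreshed session headers; callers use
        # this after a first attempt failed, typically because the token expired.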
4946 if api == 'GET':
4947 response = Http.get(url=url,
4948 headers=headers,
4949 verify=self.vca.verify,
4950 logger=self.vca.logger)
4951 return response
4952 elif api == 'PUT':
4955 response = Http.put(url=url,
4956 data=data,
4957 headers=headers,
4958                                 verify=self.vca.verify, logger=self.vca.logger)
4959 return response
4960 elif api == 'POST':
4961             response = Http.post(url=url, data=data,
4962                                  headers=headers,
4963                                  verify=self.vca.verify, logger=self.vca.logger)
4964             return response
4965
4966 def get_token(self):
4967 """ Generate a new token if expired
4968
4969 Returns:
4970                 The refreshed vca object is stored in self.vca and can later be used to connect to vCloud Director as admin for the VDC
4971 """
4972 vca = None
4973
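        # Login twice: first with the password to obtain a fresh token, then with that
        # token to populate the org session used by later REST calls.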
4974 try:
4975 self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
4976 self.user,
4977 self.org_name))
4978 vca = VCA(host=self.url,
4979 username=self.user,
4980 service_type=STANDALONE,
4981 version=VCAVERSION,
4982 verify=False,
4983 log=False)
4984
4985 result = vca.login(password=self.passwd, org=self.org_name)
4986 if not result:
4987 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
4988
4989 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
4990 if result is True:
4991 self.logger.info(
4992 "Successfully generated token for vcloud direct org: {} as user: {}".format(self.org_name, self.user))
4993
4994         except Exception:
4995 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
4996 "{} as user: {}".format(self.org_name, self.user))
4997
4998 if not vca:
4999             raise vimconn.vimconnConnectionException("self.connect() failed while reconnecting")
5000
5001 #Update vca
5002 self.vca = vca
5003
5004
5005 def get_vdc_details(self):
5006 """ Get VDC details using pyVcloud Lib
5007
5008 Returns vdc object
5009 """
5010 vdc = self.vca.get_vdc(self.tenant_name)
5011
5012         # Retry once by refreshing the token if the first attempt failed
5013 if vdc is None:
5014 self.get_token()
5015 vdc = self.vca.get_vdc(self.tenant_name)
5016
5017 return vdc
5018