Merge branch 'packaging'
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
68 # global variable for vcd connector type
69 STANDALONE = 'standalone'
70
71 # key for flavor dicts
72 FLAVOR_RAM_KEY = 'ram'
73 FLAVOR_VCPUS_KEY = 'vcpus'
74 FLAVOR_DISK_KEY = 'disk'
75 DEFAULT_IP_PROFILE = {'dhcp_count':50,
76 'dhcp_enabled':True,
77 'ip_version':"IPv4"
78 }
79 # global variable for wait time
80 INTERVAL_TIME = 5
81 MAX_WAIT_TIME = 1800
82
83 VCAVERSION = '5.9'
84
85 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
86 __date__ = "$12-Jan-2017 11:09:29$"
87 __version__ = '0.1'
88
89 # -1: "Could not be created",
90 # 0: "Unresolved",
91 # 1: "Resolved",
92 # 2: "Deployed",
93 # 3: "Suspended",
94 # 4: "Powered on",
95 # 5: "Waiting for user input",
96 # 6: "Unknown state",
97 # 7: "Unrecognized state",
98 # 8: "Powered off",
99 # 9: "Inconsistent state",
100 # 10: "Children do not all have the same status",
101 # 11: "Upload initiated, OVF descriptor pending",
102 # 12: "Upload initiated, copying contents",
103 # 13: "Upload initiated , disk contents pending",
104 # 14: "Upload has been quarantined",
105 # 15: "Upload quarantine period has expired"
106
107 # mapping vCD status to MANO
108 vcdStatusCode2manoFormat = {4: 'ACTIVE',
109 7: 'PAUSED',
110 3: 'SUSPENDED',
111 8: 'INACTIVE',
112 12: 'BUILD',
113 -1: 'ERROR',
114 14: 'DELETED'}
115
116 #
117 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
118 'ERROR': 'ERROR', 'DELETED': 'DELETED'
119 }
120
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
128 Constructor create vmware connector to vCloud director.
129
130 By default construct doesn't validate connection state. So client can create object with None arguments.
131 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
132
133 a) It initialize organization UUID
134 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
135
136 Args:
137 uuid - is organization uuid.
138 name - is organization name that must be presented in vCloud director.
139 tenant_id - is VDC uuid it must be presented in vCloud director
140 tenant_name - is VDC name.
141 url - is hostname or ip address of vCloud director
142 url_admin - same as above.
143 user - is user that administrator for organization. Caller must make sure that
144 username has right privileges.
145
146 password - is password for a user.
147
148 VMware connector also requires PVDC administrative privileges and separate account.
149 This variables must be passed via config argument dict contains keys
150
151 dict['admin_username']
152 dict['admin_password']
153 config - Provide NSX and vCenter information
154
155 Returns:
156 Nothing.
157 """
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
217 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
233 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 if index == 'tenant_id':
243 return self.tenant_id
244 if index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 if index == 'tenant_id':
269 self.tenant_id = value
270 if index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
289 def connect_as_admin(self):
290 """ Method connect as pvdc admin user to vCloud director.
291 There are certain action that can be done only by provider vdc admin user.
292 Organization creation / provider network creation etc.
293
294 Returns:
295 The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
296 """
297
298 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
299
300 vca_admin = VCA(host=self.url,
301 username=self.admin_user,
302 service_type=STANDALONE,
303 version=VCAVERSION,
304 verify=False,
305 log=False)
306 result = vca_admin.login(password=self.admin_password, org='System')
307 if not result:
308 raise vimconn.vimconnConnectionException(
309 "Can't connect to a vCloud director as: {}".format(self.admin_user))
310 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
311 if result is True:
312 self.logger.info(
313 "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
314
315 return vca_admin
316
317 def connect(self):
318 """ Method connect as normal user to vCloud director.
319
320 Returns:
321 The return vca object that letter can be used to connect to vCloud director as admin for VDC
322 """
323
324 try:
325 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
326 self.user,
327 self.org_name))
328 vca = VCA(host=self.url,
329 username=self.user,
330 service_type=STANDALONE,
331 version=VCAVERSION,
332 verify=False,
333 log=False)
334
335 result = vca.login(password=self.passwd, org=self.org_name)
336 if not result:
337 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
338 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
339 if result is True:
340 self.logger.info(
341 "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
342
343 except:
344 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
345 "{} as user: {}".format(self.org_name, self.user))
346
347 return vca
348
349 def init_organization(self):
350 """ Method initialize organization UUID and VDC parameters.
351
352 At bare minimum client must provide organization name that present in vCloud director and VDC.
353
354 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
355 The Org - UUID will be initialized at the run time if data center present in vCloud director.
356
357 Returns:
358 The return vca object that letter can be used to connect to vcloud direct as admin
359 """
360 try:
361 if self.org_uuid is None:
362 org_dict = self.get_org_list()
363 for org in org_dict:
364 # we set org UUID at the init phase but we can do it only when we have valid credential.
365 if org_dict[org] == self.org_name:
366 self.org_uuid = org
367 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
368 break
369 else:
370 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
371
372 # if well good we require for org details
373 org_details_dict = self.get_org(org_uuid=self.org_uuid)
374
375 # we have two case if we want to initialize VDC ID or VDC name at run time
376 # tenant_name provided but no tenant id
377 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
378 vdcs_dict = org_details_dict['vdcs']
379 for vdc in vdcs_dict:
380 if vdcs_dict[vdc] == self.tenant_name:
381 self.tenant_id = vdc
382 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
383 self.org_name))
384 break
385 else:
386 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
387 # case two we have tenant_id but we don't have tenant name so we find and set it.
388 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
389 vdcs_dict = org_details_dict['vdcs']
390 for vdc in vdcs_dict:
391 if vdc == self.tenant_id:
392 self.tenant_name = vdcs_dict[vdc]
393 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
394 self.org_name))
395 break
396 else:
397 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
398 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
399 except:
400 self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
401 self.logger.debug(traceback.format_exc())
402 self.org_uuid = None
403
404 def new_tenant(self, tenant_name=None, tenant_description=None):
405 """ Method adds a new tenant to VIM with this name.
406 This action requires access to create VDC action in vCloud director.
407
408 Args:
409 tenant_name is tenant_name to be created.
410 tenant_description not used for this call
411
412 Return:
413 returns the tenant identifier in UUID format.
414 If action is failed method will throw vimconn.vimconnException method
415 """
416 vdc_task = self.create_vdc(vdc_name=tenant_name)
417 if vdc_task is not None:
418 vdc_uuid, value = vdc_task.popitem()
419 self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
420 return vdc_uuid
421 else:
422 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
423
424 def delete_tenant(self, tenant_id=None):
425 """Delete a tenant from VIM"""
426 'Returns the tenant identifier'
427 raise vimconn.vimconnNotImplemented("Should have implemented this")
428
429 def get_tenant_list(self, filter_dict={}):
430 """Obtain tenants of VIM
431 filter_dict can contain the following keys:
432 name: filter by tenant name
433 id: filter by tenant uuid/id
434 <other VIM specific>
435 Returns the tenant list of dictionaries:
436 [{'name':'<name>, 'id':'<id>, ...}, ...]
437
438 """
439 org_dict = self.get_org(self.org_uuid)
440 vdcs_dict = org_dict['vdcs']
441
442 vdclist = []
443 try:
444 for k in vdcs_dict:
445 entry = {'name': vdcs_dict[k], 'id': k}
446 # if caller didn't specify dictionary we return all tenants.
447 if filter_dict is not None and filter_dict:
448 filtered_entry = entry.copy()
449 filtered_dict = set(entry.keys()) - set(filter_dict)
450 for unwanted_key in filtered_dict: del entry[unwanted_key]
451 if filter_dict == entry:
452 vdclist.append(filtered_entry)
453 else:
454 vdclist.append(entry)
455 except:
456 self.logger.debug("Error in get_tenant_list()")
457 self.logger.debug(traceback.format_exc())
458 raise vimconn.vimconnException("Incorrect state. {}")
459
460 return vdclist
461
462 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
463 """Adds a tenant network to VIM
464 net_name is the name
465 net_type can be 'bridge','data'.'ptp'.
466 ip_profile is a dict containing the IP parameters of the network
467 shared is a boolean
468 Returns the network identifier"""
469
470 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
471 .format(net_name, net_type, ip_profile, shared))
472
473 isshared = 'false'
474 if shared:
475 isshared = 'true'
476
477 # ############# Stub code for SRIOV #################
478 # if net_type == "data" or net_type == "ptp":
479 # if self.config.get('dv_switch_name') == None:
480 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
481 # network_uuid = self.create_dvPort_group(net_name)
482
483 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
484 ip_profile=ip_profile, isshared=isshared)
485 if network_uuid is not None:
486 return network_uuid
487 else:
488 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
489
490 def get_vcd_network_list(self):
491 """ Method available organization for a logged in tenant
492
493 Returns:
494 The return vca object that letter can be used to connect to vcloud direct as admin
495 """
496
497 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
498 vca = self.connect()
499 if not vca:
500 raise vimconn.vimconnConnectionException("self.connect() is failed.")
501
502 if not self.tenant_name:
503 raise vimconn.vimconnConnectionException("Tenant name is empty.")
504
505 vdc = vca.get_vdc(self.tenant_name)
506 if vdc is None:
507 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
508
509 vdc_uuid = vdc.get_id().split(":")[3]
510 networks = vca.get_networks(vdc.get_name())
511 network_list = []
512 try:
513 for network in networks:
514 filter_dict = {}
515 netid = network.get_id().split(":")
516 if len(netid) != 4:
517 continue
518
519 filter_dict["name"] = network.get_name()
520 filter_dict["id"] = netid[3]
521 filter_dict["shared"] = network.get_IsShared()
522 filter_dict["tenant_id"] = vdc_uuid
523 if network.get_status() == 1:
524 filter_dict["admin_state_up"] = True
525 else:
526 filter_dict["admin_state_up"] = False
527 filter_dict["status"] = "ACTIVE"
528 filter_dict["type"] = "bridge"
529 network_list.append(filter_dict)
530 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
531 except:
532 self.logger.debug("Error in get_vcd_network_list")
533 self.logger.debug(traceback.format_exc())
534 pass
535
536 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
537 return network_list
538
539 def get_network_list(self, filter_dict={}):
540 """Obtain tenant networks of VIM
541 Filter_dict can be:
542 name: network name OR/AND
543 id: network uuid OR/AND
544 shared: boolean OR/AND
545 tenant_id: tenant OR/AND
546 admin_state_up: boolean
547 status: 'ACTIVE'
548
549 [{key : value , key : value}]
550
551 Returns the network list of dictionaries:
552 [{<the fields at Filter_dict plus some VIM specific>}, ...]
553 List can be empty
554 """
555
556 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
557 vca = self.connect()
558 if not vca:
559 raise vimconn.vimconnConnectionException("self.connect() is failed.")
560
561 if not self.tenant_name:
562 raise vimconn.vimconnConnectionException("Tenant name is empty.")
563
564 vdc = vca.get_vdc(self.tenant_name)
565 if vdc is None:
566 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
567
568 try:
569 vdcid = vdc.get_id().split(":")[3]
570 networks = vca.get_networks(vdc.get_name())
571 network_list = []
572
573 for network in networks:
574 filter_entry = {}
575 net_uuid = network.get_id().split(":")
576 if len(net_uuid) != 4:
577 continue
578 else:
579 net_uuid = net_uuid[3]
580 # create dict entry
581 self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
582 vdcid,
583 network.get_name()))
584 filter_entry["name"] = network.get_name()
585 filter_entry["id"] = net_uuid
586 filter_entry["shared"] = network.get_IsShared()
587 filter_entry["tenant_id"] = vdcid
588 if network.get_status() == 1:
589 filter_entry["admin_state_up"] = True
590 else:
591 filter_entry["admin_state_up"] = False
592 filter_entry["status"] = "ACTIVE"
593 filter_entry["type"] = "bridge"
594 filtered_entry = filter_entry.copy()
595
596 if filter_dict is not None and filter_dict:
597 # we remove all the key : value we don't care and match only
598 # respected field
599 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
600 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
601 if filter_dict == filter_entry:
602 network_list.append(filtered_entry)
603 else:
604 network_list.append(filtered_entry)
605 except:
606 self.logger.debug("Error in get_vcd_network_list")
607 self.logger.debug(traceback.format_exc())
608
609 self.logger.debug("Returning {}".format(network_list))
610 return network_list
611
612 def get_network(self, net_id):
613 """Method obtains network details of net_id VIM network
614 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
615
616 vca = self.connect()
617 if not vca:
618 raise vimconn.vimconnConnectionException("self.connect() is failed")
619
620 try:
621 vdc = vca.get_vdc(self.tenant_name)
622 vdc_id = vdc.get_id().split(":")[3]
623
624 networks = vca.get_networks(vdc.get_name())
625 filter_dict = {}
626
627 for network in networks:
628 vdc_network_id = network.get_id().split(":")
629 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
630 filter_dict["name"] = network.get_name()
631 filter_dict["id"] = vdc_network_id[3]
632 filter_dict["shared"] = network.get_IsShared()
633 filter_dict["tenant_id"] = vdc_id
634 if network.get_status() == 1:
635 filter_dict["admin_state_up"] = True
636 else:
637 filter_dict["admin_state_up"] = False
638 filter_dict["status"] = "ACTIVE"
639 filter_dict["type"] = "bridge"
640 self.logger.debug("Returning {}".format(filter_dict))
641 return filter_dict
642 except:
643 self.logger.debug("Error in get_network")
644 self.logger.debug(traceback.format_exc())
645
646 return filter_dict
647
648 def delete_network(self, net_id):
649 """
650 Method Deletes a tenant network from VIM, provide the network id.
651
652 Returns the network identifier or raise an exception
653 """
654
655 vca = self.connect()
656 if not vca:
657 raise vimconn.vimconnConnectionException("self.connect() for tenant {} is failed.".format(self.tenant_name))
658
659 # ############# Stub code for SRIOV #################
660 # dvport_group = self.get_dvport_group(net_id)
661 # if dvport_group:
662 # #delete portgroup
663 # status = self.destroy_dvport_group(net_id)
664 # if status:
665 # # Remove vlanID from persistent info
666 # if net_id in self.persistent_info["used_vlanIDs"]:
667 # del self.persistent_info["used_vlanIDs"][net_id]
668 #
669 # return net_id
670
671 vcd_network = self.get_vcd_network(network_uuid=net_id)
672 if vcd_network is not None and vcd_network:
673 if self.delete_network_action(network_uuid=net_id):
674 return net_id
675 else:
676 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
677
678 def refresh_nets_status(self, net_list):
679 """Get the status of the networks
680 Params: the list of network identifiers
681 Returns a dictionary with:
682 net_id: #VIM id of this network
683 status: #Mandatory. Text with one of:
684 # DELETED (not found at vim)
685 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
686 # OTHER (Vim reported other status not understood)
687 # ERROR (VIM indicates an ERROR status)
688 # ACTIVE, INACTIVE, DOWN (admin down),
689 # BUILD (on building process)
690 #
691 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
692 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
693
694 """
695
696 vca = self.connect()
697 if not vca:
698 raise vimconn.vimconnConnectionException("self.connect() is failed")
699
700 dict_entry = {}
701 try:
702 for net in net_list:
703 errormsg = ''
704 vcd_network = self.get_vcd_network(network_uuid=net)
705 if vcd_network is not None and vcd_network:
706 if vcd_network['status'] == '1':
707 status = 'ACTIVE'
708 else:
709 status = 'DOWN'
710 else:
711 status = 'DELETED'
712 errormsg = 'Network not found.'
713
714 dict_entry[net] = {'status': status, 'error_msg': errormsg,
715 'vim_info': yaml.safe_dump(vcd_network)}
716 except:
717 self.logger.debug("Error in refresh_nets_status")
718 self.logger.debug(traceback.format_exc())
719
720 return dict_entry
721
722 def get_flavor(self, flavor_id):
723 """Obtain flavor details from the VIM
724 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
725 """
726 if flavor_id not in vimconnector.flavorlist:
727 raise vimconn.vimconnNotFoundException("Flavor not found.")
728 return vimconnector.flavorlist[flavor_id]
729
730 def new_flavor(self, flavor_data):
731 """Adds a tenant flavor to VIM
732 flavor_data contains a dictionary with information, keys:
733 name: flavor name
734 ram: memory (cloud type) in MBytes
735 vpcus: cpus (cloud type)
736 extended: EPA parameters
737 - numas: #items requested in same NUMA
738 memory: number of 1G huge pages memory
739 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
740 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
741 - name: interface name
742 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
743 bandwidth: X Gbps; requested guarantee bandwidth
744 vpci: requested virtual PCI address
745 disk: disk size
746 is_public:
747 #TODO to concrete
748 Returns the flavor identifier"""
749
750 # generate a new uuid put to internal dict and return it.
751 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
752 new_flavor=flavor_data
753 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
754 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
755 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
756
757 extended_flv = flavor_data.get("extended")
758 if extended_flv:
759 numas=extended_flv.get("numas")
760 if numas:
761 for numa in numas:
762 #overwrite ram and vcpus
763 ram = numa['memory']*1024
764 if 'paired-threads' in numa:
765 cpu = numa['paired-threads']*2
766 elif 'cores' in numa:
767 cpu = numa['cores']
768 elif 'threads' in numa:
769 cpu = numa['threads']
770
771 new_flavor[FLAVOR_RAM_KEY] = ram
772 new_flavor[FLAVOR_VCPUS_KEY] = cpu
773 new_flavor[FLAVOR_DISK_KEY] = disk
774 # generate a new uuid put to internal dict and return it.
775 flavor_id = uuid.uuid4()
776 vimconnector.flavorlist[str(flavor_id)] = new_flavor
777 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
778
779 return str(flavor_id)
780
781 def delete_flavor(self, flavor_id):
782 """Deletes a tenant flavor from VIM identify by its id
783
784 Returns the used id or raise an exception
785 """
786 if flavor_id not in vimconnector.flavorlist:
787 raise vimconn.vimconnNotFoundException("Flavor not found.")
788
789 vimconnector.flavorlist.pop(flavor_id, None)
790 return flavor_id
791
792 def new_image(self, image_dict):
793 """
794 Adds a tenant image to VIM
795 Returns:
796 200, image-id if the image is created
797 <0, message if there is an error
798 """
799
800 return self.get_image_id_from_path(image_dict['location'])
801
802 def delete_image(self, image_id):
803 """
804
805 :param image_id:
806 :return:
807 """
808
809 raise vimconn.vimconnNotImplemented("Should have implemented this")
810
811 def catalog_exists(self, catalog_name, catalogs):
812 """
813
814 :param catalog_name:
815 :param catalogs:
816 :return:
817 """
818 for catalog in catalogs:
819 if catalog.name == catalog_name:
820 return True
821 return False
822
823 def create_vimcatalog(self, vca=None, catalog_name=None):
824 """ Create new catalog entry in vCloud director.
825
826 Args
827 vca: vCloud director.
828 catalog_name catalog that client wish to create. Note no validation done for a name.
829 Client must make sure that provide valid string representation.
830
831 Return (bool) True if catalog created.
832
833 """
834 try:
835 task = vca.create_catalog(catalog_name, catalog_name)
836 result = vca.block_until_completed(task)
837 if not result:
838 return False
839 catalogs = vca.get_catalogs()
840 except:
841 return False
842 return self.catalog_exists(catalog_name, catalogs)
843
844 # noinspection PyIncorrectDocstring
845 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
846 description='', progress=False, chunk_bytes=128 * 1024):
847 """
848 Uploads a OVF file to a vCloud catalog
849
850 :param chunk_bytes:
851 :param progress:
852 :param description:
853 :param image_name:
854 :param vca:
855 :param catalog_name: (str): The name of the catalog to upload the media.
856 :param media_file_name: (str): The name of the local media file to upload.
857 :return: (bool) True if the media file was successfully uploaded, false otherwise.
858 """
859 os.path.isfile(media_file_name)
860 statinfo = os.stat(media_file_name)
861
862 # find a catalog entry where we upload OVF.
863 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
864 # status change.
865 # if VCD can parse OVF we upload VMDK file
866 try:
867 for catalog in vca.get_catalogs():
868 if catalog_name != catalog.name:
869 continue
870 link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
871 link.get_rel() == 'add', catalog.get_Link())
872 assert len(link) == 1
873 data = """
874 <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
875 """ % (escape(catalog_name), escape(description))
876 headers = vca.vcloud_session.get_vcloud_headers()
877 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
878 response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
879 if response.status_code == requests.codes.created:
880 catalogItem = XmlElementTree.fromstring(response.content)
881 entity = [child for child in catalogItem if
882 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
883 href = entity.get('href')
884 template = href
885 response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
886 verify=vca.verify, logger=self.logger)
887
888 if response.status_code == requests.codes.ok:
889 media = mediaType.parseString(response.content, True)
890 link = filter(lambda link: link.get_rel() == 'upload:default',
891 media.get_Files().get_File()[0].get_Link())[0]
892 headers = vca.vcloud_session.get_vcloud_headers()
893 headers['Content-Type'] = 'Content-Type text/xml'
894 response = Http.put(link.get_href(),
895 data=open(media_file_name, 'rb'),
896 headers=headers,
897 verify=vca.verify, logger=self.logger)
898 if response.status_code != requests.codes.ok:
899 self.logger.debug(
900 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
901 media_file_name))
902 return False
903
904 # TODO fix this with aync block
905 time.sleep(5)
906
907 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
908
909 # uploading VMDK file
910 # check status of OVF upload and upload remaining files.
911 response = Http.get(template,
912 headers=vca.vcloud_session.get_vcloud_headers(),
913 verify=vca.verify,
914 logger=self.logger)
915
916 if response.status_code == requests.codes.ok:
917 media = mediaType.parseString(response.content, True)
918 number_of_files = len(media.get_Files().get_File())
919 for index in xrange(0, number_of_files):
920 links_list = filter(lambda link: link.get_rel() == 'upload:default',
921 media.get_Files().get_File()[index].get_Link())
922 for link in links_list:
923 # we skip ovf since it already uploaded.
924 if 'ovf' in link.get_href():
925 continue
926 # The OVF file and VMDK must be in a same directory
927 head, tail = os.path.split(media_file_name)
928 file_vmdk = head + '/' + link.get_href().split("/")[-1]
929 if not os.path.isfile(file_vmdk):
930 return False
931 statinfo = os.stat(file_vmdk)
932 if statinfo.st_size == 0:
933 return False
934 hrefvmdk = link.get_href()
935
936 if progress:
937 print("Uploading file: {}".format(file_vmdk))
938 if progress:
939 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
940 FileTransferSpeed()]
941 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
942
943 bytes_transferred = 0
944 f = open(file_vmdk, 'rb')
945 while bytes_transferred < statinfo.st_size:
946 my_bytes = f.read(chunk_bytes)
947 if len(my_bytes) <= chunk_bytes:
948 headers = vca.vcloud_session.get_vcloud_headers()
949 headers['Content-Range'] = 'bytes %s-%s/%s' % (
950 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
951 headers['Content-Length'] = str(len(my_bytes))
952 response = Http.put(hrefvmdk,
953 headers=headers,
954 data=my_bytes,
955 verify=vca.verify,
956 logger=None)
957
958 if response.status_code == requests.codes.ok:
959 bytes_transferred += len(my_bytes)
960 if progress:
961 progress_bar.update(bytes_transferred)
962 else:
963 self.logger.debug(
964 'file upload failed with error: [%s] %s' % (response.status_code,
965 response.content))
966
967 f.close()
968 return False
969 f.close()
970 if progress:
971 progress_bar.finish()
972 time.sleep(10)
973 return True
974 else:
975 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
976 format(catalog_name, media_file_name))
977 return False
978 except Exception as exp:
979 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
980 .format(catalog_name,media_file_name, exp))
981 raise vimconn.vimconnException(
982 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
983 .format(catalog_name,media_file_name, exp))
984
985 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
986 return False
987
988 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
989 """Upload media file"""
990 # TODO add named parameters for readability
991
992 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
993 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
994
995 def validate_uuid4(self, uuid_string=None):
996 """ Method validate correct format of UUID.
997
998 Return: true if string represent valid uuid
999 """
1000 try:
1001 val = uuid.UUID(uuid_string, version=4)
1002 except ValueError:
1003 return False
1004 return True
1005
1006 def get_catalogid(self, catalog_name=None, catalogs=None):
1007 """ Method check catalog and return catalog ID in UUID format.
1008
1009 Args
1010 catalog_name: catalog name as string
1011 catalogs: list of catalogs.
1012
1013 Return: catalogs uuid
1014 """
1015
1016 for catalog in catalogs:
1017 if catalog.name == catalog_name:
1018 catalog_id = catalog.get_id().split(":")
1019 return catalog_id[3]
1020 return None
1021
1022 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1023 """ Method check catalog and return catalog name lookup done by catalog UUID.
1024
1025 Args
1026 catalog_name: catalog name as string
1027 catalogs: list of catalogs.
1028
1029 Return: catalogs name or None
1030 """
1031
1032 if not self.validate_uuid4(uuid_string=catalog_uuid):
1033 return None
1034
1035 for catalog in catalogs:
1036 catalog_id = catalog.get_id().split(":")[3]
1037 if catalog_id == catalog_uuid:
1038 return catalog.name
1039 return None
1040
1041 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1042 """ Method check catalog and return catalog name lookup done by catalog UUID.
1043
1044 Args
1045 catalog_name: catalog name as string
1046 catalogs: list of catalogs.
1047
1048 Return: catalogs name or None
1049 """
1050
1051 if not self.validate_uuid4(uuid_string=catalog_uuid):
1052 return None
1053
1054 for catalog in catalogs:
1055 catalog_id = catalog.get_id().split(":")[3]
1056 if catalog_id == catalog_uuid:
1057 return catalog
1058 return None
1059
1060 def get_image_id_from_path(self, path=None, progress=False):
1061 """ Method upload OVF image to vCloud director.
1062
1063 Each OVF image represented as single catalog entry in vcloud director.
1064 The method check for existing catalog entry. The check done by file name without file extension.
1065
1066 if given catalog name already present method will respond with existing catalog uuid otherwise
1067 it will create new catalog entry and upload OVF file to newly created catalog.
1068
1069 If method can't create catalog entry or upload a file it will throw exception.
1070
1071 Method accept boolean flag progress that will output progress bar. It useful method
1072 for standalone upload use case. In case to test large file upload.
1073
1074 Args
1075 path: - valid path to OVF file.
1076 progress - boolean progress bar show progress bar.
1077
1078 Return: if image uploaded correct method will provide image catalog UUID.
1079 """
1080 vca = self.connect()
1081 if not vca:
1082 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1083
1084 if not path:
1085 raise vimconn.vimconnException("Image path can't be None.")
1086
1087 if not os.path.isfile(path):
1088 raise vimconn.vimconnException("Can't read file. File not found.")
1089
1090 if not os.access(path, os.R_OK):
1091 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1092
1093 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1094
1095 dirpath, filename = os.path.split(path)
1096 flname, file_extension = os.path.splitext(path)
1097 if file_extension != '.ovf':
1098 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1099 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1100
1101 catalog_name = os.path.splitext(filename)[0]
1102 catalog_md5_name = hashlib.md5(path).hexdigest()
1103 self.logger.debug("File name {} Catalog Name {} file path {} "
1104 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1105
1106 try:
1107 catalogs = vca.get_catalogs()
1108 except Exception as exp:
1109 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1110 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1111
1112 if len(catalogs) == 0:
1113 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1114 result = self.create_vimcatalog(vca, catalog_md5_name)
1115 if not result:
1116 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1117 result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
1118 media_name=filename, medial_file_name=path, progress=progress)
1119 if not result:
1120 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1121 return self.get_catalogid(catalog_name, vca.get_catalogs())
1122 else:
1123 for catalog in catalogs:
1124 # search for existing catalog if we find same name we return ID
1125 # TODO optimize this
1126 if catalog.name == catalog_md5_name:
1127 self.logger.debug("Found existing catalog entry for {} "
1128 "catalog id {}".format(catalog_name,
1129 self.get_catalogid(catalog_md5_name, catalogs)))
1130 return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
1131
1132 # if we didn't find existing catalog we create a new one and upload image.
1133 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1134 result = self.create_vimcatalog(vca, catalog_md5_name)
1135 if not result:
1136 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1137
1138 result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
1139 media_name=filename, medial_file_name=path, progress=progress)
1140 if not result:
1141 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1142
1143 return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
1144
1145 def get_image_list(self, filter_dict={}):
1146 '''Obtain tenant images from VIM
1147 Filter_dict can be:
1148 name: image name
1149 id: image uuid
1150 checksum: image checksum
1151 location: image path
1152 Returns the image list of dictionaries:
1153 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1154 List can be empty
1155 '''
1156 vca = self.connect()
1157 if not vca:
1158 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1159 try:
1160 image_list = []
1161 catalogs = vca.get_catalogs()
1162 if len(catalogs) == 0:
1163 return image_list
1164 else:
1165 for catalog in catalogs:
1166 catalog_uuid = catalog.get_id().split(":")[3]
1167 name = catalog.name
1168 filtered_dict = {}
1169 if filter_dict.get("name") and filter_dict["name"] != name:
1170 continue
1171 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1172 continue
1173 filtered_dict ["name"] = name
1174 filtered_dict ["id"] = catalog_uuid
1175 image_list.append(filtered_dict)
1176
1177 self.logger.debug("List of already created catalog items: {}".format(image_list))
1178 return image_list
1179 except Exception as exp:
1180 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1181
1182 def get_vappid(self, vdc=None, vapp_name=None):
1183 """ Method takes vdc object and vApp name and returns vapp uuid or None
1184
1185 Args:
1186 vdc: The VDC object.
1187 vapp_name: is application vappp name identifier
1188
1189 Returns:
1190 The return vApp name otherwise None
1191 """
1192 if vdc is None or vapp_name is None:
1193 return None
1194 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1195 try:
1196 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1197 vdc.ResourceEntities.ResourceEntity)
1198 if len(refs) == 1:
1199 return refs[0].href.split("vapp")[1][1:]
1200 except Exception as e:
1201 self.logger.exception(e)
1202 return False
1203 return None
1204
1205 def check_vapp(self, vdc=None, vapp_uuid=None):
1206 """ Method Method returns True or False if vapp deployed in vCloud director
1207
1208 Args:
1209 vca: Connector to VCA
1210 vdc: The VDC object.
1211 vappid: vappid is application identifier
1212
1213 Returns:
1214 The return True if vApp deployed
1215 :param vdc:
1216 :param vapp_uuid:
1217 """
1218 try:
1219 refs = filter(lambda ref:
1220 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1221 vdc.ResourceEntities.ResourceEntity)
1222 for ref in refs:
1223 vappid = ref.href.split("vapp")[1][1:]
1224 # find vapp with respected vapp uuid
1225 if vappid == vapp_uuid:
1226 return True
1227 except Exception as e:
1228 self.logger.exception(e)
1229 return False
1230 return False
1231
1232 def get_namebyvappid(self, vca=None, vdc=None, vapp_uuid=None):
1233 """Method returns vApp name from vCD and lookup done by vapp_id.
1234
1235 Args:
1236 vca: Connector to VCA
1237 vdc: The VDC object.
1238 vapp_uuid: vappid is application identifier
1239
1240 Returns:
1241 The return vApp name otherwise None
1242 """
1243
1244 try:
1245 refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1246 vdc.ResourceEntities.ResourceEntity)
1247 for ref in refs:
1248 # we care only about UUID the rest doesn't matter
1249 vappid = ref.href.split("vapp")[1][1:]
1250 if vappid == vapp_uuid:
1251 response = Http.get(ref.href, headers=vca.vcloud_session.get_vcloud_headers(), verify=vca.verify,
1252 logger=self.logger)
1253 tree = XmlElementTree.fromstring(response.content)
1254 return tree.attrib['name']
1255 except Exception as e:
1256 self.logger.exception(e)
1257 return None
1258 return None
1259
1260 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
1261 cloud_config=None, disk_list=None):
1262 """Adds a VM instance to VIM
1263 Params:
1264 start: indicates if VM must start or boot in pause mode. Ignored
1265 image_id,flavor_id: image and flavor uuid
1266 net_list: list of interfaces, each one is a dictionary with:
1267 name:
1268 net_id: network uuid to connect
1269 vpci: virtual vcpi to assign
1270 model: interface model, virtio, e2000, ...
1271 mac_address:
1272 use: 'data', 'bridge', 'mgmt'
1273 type: 'virtual', 'PF', 'VF', 'VFnotShared'
1274 vim_id: filled/added by this function
1275 cloud_config: can be a text script to be passed directly to cloud-init,
1276 or an object to inject users and ssh keys with format:
1277 key-pairs: [] list of keys to install to the default user
1278 users: [{ name, key-pairs: []}] list of users to add with their key-pair
1279 #TODO ip, security groups
1280 Returns >=0, the instance identifier
1281 <0, error_text
1282 """
1283
1284 self.logger.info("Creating new instance for entry {}".format(name))
1285 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
1286 description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
1287 vca = self.connect()
1288 if not vca:
1289 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1290
1291 #new vm name = vmname + tenant_id + uuid
1292 new_vm_name = [name, '-', str(uuid.uuid4())]
1293 vmname_andid = ''.join(new_vm_name)
1294
1295 # if vm already deployed we return existing uuid
1296 # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
1297 # if vapp_uuid is not None:
1298 # return vapp_uuid
1299
1300 # we check for presence of VDC, Catalog entry and Flavor.
1301 vdc = vca.get_vdc(self.tenant_name)
1302 if vdc is None:
1303 raise vimconn.vimconnNotFoundException(
1304 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1305 catalogs = vca.get_catalogs()
1306 if catalogs is None:
1307 raise vimconn.vimconnNotFoundException(
1308 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1309
1310 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1311 if catalog_hash_name:
1312 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1313 else:
1314 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1315 "(Failed retrieve catalog information {})".format(name, image_id))
1316
1317
1318 # Set vCPU and Memory based on flavor.
1319 vm_cpus = None
1320 vm_memory = None
1321 vm_disk = None
1322
1323 if flavor_id is not None:
1324 if flavor_id not in vimconnector.flavorlist:
1325 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1326 "Failed retrieve flavor information "
1327 "flavor id {}".format(name, flavor_id))
1328 else:
1329 try:
1330 flavor = vimconnector.flavorlist[flavor_id]
1331 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1332 vm_memory = flavor[FLAVOR_RAM_KEY]
1333 vm_disk = flavor[FLAVOR_DISK_KEY]
1334 extended = flavor.get("extended", None)
1335 if extended:
1336 numas=extended.get("numas", None)
1337
1338 except Exception as exp:
1339 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1340
1341 # image upload creates template name as catalog name space Template.
1342 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1343 power_on = 'false'
1344 if start:
1345 power_on = 'true'
1346
1347 # client must provide at least one entry in net_list if not we report error
1348 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1349 #If no mgmt, then the 1st NN in netlist is considered as primary net.
1350 primary_net = None
1351 primary_netname = None
1352 network_mode = 'bridged'
1353 if net_list is not None and len(net_list) > 0:
1354 for net in net_list:
1355 if 'use' in net and net['use'] == 'mgmt':
1356 primary_net = net
1357 if primary_net is None:
1358 primary_net = net_list[0]
1359
1360 try:
1361 primary_net_id = primary_net['net_id']
1362 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1363 if 'name' in network_dict:
1364 primary_netname = network_dict['name']
1365
1366 except KeyError:
1367 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1368 else:
1369 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
1370
1371 # use: 'data', 'bridge', 'mgmt'
1372 # create vApp. Set vcpu and ram based on flavor id.
1373 try:
1374 vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
1375 self.get_catalogbyid(image_id, catalogs),
1376 network_name=None, # None while creating vapp
1377 network_mode=network_mode,
1378 vm_name=vmname_andid,
1379 vm_cpus=vm_cpus, # can be None if flavor is None
1380 vm_memory=vm_memory) # can be None if flavor is None
1381
1382 if vapptask is None or vapptask is False:
1383 raise vimconn.vimconnUnexpectedResponse(
1384 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1385 if type(vapptask) is VappTask:
1386 vca.block_until_completed(vapptask)
1387
1388 except Exception as exp:
1389 raise vimconn.vimconnUnexpectedResponse(
1390 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1391
1392 # we should have now vapp in undeployed state.
1393 try:
1394 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
1395 vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
1396 except Exception as exp:
1397 raise vimconn.vimconnUnexpectedResponse(
1398 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1399 .format(vmname_andid, exp))
1400
1401 if vapp is None:
1402 raise vimconn.vimconnUnexpectedResponse(
1403 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1404 vmname_andid))
1405
1406 #Add PCI passthrough/SRIOV configrations
1407 vm_obj = None
1408 pci_devices_info = []
1409 sriov_net_info = []
1410 reserve_memory = False
1411
1412 for net in net_list:
1413 if net["type"]=="PF":
1414 pci_devices_info.append(net)
1415 elif (net["type"]=="VF" or net["type"]=="VFnotShared") and 'net_id'in net:
1416 sriov_net_info.append(net)
1417
1418 #Add PCI
1419 if len(pci_devices_info) > 0:
1420 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1421 vmname_andid ))
1422 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1423 pci_devices_info,
1424 vmname_andid)
1425 if PCI_devices_status:
1426 self.logger.info("Added PCI devives {} to VM {}".format(
1427 pci_devices_info,
1428 vmname_andid)
1429 )
1430 reserve_memory = True
1431 else:
1432 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1433 pci_devices_info,
1434 vmname_andid)
1435 )
1436 # Modify vm disk
1437 if vm_disk:
1438 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1439 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1440 if result :
1441 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1442
1443 #Add new or existing disks to vApp
1444 if disk_list:
1445 added_existing_disk = False
1446 for disk in disk_list:
1447 if "image_id" in disk and disk["image_id"] is not None:
1448 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1449 disk["image_id"] , vapp_uuid))
1450 self.add_existing_disk(catalogs=catalogs,
1451 image_id=disk["image_id"],
1452 size = disk["size"],
1453 template_name=templateName,
1454 vapp_uuid=vapp_uuid
1455 )
1456 added_existing_disk = True
1457 else:
1458 #Wait till added existing disk gets reflected into vCD database/API
1459 if added_existing_disk:
1460 time.sleep(5)
1461 added_existing_disk = False
1462 self.add_new_disk(vca, vapp_uuid, disk['size'])
1463
1464 if numas:
1465 # Assigning numa affinity setting
1466 for numa in numas:
1467 if 'paired-threads-id' in numa:
1468 paired_threads_id = numa['paired-threads-id']
1469 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1470
1471 # add NICs & connect to networks in netlist
1472 try:
1473 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1474 nicIndex = 0
1475 primary_nic_index = 0
1476 for net in net_list:
1477 # openmano uses network id in UUID format.
1478 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
1479 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1480 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1481
1482 if 'net_id' not in net:
1483 continue
1484
1485 interface_net_id = net['net_id']
1486 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1487 interface_network_mode = net['use']
1488
1489 if interface_network_mode == 'mgmt':
1490 primary_nic_index = nicIndex
1491
1492 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1493 - DHCP (The IP address is obtained from a DHCP service.)
1494 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1495 - NONE (No IP addressing mode specified.)"""
1496
1497 if primary_netname is not None:
1498 nets = filter(lambda n: n.name == interface_net_name, vca.get_networks(self.tenant_name))
1499 if len(nets) == 1:
1500 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
1501 task = vapp.connect_to_network(nets[0].name, nets[0].href)
1502 if type(task) is GenericTask:
1503 vca.block_until_completed(task)
1504 # connect network to VM - with all DHCP by default
1505
1506 type_list = ['PF','VF','VFnotShared']
1507 if 'type' in net and net['type'] not in type_list:
1508 # fetching nic type from vnf
1509 if 'model' in net:
1510 nic_type = net['model']
1511 self.logger.info("new_vminstance(): adding network adapter "\
1512 "to a network {}".format(nets[0].name))
1513 self.add_network_adapter_to_vms(vapp, nets[0].name,
1514 primary_nic_index,
1515 nicIndex,
1516 net,
1517 nic_type=nic_type)
1518 else:
1519 self.logger.info("new_vminstance(): adding network adapter "\
1520 "to a network {}".format(nets[0].name))
1521 self.add_network_adapter_to_vms(vapp, nets[0].name,
1522 primary_nic_index,
1523 nicIndex,
1524 net)
1525 nicIndex += 1
1526
1527 # cloud-init for ssh-key injection
1528 if cloud_config:
1529 self.cloud_init(vapp,cloud_config)
1530
1531 # deploy and power on vm
1532 self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
1533 deploytask = vapp.deploy(powerOn=False)
1534 if type(deploytask) is GenericTask:
1535 vca.block_until_completed(deploytask)
1536
1537 # ############# Stub code for SRIOV #################
1538 #Add SRIOV
1539 # if len(sriov_net_info) > 0:
1540 # self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
1541 # vmname_andid ))
1542 # sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
1543 # sriov_net_info,
1544 # vmname_andid)
1545 # if sriov_status:
1546 # self.logger.info("Added SRIOV {} to VM {}".format(
1547 # sriov_net_info,
1548 # vmname_andid)
1549 # )
1550 # reserve_memory = True
1551 # else:
1552 # self.logger.info("Fail to add SRIOV {} to VM {}".format(
1553 # sriov_net_info,
1554 # vmname_andid)
1555 # )
1556
1557 # If VM has PCI devices or SRIOV reserve memory for VM
1558 if reserve_memory:
1559 memReserve = vm_obj.config.hardware.memoryMB
1560 spec = vim.vm.ConfigSpec()
1561 spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
1562 task = vm_obj.ReconfigVM_Task(spec=spec)
1563 if task:
1564 result = self.wait_for_vcenter_task(task, vcenter_conect)
1565 self.logger.info("Reserved memmoery {} MB for "\
1566 "VM VM status: {}".format(str(memReserve),result))
1567 else:
1568 self.logger.info("Fail to reserved memmoery {} to VM {}".format(
1569 str(memReserve),str(vm_obj)))
1570
1571 self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
1572 poweron_task = vapp.poweron()
1573 if type(poweron_task) is GenericTask:
1574 vca.block_until_completed(poweron_task)
1575
1576 except Exception as exp :
1577 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
1578 self.logger.debug("new_vminstance(): Failed create new vm instance {}".format(name, exp))
1579 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {}".format(name, exp))
1580
1581 # check if vApp deployed and if that the case return vApp UUID otherwise -1
1582 wait_time = 0
1583 vapp_uuid = None
1584 while wait_time <= MAX_WAIT_TIME:
1585 try:
1586 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
1587 except Exception as exp:
1588 raise vimconn.vimconnUnexpectedResponse(
1589 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1590 .format(vmname_andid, exp))
1591
1592 if vapp and vapp.me.deployed:
1593 vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
1594 break
1595 else:
1596 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1597 time.sleep(INTERVAL_TIME)
1598
1599 wait_time +=INTERVAL_TIME
1600
1601 if vapp_uuid is not None:
1602 return vapp_uuid
1603 else:
1604 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
1605
1606 ##
1607 ##
1608 ## based on current discussion
1609 ##
1610 ##
1611 ## server:
1612 # created: '2016-09-08T11:51:58'
1613 # description: simple-instance.linux1.1
1614 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1615 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1616 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1617 # status: ACTIVE
1618 # error_msg:
1619 # interfaces: …
1620 #
1621 def get_vminstance(self, vim_vm_uuid=None):
1622 """Returns the VM instance information from VIM"""
1623
1624 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1625 vca = self.connect()
1626 if not vca:
1627 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1628
1629 vdc = vca.get_vdc(self.tenant_name)
1630 if vdc is None:
1631 raise vimconn.vimconnConnectionException(
1632 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1633
1634 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1635 if not vm_info_dict:
1636 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1637 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1638
1639 status_key = vm_info_dict['status']
1640 error = ''
1641 try:
1642 vm_dict = {'created': vm_info_dict['created'],
1643 'description': vm_info_dict['name'],
1644 'status': vcdStatusCode2manoFormat[int(status_key)],
1645 'hostId': vm_info_dict['vmuuid'],
1646 'error_msg': error,
1647 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1648
1649 if 'interfaces' in vm_info_dict:
1650 vm_dict['interfaces'] = vm_info_dict['interfaces']
1651 else:
1652 vm_dict['interfaces'] = []
1653 except KeyError:
1654 vm_dict = {'created': '',
1655 'description': '',
1656 'status': vcdStatusCode2manoFormat[int(-1)],
1657 'hostId': vm_info_dict['vmuuid'],
1658 'error_msg': "Inconsistency state",
1659 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1660
1661 return vm_dict
1662
1663 def delete_vminstance(self, vm__vim_uuid):
1664 """Method poweroff and remove VM instance from vcloud director network.
1665
1666 Args:
1667 vm__vim_uuid: VM UUID
1668
1669 Returns:
1670 Returns the instance identifier
1671 """
1672
1673 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1674 vca = self.connect()
1675 if not vca:
1676 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1677
1678 vdc = vca.get_vdc(self.tenant_name)
1679 if vdc is None:
1680 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1681 self.tenant_name))
1682 raise vimconn.vimconnException(
1683 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1684
1685 try:
1686 vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
1687 if vapp_name is None:
1688 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1689 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1690 else:
1691 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1692
1693 # Delete vApp and wait for status change if task executed and vApp is None.
1694 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1695
1696 if vapp:
1697 if vapp.me.deployed:
1698 self.logger.info("Powering off vApp {}".format(vapp_name))
1699 #Power off vApp
1700 powered_off = False
1701 wait_time = 0
1702 while wait_time <= MAX_WAIT_TIME:
1703 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1704 if not vapp:
1705 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1706 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1707
1708 power_off_task = vapp.poweroff()
1709 if type(power_off_task) is GenericTask:
1710 result = vca.block_until_completed(power_off_task)
1711 if result:
1712 powered_off = True
1713 break
1714 else:
1715 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1716 time.sleep(INTERVAL_TIME)
1717
1718 wait_time +=INTERVAL_TIME
1719 if not powered_off:
1720 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1721 else:
1722 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1723
1724 #Undeploy vApp
1725 self.logger.info("Undeploy vApp {}".format(vapp_name))
1726 wait_time = 0
1727 undeployed = False
1728 while wait_time <= MAX_WAIT_TIME:
1729 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1730 if not vapp:
1731 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1732 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1733 undeploy_task = vapp.undeploy(action='powerOff')
1734
1735 if type(undeploy_task) is GenericTask:
1736 result = vca.block_until_completed(undeploy_task)
1737 if result:
1738 undeployed = True
1739 break
1740 else:
1741 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1742 time.sleep(INTERVAL_TIME)
1743
1744 wait_time +=INTERVAL_TIME
1745
1746 if not undeployed:
1747 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1748
1749 # delete vapp
1750 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1751 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1752
1753 if vapp is not None:
1754 wait_time = 0
1755 result = False
1756
1757 while wait_time <= MAX_WAIT_TIME:
1758 vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
1759 if not vapp:
1760 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1761 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1762
1763 delete_task = vapp.delete()
1764
1765 if type(delete_task) is GenericTask:
1766 vca.block_until_completed(delete_task)
1767 result = vca.block_until_completed(delete_task)
1768 if result:
1769 break
1770 else:
1771 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1772 time.sleep(INTERVAL_TIME)
1773
1774 wait_time +=INTERVAL_TIME
1775
1776 if not result:
1777 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1778
1779 except:
1780 self.logger.debug(traceback.format_exc())
1781 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1782
1783 if vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) is None:
1784 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
1785 return vm__vim_uuid
1786 else:
1787 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1788
1789 def refresh_vms_status(self, vm_list):
1790 """Get the status of the virtual machines and their interfaces/ports
1791 Params: the list of VM identifiers
1792 Returns a dictionary with:
1793 vm_id: #VIM id of this Virtual Machine
1794 status: #Mandatory. Text with one of:
1795 # DELETED (not found at vim)
1796 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1797 # OTHER (Vim reported other status not understood)
1798 # ERROR (VIM indicates an ERROR status)
1799 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
1800 # CREATING (on building process), ERROR
1801 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
1802 #
1803 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1804 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1805 interfaces:
1806 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1807 mac_address: #Text format XX:XX:XX:XX:XX:XX
1808 vim_net_id: #network id where this interface is connected
1809 vim_interface_id: #interface/port VIM id
1810 ip_address: #null, or text with IPv4, IPv6 address
1811 """
1812
1813 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
1814
1815 vca = self.connect()
1816 if not vca:
1817 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1818
1819 vdc = vca.get_vdc(self.tenant_name)
1820 if vdc is None:
1821 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1822
1823 vms_dict = {}
1824 nsx_edge_list = []
1825 for vmuuid in vm_list:
1826 vmname = self.get_namebyvappid(vca, vdc, vmuuid)
1827 if vmname is not None:
1828
1829 try:
1830 the_vapp = vca.get_vapp(vdc, vmname)
1831 vm_info = the_vapp.get_vms_details()
1832 vm_status = vm_info[0]['status']
1833 vm_pci_details = self.get_vm_pci_details(vmuuid)
1834 vm_info[0].update(vm_pci_details)
1835
1836 vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
1837 'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
1838 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
1839
1840 # get networks
1841 vm_app_networks = the_vapp.get_vms_network_info()
1842 for vapp_network in vm_app_networks:
1843 for vm_network in vapp_network:
1844 if vm_network['name'] == vmname:
1845 #Assign IP Address based on MAC Address in NSX DHCP lease info
1846 if vm_network['ip'] is None:
1847 if not nsx_edge_list:
1848 nsx_edge_list = self.get_edge_details()
1849 if nsx_edge_list is None:
1850 raise vimconn.vimconnException("refresh_vms_status:"\
1851 "Failed to get edge details from NSX Manager")
1852 if vm_network['mac'] is not None:
1853 vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
1854
1855 vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
1856 interface = {"mac_address": vm_network['mac'],
1857 "vim_net_id": vm_net_id,
1858 "vim_interface_id": vm_net_id,
1859 'ip_address': vm_network['ip']}
1860 # interface['vim_info'] = yaml.safe_dump(vm_network)
1861 vm_dict["interfaces"].append(interface)
1862 # add a vm to vm dict
1863 vms_dict.setdefault(vmuuid, vm_dict)
1864 except Exception as exp:
1865 self.logger.debug("Error in response {}".format(exp))
1866 self.logger.debug(traceback.format_exc())
1867
1868 return vms_dict
1869
1870
1871 def get_edge_details(self):
1872 """Get the NSX edge list from NSX Manager
1873 Returns list of NSX edges
1874 """
1875 edge_list = []
1876 rheaders = {'Content-Type': 'application/xml'}
1877 nsx_api_url = '/api/4.0/edges'
1878
1879 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
1880
1881 try:
1882 resp = requests.get(self.nsx_manager + nsx_api_url,
1883 auth = (self.nsx_user, self.nsx_password),
1884 verify = False, headers = rheaders)
1885 if resp.status_code == requests.codes.ok:
1886 paged_Edge_List = XmlElementTree.fromstring(resp.text)
1887 for edge_pages in paged_Edge_List:
1888 if edge_pages.tag == 'edgePage':
1889 for edge_summary in edge_pages:
1890 if edge_summary.tag == 'pagingInfo':
1891 for element in edge_summary:
1892 if element.tag == 'totalCount' and element.text == '0':
1893 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
1894 .format(self.nsx_manager))
1895
1896 if edge_summary.tag == 'edgeSummary':
1897 for element in edge_summary:
1898 if element.tag == 'id':
1899 edge_list.append(element.text)
1900 else:
1901 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
1902 .format(self.nsx_manager))
1903
1904 if not edge_list:
1905 raise vimconn.vimconnException("get_edge_details: "\
1906 "No NSX edge details found: {}"
1907 .format(self.nsx_manager))
1908 else:
1909 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
1910 return edge_list
1911 else:
1912 self.logger.debug("get_edge_details: "
1913 "Failed to get NSX edge details from NSX Manager: {}"
1914 .format(resp.content))
1915 return None
1916
1917 except Exception as exp:
1918 self.logger.debug("get_edge_details: "\
1919 "Failed to get NSX edge details from NSX Manager: {}"
1920 .format(exp))
1921 raise vimconn.vimconnException("get_edge_details: "\
1922 "Failed to get NSX edge details from NSX Manager: {}"
1923 .format(exp))
1924
1925
1926 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
1927 """Get IP address details from NSX edges, using the MAC address
1928 PARAMS: nsx_edges : List of NSX edges
1929 mac_address : Find IP address corresponding to this MAC address
1930 Returns: IP address corrresponding to the provided MAC address
1931 """
1932
1933 ip_addr = None
1934 rheaders = {'Content-Type': 'application/xml'}
1935
1936 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
1937
1938 try:
1939 for edge in nsx_edges:
1940 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
1941
1942 resp = requests.get(self.nsx_manager + nsx_api_url,
1943 auth = (self.nsx_user, self.nsx_password),
1944 verify = False, headers = rheaders)
1945
1946 if resp.status_code == requests.codes.ok:
1947 dhcp_leases = XmlElementTree.fromstring(resp.text)
1948 for child in dhcp_leases:
1949 if child.tag == 'dhcpLeaseInfo':
1950 dhcpLeaseInfo = child
1951 for leaseInfo in dhcpLeaseInfo:
1952 for elem in leaseInfo:
1953 if (elem.tag)=='macAddress':
1954 edge_mac_addr = elem.text
1955 if (elem.tag)=='ipAddress':
1956 ip_addr = elem.text
1957 if edge_mac_addr is not None:
1958 if edge_mac_addr == mac_address:
1959 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
1960 .format(ip_addr, mac_address,edge))
1961 return ip_addr
1962 else:
1963 self.logger.debug("get_ipaddr_from_NSXedge: "\
1964 "Error occurred while getting DHCP lease info from NSX Manager: {}"
1965 .format(resp.content))
1966
1967 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
1968 return None
1969
1970 except XmlElementTree.ParseError as Err:
1971 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
1972
1973
1974 def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
1975 """Send and action over a VM instance from VIM
1976 Returns the vm_id if the action was successfully sent to the VIM"""
1977
1978 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
1979 if vm__vim_uuid is None or action_dict is None:
1980 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
1981
1982 vca = self.connect()
1983 if not vca:
1984 raise vimconn.vimconnConnectionException("self.connect() is failed.")
1985
1986 vdc = vca.get_vdc(self.tenant_name)
1987 if vdc is None:
1988 return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)
1989
1990 vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
1991 if vapp_name is None:
1992 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1993 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1994 else:
1995 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1996
1997 try:
1998 the_vapp = vca.get_vapp(vdc, vapp_name)
1999 # TODO fix all status
2000 if "start" in action_dict:
2001 vm_info = the_vapp.get_vms_details()
2002 vm_status = vm_info[0]['status']
2003 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2004 if vm_status == "Suspended" or vm_status == "Powered off":
2005 power_on_task = the_vapp.poweron()
2006 result = vca.block_until_completed(power_on_task)
2007 self.instance_actions_result("start", result, vapp_name)
2008 elif "rebuild" in action_dict:
2009 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2010 rebuild_task = the_vapp.deploy(powerOn=True)
2011 result = vca.block_until_completed(rebuild_task)
2012 self.instance_actions_result("rebuild", result, vapp_name)
2013 elif "pause" in action_dict:
2014 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2015 pause_task = the_vapp.undeploy(action='suspend')
2016 result = vca.block_until_completed(pause_task)
2017 self.instance_actions_result("pause", result, vapp_name)
2018 elif "resume" in action_dict:
2019 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2020 power_task = the_vapp.poweron()
2021 result = vca.block_until_completed(power_task)
2022 self.instance_actions_result("resume", result, vapp_name)
2023 elif "shutoff" in action_dict or "shutdown" in action_dict:
2024 action_name , value = action_dict.items()[0]
2025 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2026 power_off_task = the_vapp.undeploy(action='powerOff')
2027 result = vca.block_until_completed(power_off_task)
2028 if action_name == "shutdown":
2029 self.instance_actions_result("shutdown", result, vapp_name)
2030 else:
2031 self.instance_actions_result("shutoff", result, vapp_name)
2032 elif "forceOff" in action_dict:
2033 result = the_vapp.undeploy(action='force')
2034 self.instance_actions_result("forceOff", result, vapp_name)
2035 elif "reboot" in action_dict:
2036 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2037 reboot_task = the_vapp.reboot()
2038 else:
2039 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2040 return vm__vim_uuid
2041 except Exception as exp :
2042 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2043 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2044
2045 def instance_actions_result(self, action, result, vapp_name):
2046 if result:
2047 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2048 else:
2049 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2050
2051 def get_vminstance_console(self, vm_id, console_type="vnc"):
2052 """
2053 Get a console for the virtual machine
2054 Params:
2055 vm_id: uuid of the VM
2056 console_type, can be:
2057 "novnc" (by default), "xvpvnc" for VNC types,
2058 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2059 Returns dict with the console parameters:
2060 protocol: ssh, ftp, http, https, ...
2061 server: usually ip address
2062 port: the http, ssh, ... port
2063 suffix: extra text, e.g. the http path and query string
2064 """
2065 raise vimconn.vimconnNotImplemented("Should have implemented this")
2066
2067 # NOT USED METHODS in current version
2068
2069 def host_vim2gui(self, host, server_dict):
2070 """Transform host dictionary from VIM format to GUI format,
2071 and append to the server_dict
2072 """
2073 raise vimconn.vimconnNotImplemented("Should have implemented this")
2074
2075 def get_hosts_info(self):
2076 """Get the information of deployed hosts
2077 Returns the hosts content"""
2078 raise vimconn.vimconnNotImplemented("Should have implemented this")
2079
2080 def get_hosts(self, vim_tenant):
2081 """Get the hosts and deployed instances
2082 Returns the hosts content"""
2083 raise vimconn.vimconnNotImplemented("Should have implemented this")
2084
2085 def get_processor_rankings(self):
2086 """Get the processor rankings in the VIM database"""
2087 raise vimconn.vimconnNotImplemented("Should have implemented this")
2088
2089 def new_host(self, host_data):
2090 """Adds a new host to VIM"""
2091 '''Returns status code of the VIM response'''
2092 raise vimconn.vimconnNotImplemented("Should have implemented this")
2093
2094 def new_external_port(self, port_data):
2095 """Adds a external port to VIM"""
2096 '''Returns the port identifier'''
2097 raise vimconn.vimconnNotImplemented("Should have implemented this")
2098
2099 def new_external_network(self, net_name, net_type):
2100 """Adds a external network to VIM (shared)"""
2101 '''Returns the network identifier'''
2102 raise vimconn.vimconnNotImplemented("Should have implemented this")
2103
2104 def connect_port_network(self, port_id, network_id, admin=False):
2105 """Connects a external port to a network"""
2106 '''Returns status code of the VIM response'''
2107 raise vimconn.vimconnNotImplemented("Should have implemented this")
2108
2109 def new_vminstancefromJSON(self, vm_data):
2110 """Adds a VM instance to VIM"""
2111 '''Returns the instance identifier'''
2112 raise vimconn.vimconnNotImplemented("Should have implemented this")
2113
2114 def get_network_name_by_id(self, network_uuid=None):
2115 """Method gets vcloud director network named based on supplied uuid.
2116
2117 Args:
2118 network_uuid: network_id
2119
2120 Returns:
2121 The return network name.
2122 """
2123
2124 vca = self.connect()
2125 if not vca:
2126 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2127
2128 if not network_uuid:
2129 return None
2130
2131 try:
2132 org_dict = self.get_org(self.org_uuid)
2133 if 'networks' in org_dict:
2134 org_network_dict = org_dict['networks']
2135 for net_uuid in org_network_dict:
2136 if net_uuid == network_uuid:
2137 return org_network_dict[net_uuid]
2138 except:
2139 self.logger.debug("Exception in get_network_name_by_id")
2140 self.logger.debug(traceback.format_exc())
2141
2142 return None
2143
2144 def get_network_id_by_name(self, network_name=None):
2145 """Method gets vcloud director network uuid based on supplied name.
2146
2147 Args:
2148 network_name: network_name
2149 Returns:
2150 The return network uuid.
2151 network_uuid: network_id
2152 """
2153
2154 vca = self.connect()
2155 if not vca:
2156 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2157
2158 if not network_name:
2159 self.logger.debug("get_network_id_by_name() : Network name is empty")
2160 return None
2161
2162 try:
2163 org_dict = self.get_org(self.org_uuid)
2164 if org_dict and 'networks' in org_dict:
2165 org_network_dict = org_dict['networks']
2166 for net_uuid,net_name in org_network_dict.iteritems():
2167 if net_name == network_name:
2168 return net_uuid
2169
2170 except KeyError as exp:
2171 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2172
2173 return None
2174
2175 def list_org_action(self):
2176 """
2177 Method leverages vCloud director and query for available organization for particular user
2178
2179 Args:
2180 vca - is active VCA connection.
2181 vdc_name - is a vdc name that will be used to query vms action
2182
2183 Returns:
2184 The return XML respond
2185 """
2186
2187 vca = self.connect()
2188 if not vca:
2189 raise vimconn.vimconnConnectionException("self.connect() is failed")
2190
2191 url_list = [vca.host, '/api/org']
2192 vm_list_rest_call = ''.join(url_list)
2193
2194 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2195 response = Http.get(url=vm_list_rest_call,
2196 headers=vca.vcloud_session.get_vcloud_headers(),
2197 verify=vca.verify,
2198 logger=vca.logger)
2199 if response.status_code == requests.codes.ok:
2200 return response.content
2201
2202 return None
2203
2204 def get_org_action(self, org_uuid=None):
2205 """
2206 Method leverages vCloud director and retrieve available object fdr organization.
2207
2208 Args:
2209 vca - is active VCA connection.
2210 vdc_name - is a vdc name that will be used to query vms action
2211
2212 Returns:
2213 The return XML respond
2214 """
2215
2216 vca = self.connect()
2217 if not vca:
2218 raise vimconn.vimconnConnectionException("self.connect() is failed")
2219
2220 if org_uuid is None:
2221 return None
2222
2223 url_list = [vca.host, '/api/org/', org_uuid]
2224 vm_list_rest_call = ''.join(url_list)
2225
2226 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2227 response = Http.get(url=vm_list_rest_call,
2228 headers=vca.vcloud_session.get_vcloud_headers(),
2229 verify=vca.verify,
2230 logger=vca.logger)
2231 if response.status_code == requests.codes.ok:
2232 return response.content
2233
2234 return None
2235
2236 def get_org(self, org_uuid=None):
2237 """
2238 Method retrieves available organization in vCloud Director
2239
2240 Args:
2241 org_uuid - is a organization uuid.
2242
2243 Returns:
2244 The return dictionary with following key
2245 "network" - for network list under the org
2246 "catalogs" - for network list under the org
2247 "vdcs" - for vdc list under org
2248 """
2249
2250 org_dict = {}
2251 vca = self.connect()
2252 if not vca:
2253 raise vimconn.vimconnConnectionException("self.connect() is failed")
2254
2255 if org_uuid is None:
2256 return org_dict
2257
2258 content = self.get_org_action(org_uuid=org_uuid)
2259 try:
2260 vdc_list = {}
2261 network_list = {}
2262 catalog_list = {}
2263 vm_list_xmlroot = XmlElementTree.fromstring(content)
2264 for child in vm_list_xmlroot:
2265 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2266 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2267 org_dict['vdcs'] = vdc_list
2268 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2269 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2270 org_dict['networks'] = network_list
2271 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2272 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2273 org_dict['catalogs'] = catalog_list
2274 except:
2275 pass
2276
2277 return org_dict
2278
2279 def get_org_list(self):
2280 """
2281 Method retrieves available organization in vCloud Director
2282
2283 Args:
2284 vca - is active VCA connection.
2285
2286 Returns:
2287 The return dictionary and key for each entry VDC UUID
2288 """
2289
2290 org_dict = {}
2291 vca = self.connect()
2292 if not vca:
2293 raise vimconn.vimconnConnectionException("self.connect() is failed")
2294
2295 content = self.list_org_action()
2296 try:
2297 vm_list_xmlroot = XmlElementTree.fromstring(content)
2298 for vm_xml in vm_list_xmlroot:
2299 if vm_xml.tag.split("}")[1] == 'Org':
2300 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2301 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2302 except:
2303 pass
2304
2305 return org_dict
2306
2307 def vms_view_action(self, vdc_name=None):
2308 """ Method leverages vCloud director vms query call
2309
2310 Args:
2311 vca - is active VCA connection.
2312 vdc_name - is a vdc name that will be used to query vms action
2313
2314 Returns:
2315 The return XML respond
2316 """
2317 vca = self.connect()
2318 if vdc_name is None:
2319 return None
2320
2321 url_list = [vca.host, '/api/vms/query']
2322 vm_list_rest_call = ''.join(url_list)
2323
2324 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2325 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
2326 vca.vcloud_session.organization.Link)
2327 if len(refs) == 1:
2328 response = Http.get(url=vm_list_rest_call,
2329 headers=vca.vcloud_session.get_vcloud_headers(),
2330 verify=vca.verify,
2331 logger=vca.logger)
2332 if response.status_code == requests.codes.ok:
2333 return response.content
2334
2335 return None
2336
2337 def get_vapp_list(self, vdc_name=None):
2338 """
2339 Method retrieves vApp list deployed vCloud director and returns a dictionary
2340 contains a list of all vapp deployed for queried VDC.
2341 The key for a dictionary is vApp UUID
2342
2343
2344 Args:
2345 vca - is active VCA connection.
2346 vdc_name - is a vdc name that will be used to query vms action
2347
2348 Returns:
2349 The return dictionary and key for each entry vapp UUID
2350 """
2351
2352 vapp_dict = {}
2353 if vdc_name is None:
2354 return vapp_dict
2355
2356 content = self.vms_view_action(vdc_name=vdc_name)
2357 try:
2358 vm_list_xmlroot = XmlElementTree.fromstring(content)
2359 for vm_xml in vm_list_xmlroot:
2360 if vm_xml.tag.split("}")[1] == 'VMRecord':
2361 if vm_xml.attrib['isVAppTemplate'] == 'true':
2362 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2363 if 'vappTemplate-' in rawuuid[0]:
2364 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2365 # vm and use raw UUID as key
2366 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2367 except:
2368 pass
2369
2370 return vapp_dict
2371
2372 def get_vm_list(self, vdc_name=None):
2373 """
2374 Method retrieves VM's list deployed vCloud director. It returns a dictionary
2375 contains a list of all VM's deployed for queried VDC.
2376 The key for a dictionary is VM UUID
2377
2378
2379 Args:
2380 vca - is active VCA connection.
2381 vdc_name - is a vdc name that will be used to query vms action
2382
2383 Returns:
2384 The return dictionary and key for each entry vapp UUID
2385 """
2386 vm_dict = {}
2387
2388 if vdc_name is None:
2389 return vm_dict
2390
2391 content = self.vms_view_action(vdc_name=vdc_name)
2392 try:
2393 vm_list_xmlroot = XmlElementTree.fromstring(content)
2394 for vm_xml in vm_list_xmlroot:
2395 if vm_xml.tag.split("}")[1] == 'VMRecord':
2396 if vm_xml.attrib['isVAppTemplate'] == 'false':
2397 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2398 if 'vm-' in rawuuid[0]:
2399 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2400 # vm and use raw UUID as key
2401 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2402 except:
2403 pass
2404
2405 return vm_dict
2406
2407 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2408 """
2409 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
2410 contains a list of all VM's deployed for queried VDC.
2411 The key for a dictionary is VM UUID
2412
2413
2414 Args:
2415 vca - is active VCA connection.
2416 vdc_name - is a vdc name that will be used to query vms action
2417
2418 Returns:
2419 The return dictionary and key for each entry vapp UUID
2420 """
2421 vm_dict = {}
2422 vca = self.connect()
2423 if not vca:
2424 raise vimconn.vimconnConnectionException("self.connect() is failed")
2425
2426 if vdc_name is None:
2427 return vm_dict
2428
2429 content = self.vms_view_action(vdc_name=vdc_name)
2430 try:
2431 vm_list_xmlroot = XmlElementTree.fromstring(content)
2432 for vm_xml in vm_list_xmlroot:
2433 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2434 # lookup done by UUID
2435 if isuuid:
2436 if vapp_name in vm_xml.attrib['container']:
2437 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2438 if 'vm-' in rawuuid[0]:
2439 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2440 break
2441 # lookup done by Name
2442 else:
2443 if vapp_name in vm_xml.attrib['name']:
2444 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2445 if 'vm-' in rawuuid[0]:
2446 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2447 break
2448 except:
2449 pass
2450
2451 return vm_dict
2452
2453 def get_network_action(self, network_uuid=None):
2454 """
2455 Method leverages vCloud director and query network based on network uuid
2456
2457 Args:
2458 vca - is active VCA connection.
2459 network_uuid - is a network uuid
2460
2461 Returns:
2462 The return XML respond
2463 """
2464
2465 vca = self.connect()
2466 if not vca:
2467 raise vimconn.vimconnConnectionException("self.connect() is failed")
2468
2469 if network_uuid is None:
2470 return None
2471
2472 url_list = [vca.host, '/api/network/', network_uuid]
2473 vm_list_rest_call = ''.join(url_list)
2474
2475 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2476 response = Http.get(url=vm_list_rest_call,
2477 headers=vca.vcloud_session.get_vcloud_headers(),
2478 verify=vca.verify,
2479 logger=vca.logger)
2480 if response.status_code == requests.codes.ok:
2481 return response.content
2482
2483 return None
2484
2485 def get_vcd_network(self, network_uuid=None):
2486 """
2487 Method retrieves available network from vCloud Director
2488
2489 Args:
2490 network_uuid - is VCD network UUID
2491
2492 Each element serialized as key : value pair
2493
2494 Following keys available for access. network_configuration['Gateway'}
2495 <Configuration>
2496 <IpScopes>
2497 <IpScope>
2498 <IsInherited>true</IsInherited>
2499 <Gateway>172.16.252.100</Gateway>
2500 <Netmask>255.255.255.0</Netmask>
2501 <Dns1>172.16.254.201</Dns1>
2502 <Dns2>172.16.254.202</Dns2>
2503 <DnsSuffix>vmwarelab.edu</DnsSuffix>
2504 <IsEnabled>true</IsEnabled>
2505 <IpRanges>
2506 <IpRange>
2507 <StartAddress>172.16.252.1</StartAddress>
2508 <EndAddress>172.16.252.99</EndAddress>
2509 </IpRange>
2510 </IpRanges>
2511 </IpScope>
2512 </IpScopes>
2513 <FenceMode>bridged</FenceMode>
2514
2515 Returns:
2516 The return dictionary and key for each entry vapp UUID
2517 """
2518
2519 network_configuration = {}
2520 if network_uuid is None:
2521 return network_uuid
2522
2523 try:
2524 content = self.get_network_action(network_uuid=network_uuid)
2525 vm_list_xmlroot = XmlElementTree.fromstring(content)
2526
2527 network_configuration['status'] = vm_list_xmlroot.get("status")
2528 network_configuration['name'] = vm_list_xmlroot.get("name")
2529 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
2530
2531 for child in vm_list_xmlroot:
2532 if child.tag.split("}")[1] == 'IsShared':
2533 network_configuration['isShared'] = child.text.strip()
2534 if child.tag.split("}")[1] == 'Configuration':
2535 for configuration in child.iter():
2536 tagKey = configuration.tag.split("}")[1].strip()
2537 if tagKey != "":
2538 network_configuration[tagKey] = configuration.text.strip()
2539 return network_configuration
2540 except Exception as exp :
2541 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
2542 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
2543
2544 return network_configuration
2545
2546 def delete_network_action(self, network_uuid=None):
2547 """
2548 Method delete given network from vCloud director
2549
2550 Args:
2551 network_uuid - is a network uuid that client wish to delete
2552
2553 Returns:
2554 The return None or XML respond or false
2555 """
2556
2557 vca = self.connect_as_admin()
2558 if not vca:
2559 raise vimconn.vimconnConnectionException("self.connect() is failed")
2560 if network_uuid is None:
2561 return False
2562
2563 url_list = [vca.host, '/api/admin/network/', network_uuid]
2564 vm_list_rest_call = ''.join(url_list)
2565
2566 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2567 response = Http.delete(url=vm_list_rest_call,
2568 headers=vca.vcloud_session.get_vcloud_headers(),
2569 verify=vca.verify,
2570 logger=vca.logger)
2571
2572 if response.status_code == 202:
2573 return True
2574
2575 return False
2576
2577 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2578 ip_profile=None, isshared='true'):
2579 """
2580 Method create network in vCloud director
2581
2582 Args:
2583 network_name - is network name to be created.
2584 net_type - can be 'bridge','data','ptp','mgmt'.
2585 ip_profile is a dict containing the IP parameters of the network
2586 isshared - is a boolean
2587 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2588 It optional attribute. by default if no parent network indicate the first available will be used.
2589
2590 Returns:
2591 The return network uuid or return None
2592 """
2593
2594 new_network_name = [network_name, '-', str(uuid.uuid4())]
2595 content = self.create_network_rest(network_name=''.join(new_network_name),
2596 ip_profile=ip_profile,
2597 net_type=net_type,
2598 parent_network_uuid=parent_network_uuid,
2599 isshared=isshared)
2600 if content is None:
2601 self.logger.debug("Failed create network {}.".format(network_name))
2602 return None
2603
2604 try:
2605 vm_list_xmlroot = XmlElementTree.fromstring(content)
2606 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2607 if len(vcd_uuid) == 4:
2608 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2609 return vcd_uuid[3]
2610 except:
2611 self.logger.debug("Failed create network {}".format(network_name))
2612 return None
2613
2614 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2615 ip_profile=None, isshared='true'):
2616 """
2617 Method create network in vCloud director
2618
2619 Args:
2620 network_name - is network name to be created.
2621 net_type - can be 'bridge','data','ptp','mgmt'.
2622 ip_profile is a dict containing the IP parameters of the network
2623 isshared - is a boolean
2624 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2625 It optional attribute. by default if no parent network indicate the first available will be used.
2626
2627 Returns:
2628 The return network uuid or return None
2629 """
2630
2631 vca = self.connect_as_admin()
2632 if not vca:
2633 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2634 if network_name is None:
2635 return None
2636
2637 url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
2638 vm_list_rest_call = ''.join(url_list)
2639 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2640 response = Http.get(url=vm_list_rest_call,
2641 headers=vca.vcloud_session.get_vcloud_headers(),
2642 verify=vca.verify,
2643 logger=vca.logger)
2644
2645 provider_network = None
2646 available_networks = None
2647 add_vdc_rest_url = None
2648
2649 if response.status_code != requests.codes.ok:
2650 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2651 response.status_code))
2652 return None
2653 else:
2654 try:
2655 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2656 for child in vm_list_xmlroot:
2657 if child.tag.split("}")[1] == 'ProviderVdcReference':
2658 provider_network = child.attrib.get('href')
2659 # application/vnd.vmware.admin.providervdc+xml
2660 if child.tag.split("}")[1] == 'Link':
2661 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
2662 and child.attrib.get('rel') == 'add':
2663 add_vdc_rest_url = child.attrib.get('href')
2664 except:
2665 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2666 self.logger.debug("Respond body {}".format(response.content))
2667 return None
2668
2669 # find pvdc provided available network
2670 response = Http.get(url=provider_network,
2671 headers=vca.vcloud_session.get_vcloud_headers(),
2672 verify=vca.verify,
2673 logger=vca.logger)
2674 if response.status_code != requests.codes.ok:
2675 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2676 response.status_code))
2677 return None
2678
2679 # available_networks.split("/")[-1]
2680
2681 if parent_network_uuid is None:
2682 try:
2683 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2684 for child in vm_list_xmlroot.iter():
2685 if child.tag.split("}")[1] == 'AvailableNetworks':
2686 for networks in child.iter():
2687 # application/vnd.vmware.admin.network+xml
2688 if networks.attrib.get('href') is not None:
2689 available_networks = networks.attrib.get('href')
2690 break
2691 except:
2692 return None
2693
2694 try:
2695 #Configure IP profile of the network
2696 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
2697
2698 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
2699 subnet_rand = random.randint(0, 255)
2700 ip_base = "192.168.{}.".format(subnet_rand)
2701 ip_profile['subnet_address'] = ip_base + "0/24"
2702 else:
2703 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
2704
2705 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
2706 ip_profile['gateway_address']=ip_base + "1"
2707 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
2708 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
2709 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
2710 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
2711 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
2712 ip_profile['dhcp_start_address']=ip_base + "3"
2713 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
2714 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
2715 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
2716 ip_profile['dns_address']=ip_base + "2"
2717
2718 gateway_address=ip_profile['gateway_address']
2719 dhcp_count=int(ip_profile['dhcp_count'])
2720 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
2721
2722 if ip_profile['dhcp_enabled']==True:
2723 dhcp_enabled='true'
2724 else:
2725 dhcp_enabled='false'
2726 dhcp_start_address=ip_profile['dhcp_start_address']
2727
2728 #derive dhcp_end_address from dhcp_start_address & dhcp_count
2729 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
2730 end_ip_int += dhcp_count - 1
2731 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
2732
2733 ip_version=ip_profile['ip_version']
2734 dns_address=ip_profile['dns_address']
2735 except KeyError as exp:
2736 self.logger.debug("Create Network REST: Key error {}".format(exp))
2737 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
2738
2739 # either use client provided UUID or search for a first available
2740 # if both are not defined we return none
2741 if parent_network_uuid is not None:
2742 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
2743 add_vdc_rest_url = ''.join(url_list)
2744
2745 #Creating all networks as Direct Org VDC type networks.
2746 #Unused in case of Underlay (data/ptp) network interface.
2747 fence_mode="bridged"
2748 is_inherited='false'
2749 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2750 <Description>Openmano created</Description>
2751 <Configuration>
2752 <IpScopes>
2753 <IpScope>
2754 <IsInherited>{1:s}</IsInherited>
2755 <Gateway>{2:s}</Gateway>
2756 <Netmask>{3:s}</Netmask>
2757 <Dns1>{4:s}</Dns1>
2758 <IsEnabled>{5:s}</IsEnabled>
2759 <IpRanges>
2760 <IpRange>
2761 <StartAddress>{6:s}</StartAddress>
2762 <EndAddress>{7:s}</EndAddress>
2763 </IpRange>
2764 </IpRanges>
2765 </IpScope>
2766 </IpScopes>
2767 <ParentNetwork href="{8:s}"/>
2768 <FenceMode>{9:s}</FenceMode>
2769 </Configuration>
2770 <IsShared>{10:s}</IsShared>
2771 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
2772 subnet_address, dns_address, dhcp_enabled,
2773 dhcp_start_address, dhcp_end_address, available_networks,
2774 fence_mode, isshared)
2775
2776 headers = vca.vcloud_session.get_vcloud_headers()
2777 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
2778 try:
2779 response = Http.post(url=add_vdc_rest_url,
2780 headers=headers,
2781 data=data,
2782 verify=vca.verify,
2783 logger=vca.logger)
2784
2785 if response.status_code != 201:
2786 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
2787 .format(response.status_code,response.content))
2788 else:
2789 network = networkType.parseString(response.content, True)
2790 create_nw_task = network.get_Tasks().get_Task()[0]
2791
2792 # if we all ok we respond with content after network creation completes
2793 # otherwise by default return None
2794 if create_nw_task is not None:
2795 self.logger.debug("Create Network REST : Waiting for Network creation complete")
2796 status = vca.block_until_completed(create_nw_task)
2797 if status:
2798 return response.content
2799 else:
2800 self.logger.debug("create_network_rest task failed. Network Create response : {}"
2801 .format(response.content))
2802 except Exception as exp:
2803 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
2804
2805 return None
2806
2807 def convert_cidr_to_netmask(self, cidr_ip=None):
2808 """
2809 Method sets convert CIDR netmask address to normal IP format
2810 Args:
2811 cidr_ip : CIDR IP address
2812 Returns:
2813 netmask : Converted netmask
2814 """
2815 if cidr_ip is not None:
2816 if '/' in cidr_ip:
2817 network, net_bits = cidr_ip.split('/')
2818 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2819 else:
2820 netmask = cidr_ip
2821 return netmask
2822 return None
2823
2824 def get_provider_rest(self, vca=None):
2825 """
2826 Method gets provider vdc view from vcloud director
2827
2828 Args:
2829 network_name - is network name to be created.
2830 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2831 It optional attribute. by default if no parent network indicate the first available will be used.
2832
2833 Returns:
2834 The return xml content of respond or None
2835 """
2836
2837 url_list = [vca.host, '/api/admin']
2838 response = Http.get(url=''.join(url_list),
2839 headers=vca.vcloud_session.get_vcloud_headers(),
2840 verify=vca.verify,
2841 logger=vca.logger)
2842
2843 if response.status_code == requests.codes.ok:
2844 return response.content
2845 return None
2846
2847 def create_vdc(self, vdc_name=None):
2848
2849 vdc_dict = {}
2850
2851 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
2852 if xml_content is not None:
2853 try:
2854 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
2855 for child in task_resp_xmlroot:
2856 if child.tag.split("}")[1] == 'Owner':
2857 vdc_id = child.attrib.get('href').split("/")[-1]
2858 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
2859 return vdc_dict
2860 except:
2861 self.logger.debug("Respond body {}".format(xml_content))
2862
2863 return None
2864
2865 def create_vdc_from_tmpl_rest(self, vdc_name=None):
2866 """
2867 Method create vdc in vCloud director based on VDC template.
2868 it uses pre-defined template that must be named openmano
2869
2870 Args:
2871 vdc_name - name of a new vdc.
2872
2873 Returns:
2874 The return xml content of respond or None
2875 """
2876
2877 self.logger.info("Creating new vdc {}".format(vdc_name))
2878 vca = self.connect()
2879 if not vca:
2880 raise vimconn.vimconnConnectionException("self.connect() is failed")
2881 if vdc_name is None:
2882 return None
2883
2884 url_list = [vca.host, '/api/vdcTemplates']
2885 vm_list_rest_call = ''.join(url_list)
2886 response = Http.get(url=vm_list_rest_call,
2887 headers=vca.vcloud_session.get_vcloud_headers(),
2888 verify=vca.verify,
2889 logger=vca.logger)
2890
2891 # container url to a template
2892 vdc_template_ref = None
2893 try:
2894 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2895 for child in vm_list_xmlroot:
2896 # application/vnd.vmware.admin.providervdc+xml
2897 # we need find a template from witch we instantiate VDC
2898 if child.tag.split("}")[1] == 'VdcTemplate':
2899 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml' and child.attrib.get(
2900 'name') == 'openmano':
2901 vdc_template_ref = child.attrib.get('href')
2902 except:
2903 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2904 self.logger.debug("Respond body {}".format(response.content))
2905 return None
2906
2907 # if we didn't found required pre defined template we return None
2908 if vdc_template_ref is None:
2909 return None
2910
2911 try:
2912 # instantiate vdc
2913 url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
2914 vm_list_rest_call = ''.join(url_list)
2915 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2916 <Source href="{1:s}"></Source>
2917 <Description>opnemano</Description>
2918 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
2919 headers = vca.vcloud_session.get_vcloud_headers()
2920 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
2921 response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
2922 logger=vca.logger)
2923 # if we all ok we respond with content otherwise by default None
2924 if response.status_code >= 200 and response.status_code < 300:
2925 return response.content
2926 return None
2927 except:
2928 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2929 self.logger.debug("Respond body {}".format(response.content))
2930
2931 return None
2932
2933 def create_vdc_rest(self, vdc_name=None):
2934 """
2935 Method create network in vCloud director
2936
2937 Args:
2938 network_name - is network name to be created.
2939 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2940 It optional attribute. by default if no parent network indicate the first available will be used.
2941
2942 Returns:
2943 The return network uuid or return None
2944 """
2945
2946 self.logger.info("Creating new vdc {}".format(vdc_name))
2947
2948 vca = self.connect_as_admin()
2949 if not vca:
2950 raise vimconn.vimconnConnectionException("self.connect() is failed")
2951 if vdc_name is None:
2952 return None
2953
2954 url_list = [vca.host, '/api/admin/org/', self.org_uuid]
2955 vm_list_rest_call = ''.join(url_list)
2956 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2957 response = Http.get(url=vm_list_rest_call,
2958 headers=vca.vcloud_session.get_vcloud_headers(),
2959 verify=vca.verify,
2960 logger=vca.logger)
2961
2962 provider_vdc_ref = None
2963 add_vdc_rest_url = None
2964 available_networks = None
2965
2966 if response.status_code != requests.codes.ok:
2967 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2968 response.status_code))
2969 return None
2970 else:
2971 try:
2972 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2973 for child in vm_list_xmlroot:
2974 # application/vnd.vmware.admin.providervdc+xml
2975 if child.tag.split("}")[1] == 'Link':
2976 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
2977 and child.attrib.get('rel') == 'add':
2978 add_vdc_rest_url = child.attrib.get('href')
2979 except:
2980 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2981 self.logger.debug("Respond body {}".format(response.content))
2982 return None
2983
2984 response = self.get_provider_rest(vca=vca)
2985 try:
2986 vm_list_xmlroot = XmlElementTree.fromstring(response)
2987 for child in vm_list_xmlroot:
2988 if child.tag.split("}")[1] == 'ProviderVdcReferences':
2989 for sub_child in child:
2990 provider_vdc_ref = sub_child.attrib.get('href')
2991 except:
2992 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2993 self.logger.debug("Respond body {}".format(response))
2994 return None
2995
2996 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
2997 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
2998 <AllocationModel>ReservationPool</AllocationModel>
2999 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
3000 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
3001 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
3002 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
3003 <ProviderVdcReference
3004 name="Main Provider"
3005 href="{2:s}" />
3006 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
3007 escape(vdc_name),
3008 provider_vdc_ref)
3009
3010 headers = vca.vcloud_session.get_vcloud_headers()
3011 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
3012 response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
3013 logger=vca.logger)
3014
3015 # if we all ok we respond with content otherwise by default None
3016 if response.status_code == 201:
3017 return response.content
3018 return None
3019
3020 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
3021 """
3022 Method retrieve vapp detail from vCloud director
3023
3024 Args:
3025 vapp_uuid - is vapp identifier.
3026
3027 Returns:
3028 The return network uuid or return None
3029 """
3030
3031 parsed_respond = {}
3032 vca = None
3033
3034 if need_admin_access:
3035 vca = self.connect_as_admin()
3036 else:
3037 vca = self.connect()
3038
3039 if not vca:
3040 raise vimconn.vimconnConnectionException("self.connect() is failed")
3041 if vapp_uuid is None:
3042 return None
3043
3044 url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
3045 get_vapp_restcall = ''.join(url_list)
3046
3047 if vca.vcloud_session and vca.vcloud_session.organization:
3048 response = Http.get(url=get_vapp_restcall,
3049 headers=vca.vcloud_session.get_vcloud_headers(),
3050 verify=vca.verify,
3051 logger=vca.logger)
3052
3053 if response.status_code != requests.codes.ok:
3054 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
3055 response.status_code))
3056 return parsed_respond
3057
3058 try:
3059 xmlroot_respond = XmlElementTree.fromstring(response.content)
3060 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
3061
3062 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
3063 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
3064 'vmw': 'http://www.vmware.com/schema/ovf',
3065 'vm': 'http://www.vmware.com/vcloud/v1.5',
3066 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
3067 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
3068 "xmlns":"http://www.vmware.com/vcloud/v1.5"
3069 }
3070
3071 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
3072 if created_section is not None:
3073 parsed_respond['created'] = created_section.text
3074
3075 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
3076 if network_section is not None and 'networkName' in network_section.attrib:
3077 parsed_respond['networkname'] = network_section.attrib['networkName']
3078
3079 ipscopes_section = \
3080 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
3081 namespaces)
3082 if ipscopes_section is not None:
3083 for ipscope in ipscopes_section:
3084 for scope in ipscope:
3085 tag_key = scope.tag.split("}")[1]
3086 if tag_key == 'IpRanges':
3087 ip_ranges = scope.getchildren()
3088 for ipblock in ip_ranges:
3089 for block in ipblock:
3090 parsed_respond[block.tag.split("}")[1]] = block.text
3091 else:
3092 parsed_respond[tag_key] = scope.text
3093
3094 # parse children section for other attrib
3095 children_section = xmlroot_respond.find('vm:Children/', namespaces)
3096 if children_section is not None:
3097 parsed_respond['name'] = children_section.attrib['name']
3098 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
3099 if "nestedHypervisorEnabled" in children_section.attrib else None
3100 parsed_respond['deployed'] = children_section.attrib['deployed']
3101 parsed_respond['status'] = children_section.attrib['status']
3102 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
3103 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
3104 nic_list = []
3105 for adapters in network_adapter:
3106 adapter_key = adapters.tag.split("}")[1]
3107 if adapter_key == 'PrimaryNetworkConnectionIndex':
3108 parsed_respond['primarynetwork'] = adapters.text
3109 if adapter_key == 'NetworkConnection':
3110 vnic = {}
3111 if 'network' in adapters.attrib:
3112 vnic['network'] = adapters.attrib['network']
3113 for adapter in adapters:
3114 setting_key = adapter.tag.split("}")[1]
3115 vnic[setting_key] = adapter.text
3116 nic_list.append(vnic)
3117
3118 for link in children_section:
3119 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3120 if link.attrib['rel'] == 'screen:acquireTicket':
3121 parsed_respond['acquireTicket'] = link.attrib
3122 if link.attrib['rel'] == 'screen:acquireMksTicket':
3123 parsed_respond['acquireMksTicket'] = link.attrib
3124
3125 parsed_respond['interfaces'] = nic_list
3126 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
3127 if vCloud_extension_section is not None:
3128 vm_vcenter_info = {}
3129 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
3130 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
3131 if vmext is not None:
3132 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
3133 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
3134
3135 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
3136 vm_virtual_hardware_info = {}
3137 if virtual_hardware_section is not None:
3138 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
3139 if item.find("rasd:Description",namespaces).text == "Hard disk":
3140 disk_size = item.find("rasd:HostResource" ,namespaces
3141 ).attrib["{"+namespaces['vm']+"}capacity"]
3142
3143 vm_virtual_hardware_info["disk_size"]= disk_size
3144 break
3145
3146 for link in virtual_hardware_section:
3147 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
3148 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
3149 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
3150 break
3151
3152 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
3153 except Exception as exp :
3154 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
3155 return parsed_respond
3156
3157 def acuire_console(self, vm_uuid=None):
3158
3159 vca = self.connect()
3160 if not vca:
3161 raise vimconn.vimconnConnectionException("self.connect() is failed")
3162 if vm_uuid is None:
3163 return None
3164
3165 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
3166 vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid)
3167 console_dict = vm_dict['acquireTicket']
3168 console_rest_call = console_dict['href']
3169
3170 response = Http.post(url=console_rest_call,
3171 headers=vca.vcloud_session.get_vcloud_headers(),
3172 verify=vca.verify,
3173 logger=vca.logger)
3174
3175 if response.status_code == requests.codes.ok:
3176 return response.content
3177
3178 return None
3179
3180 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3181 """
3182 Method retrieve vm disk details
3183
3184 Args:
3185 vapp_uuid - is vapp identifier.
3186 flavor_disk - disk size as specified in VNFD (flavor)
3187
3188 Returns:
3189 The return network uuid or return None
3190 """
3191 status = None
3192 try:
3193 #Flavor disk is in GB convert it into MB
3194 flavor_disk = int(flavor_disk) * 1024
3195 vm_details = self.get_vapp_details_rest(vapp_uuid)
3196 if vm_details:
3197 vm_name = vm_details["name"]
3198 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3199
3200 if vm_details and "vm_virtual_hardware" in vm_details:
3201 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3202 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3203
3204 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3205
3206 if flavor_disk > vm_disk:
3207 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3208 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3209 vm_disk, flavor_disk ))
3210 else:
3211 status = True
3212 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3213
3214 return status
3215 except Exception as exp:
3216 self.logger.info("Error occurred while modifing disk size {}".format(exp))
3217
3218
3219 def modify_vm_disk_rest(self, disk_href , disk_size):
3220 """
3221 Method retrieve modify vm disk size
3222
3223 Args:
3224 disk_href - vCD API URL to GET and PUT disk data
3225 disk_size - disk size as specified in VNFD (flavor)
3226
3227 Returns:
3228 The return network uuid or return None
3229 """
3230 vca = self.connect()
3231 if not vca:
3232 raise vimconn.vimconnConnectionException("self.connect() is failed")
3233 if disk_href is None or disk_size is None:
3234 return None
3235
3236 if vca.vcloud_session and vca.vcloud_session.organization:
3237 response = Http.get(url=disk_href,
3238 headers=vca.vcloud_session.get_vcloud_headers(),
3239 verify=vca.verify,
3240 logger=vca.logger)
3241
3242 if response.status_code != requests.codes.ok:
3243 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
3244 response.status_code))
3245 return None
3246 try:
3247 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
3248 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
3249 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
3250
3251 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
3252 if item.find("rasd:Description",namespaces).text == "Hard disk":
3253 disk_item = item.find("rasd:HostResource" ,namespaces )
3254 if disk_item is not None:
3255 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
3256 break
3257
3258 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
3259 xml_declaration=True)
3260
3261 #Send PUT request to modify disk size
3262 headers = vca.vcloud_session.get_vcloud_headers()
3263 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
3264
3265 response = Http.put(url=disk_href,
3266 data=data,
3267 headers=headers,
3268 verify=vca.verify, logger=self.logger)
3269
3270 if response.status_code != 202:
3271 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
3272 response.status_code))
3273 else:
3274 modify_disk_task = taskType.parseString(response.content, True)
3275 if type(modify_disk_task) is GenericTask:
3276 status = vca.block_until_completed(modify_disk_task)
3277 return status
3278
3279 return None
3280
3281 except Exception as exp :
3282 self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
3283 return None
3284
3285 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
3286 """
3287 Method to attach pci devices to VM
3288
3289 Args:
3290 vapp_uuid - uuid of vApp/VM
3291 pci_devices - pci devices infromation as specified in VNFD (flavor)
3292
3293 Returns:
3294 The status of add pci device task , vm object and
3295 vcenter_conect object
3296 """
3297 vm_obj = None
3298 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
3299 vcenter_conect, content = self.get_vcenter_content()
3300 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
3301
3302 if vm_moref_id:
3303 try:
3304 no_of_pci_devices = len(pci_devices)
3305 if no_of_pci_devices > 0:
3306 #Get VM and its host
3307 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3308 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
3309 if host_obj and vm_obj:
3310 #get PCI devies from host on which vapp is currently installed
3311 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
3312
3313 if avilable_pci_devices is None:
3314 #find other hosts with active pci devices
3315 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
3316 content,
3317 no_of_pci_devices
3318 )
3319
3320 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3321 #Migrate vm to the host where PCI devices are availble
3322 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
3323 task = self.relocate_vm(new_host_obj, vm_obj)
3324 if task is not None:
3325 result = self.wait_for_vcenter_task(task, vcenter_conect)
3326 self.logger.info("Migrate VM status: {}".format(result))
3327 host_obj = new_host_obj
3328 else:
3329 self.logger.info("Fail to migrate VM : {}".format(result))
3330 raise vimconn.vimconnNotFoundException(
3331 "Fail to migrate VM : {} to host {}".format(
3332 vmname_andid,
3333 new_host_obj)
3334 )
3335
3336 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
3337 #Add PCI devices one by one
3338 for pci_device in avilable_pci_devices:
3339 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
3340 if task:
3341 status= self.wait_for_vcenter_task(task, vcenter_conect)
3342 if status:
3343 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3344 else:
3345 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
3346 return True, vm_obj, vcenter_conect
3347 else:
3348 self.logger.error("Currently there is no host with"\
3349 " {} number of avaialble PCI devices required for VM {}".format(
3350 no_of_pci_devices,
3351 vmname_andid)
3352 )
3353 raise vimconn.vimconnNotFoundException(
3354 "Currently there is no host with {} "\
3355 "number of avaialble PCI devices required for VM {}".format(
3356 no_of_pci_devices,
3357 vmname_andid))
3358 else:
3359 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
3360
3361 except vmodl.MethodFault as error:
3362 self.logger.error("Error occurred while adding PCI devices {} ",error)
3363 return None, vm_obj, vcenter_conect
3364
3365 def get_vm_obj(self, content, mob_id):
3366 """
3367 Method to get the vsphere VM object associated with a given morf ID
3368 Args:
3369 vapp_uuid - uuid of vApp/VM
3370 content - vCenter content object
3371 mob_id - mob_id of VM
3372
3373 Returns:
3374 VM and host object
3375 """
3376 vm_obj = None
3377 host_obj = None
3378 try :
3379 container = content.viewManager.CreateContainerView(content.rootFolder,
3380 [vim.VirtualMachine], True
3381 )
3382 for vm in container.view:
3383 mobID = vm._GetMoId()
3384 if mobID == mob_id:
3385 vm_obj = vm
3386 host_obj = vm_obj.runtime.host
3387 break
3388 except Exception as exp:
3389 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3390 return host_obj, vm_obj
3391
3392 def get_pci_devices(self, host, need_devices):
3393 """
3394 Method to get the details of pci devices on given host
3395 Args:
3396 host - vSphere host object
3397 need_devices - number of pci devices needed on host
3398
3399 Returns:
3400 array of pci devices
3401 """
3402 all_devices = []
3403 all_device_ids = []
3404 used_devices_ids = []
3405
3406 try:
3407 if host:
3408 pciPassthruInfo = host.config.pciPassthruInfo
3409 pciDevies = host.hardware.pciDevice
3410
3411 for pci_status in pciPassthruInfo:
3412 if pci_status.passthruActive:
3413 for device in pciDevies:
3414 if device.id == pci_status.id:
3415 all_device_ids.append(device.id)
3416 all_devices.append(device)
3417
3418 #check if devices are in use
3419 avalible_devices = all_devices
3420 for vm in host.vm:
3421 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3422 vm_devices = vm.config.hardware.device
3423 for device in vm_devices:
3424 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3425 if device.backing.id in all_device_ids:
3426 for use_device in avalible_devices:
3427 if use_device.id == device.backing.id:
3428 avalible_devices.remove(use_device)
3429 used_devices_ids.append(device.backing.id)
3430 self.logger.debug("Device {} from devices {}"\
3431 "is in use".format(device.backing.id,
3432 device)
3433 )
3434 if len(avalible_devices) < need_devices:
3435 self.logger.debug("Host {} don't have {} number of active devices".format(host,
3436 need_devices))
3437 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
3438 avalible_devices))
3439 return None
3440 else:
3441 required_devices = avalible_devices[:need_devices]
3442 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
3443 len(avalible_devices),
3444 host,
3445 need_devices))
3446 self.logger.info("Retruning {} devices as {}".format(need_devices,
3447 required_devices ))
3448 return required_devices
3449
3450 except Exception as exp:
3451 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3452
3453 return None
3454
3455 def get_host_and_PCIdevices(self, content, need_devices):
3456 """
3457 Method to get the details of pci devices infromation on all hosts
3458
3459 Args:
3460 content - vSphere host object
3461 need_devices - number of pci devices needed on host
3462
3463 Returns:
3464 array of pci devices and host object
3465 """
3466 host_obj = None
3467 pci_device_objs = None
3468 try:
3469 if content:
3470 container = content.viewManager.CreateContainerView(content.rootFolder,
3471 [vim.HostSystem], True)
3472 for host in container.view:
3473 devices = self.get_pci_devices(host, need_devices)
3474 if devices:
3475 host_obj = host
3476 pci_device_objs = devices
3477 break
3478 except Exception as exp:
3479 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3480
3481 return host_obj,pci_device_objs
3482
3483 def relocate_vm(self, dest_host, vm) :
3484 """
3485 Method to get the relocate VM to new host
3486
3487 Args:
3488 dest_host - vSphere host object
3489 vm - vSphere VM object
3490
3491 Returns:
3492 task object
3493 """
3494 task = None
3495 try:
3496 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3497 task = vm.Relocate(relocate_spec)
3498 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3499 except Exception as exp:
3500 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
3501 dest_host, vm, exp))
3502 return task
3503
3504 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3505 """
3506 Waits and provides updates on a vSphere task
3507 """
3508 while task.info.state == vim.TaskInfo.State.running:
3509 time.sleep(2)
3510
3511 if task.info.state == vim.TaskInfo.State.success:
3512 if task.info.result is not None and not hideResult:
3513 self.logger.info('{} completed successfully, result: {}'.format(
3514 actionName,
3515 task.info.result))
3516 else:
3517 self.logger.info('Task {} completed successfully.'.format(actionName))
3518 else:
3519 self.logger.error('{} did not complete successfully: {} '.format(
3520 actionName,
3521 task.info.error)
3522 )
3523
3524 return task.info.result
3525
3526 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3527 """
3528 Method to add pci device in given VM
3529
3530 Args:
3531 host_object - vSphere host object
3532 vm_object - vSphere VM object
3533 host_pci_dev - host_pci_dev must be one of the devices from the
3534 host_object.hardware.pciDevice list
3535 which is configured as a PCI passthrough device
3536
3537 Returns:
3538 task object
3539 """
3540 task = None
3541 if vm_object and host_object and host_pci_dev:
3542 try :
3543 #Add PCI device to VM
3544 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3545 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3546
3547 if host_pci_dev.id not in systemid_by_pciid:
3548 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3549 return None
3550
3551 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3552 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3553 id=host_pci_dev.id,
3554 systemId=systemid_by_pciid[host_pci_dev.id],
3555 vendorId=host_pci_dev.vendorId,
3556 deviceName=host_pci_dev.deviceName)
3557
3558 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3559
3560 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3561 new_device_config.operation = "add"
3562 vmConfigSpec = vim.vm.ConfigSpec()
3563 vmConfigSpec.deviceChange = [new_device_config]
3564
3565 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3566 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3567 host_pci_dev, vm_object, host_object)
3568 )
3569 except Exception as exp:
3570 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
3571 host_pci_dev,
3572 vm_object,
3573 exp))
3574 return task
3575
3576 def get_vm_vcenter_info(self):
3577 """
3578 Method to get details of vCenter and vm
3579
3580 Args:
3581 vapp_uuid - uuid of vApp or VM
3582
3583 Returns:
3584 Moref Id of VM and deails of vCenter
3585 """
3586 vm_vcenter_info = {}
3587
3588 if self.vcenter_ip is not None:
3589 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3590 else:
3591 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3592 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3593 if self.vcenter_port is not None:
3594 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3595 else:
3596 raise vimconn.vimconnException(message="vCenter port is not provided."\
3597 " Please provide vCenter port while attaching datacenter to tenant in --config")
3598 if self.vcenter_user is not None:
3599 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3600 else:
3601 raise vimconn.vimconnException(message="vCenter user is not provided."\
3602 " Please provide vCenter user while attaching datacenter to tenant in --config")
3603
3604 if self.vcenter_password is not None:
3605 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3606 else:
3607 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3608 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3609
3610 return vm_vcenter_info
3611
3612
3613 def get_vm_pci_details(self, vmuuid):
3614 """
3615 Method to get VM PCI device details from vCenter
3616
3617 Args:
3618 vm_obj - vSphere VM object
3619
3620 Returns:
3621 dict of PCI devives attached to VM
3622
3623 """
3624 vm_pci_devices_info = {}
3625 try:
3626 vcenter_conect, content = self.get_vcenter_content()
3627 vm_moref_id = self.get_vm_moref_id(vmuuid)
3628 if vm_moref_id:
3629 #Get VM and its host
3630 if content:
3631 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3632 if host_obj and vm_obj:
3633 vm_pci_devices_info["host_name"]= host_obj.name
3634 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3635 for device in vm_obj.config.hardware.device:
3636 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3637 device_details={'devide_id':device.backing.id,
3638 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3639 }
3640 vm_pci_devices_info[device.deviceInfo.label] = device_details
3641 else:
3642 self.logger.error("Can not connect to vCenter while getting "\
3643 "PCI devices infromationn")
3644 return vm_pci_devices_info
3645 except Exception as exp:
3646 self.logger.error("Error occurred while getting VM infromationn"\
3647 " for VM : {}".format(exp))
3648 raise vimconn.vimconnException(message=exp)
3649
3650 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
3651 """
3652 Method to add network adapter type to vm
3653 Args :
3654 network_name - name of network
3655 primary_nic_index - int value for primary nic index
3656 nicIndex - int value for nic index
3657 nic_type - specify model name to which add to vm
3658 Returns:
3659 None
3660 """
3661 vca = self.connect()
3662 if not vca:
3663 raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
3664
3665 try:
3666 ip_address = None
3667 floating_ip = False
3668 if 'floating_ip' in net: floating_ip = net['floating_ip']
3669
3670 # Stub for ip_address feature
3671 if 'ip_address' in net: ip_address = net['ip_address']
3672
3673 if floating_ip:
3674 allocation_mode = "POOL"
3675 elif ip_address:
3676 allocation_mode = "MANUAL"
3677 else:
3678 allocation_mode = "DHCP"
3679
3680 if not nic_type:
3681 for vms in vapp._get_vms():
3682 vm_id = (vms.id).split(':')[-1]
3683
3684 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
3685
3686 response = Http.get(url=url_rest_call,
3687 headers=vca.vcloud_session.get_vcloud_headers(),
3688 verify=vca.verify,
3689 logger=vca.logger)
3690 if response.status_code != 200:
3691 self.logger.error("REST call {} failed reason : {}"\
3692 "status code : {}".format(url_rest_call,
3693 response.content,
3694 response.status_code))
3695 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3696 "network connection section")
3697
3698 data = response.content
3699 if '<PrimaryNetworkConnectionIndex>' not in data:
3700 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3701 <NetworkConnection network="{}">
3702 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3703 <IsConnected>true</IsConnected>
3704 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3705 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3706 allocation_mode)
3707 # Stub for ip_address feature
3708 if ip_address:
3709 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3710 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3711
3712 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3713 else:
3714 new_item = """<NetworkConnection network="{}">
3715 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3716 <IsConnected>true</IsConnected>
3717 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3718 </NetworkConnection>""".format(network_name, nicIndex,
3719 allocation_mode)
3720 # Stub for ip_address feature
3721 if ip_address:
3722 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3723 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3724
3725 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3726
3727 headers = vca.vcloud_session.get_vcloud_headers()
3728 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3729 response = Http.put(url=url_rest_call, headers=headers, data=data,
3730 verify=vca.verify,
3731 logger=vca.logger)
3732 if response.status_code != 202:
3733 self.logger.error("REST call {} failed reason : {}"\
3734 "status code : {} ".format(url_rest_call,
3735 response.content,
3736 response.status_code))
3737 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3738 "network connection section")
3739 else:
3740 nic_task = taskType.parseString(response.content, True)
3741 if isinstance(nic_task, GenericTask):
3742 vca.block_until_completed(nic_task)
3743 self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
3744 "default NIC type".format(vm_id))
3745 else:
3746 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
3747 "connect NIC type".format(vm_id))
3748 else:
3749 for vms in vapp._get_vms():
3750 vm_id = (vms.id).split(':')[-1]
3751
3752 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
3753
3754 response = Http.get(url=url_rest_call,
3755 headers=vca.vcloud_session.get_vcloud_headers(),
3756 verify=vca.verify,
3757 logger=vca.logger)
3758 if response.status_code != 200:
3759 self.logger.error("REST call {} failed reason : {}"\
3760 "status code : {}".format(url_rest_call,
3761 response.content,
3762 response.status_code))
3763 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
3764 "network connection section")
3765 data = response.content
3766 if '<PrimaryNetworkConnectionIndex>' not in data:
3767 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
3768 <NetworkConnection network="{}">
3769 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3770 <IsConnected>true</IsConnected>
3771 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3772 <NetworkAdapterType>{}</NetworkAdapterType>
3773 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
3774 allocation_mode, nic_type)
3775 # Stub for ip_address feature
3776 if ip_address:
3777 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3778 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3779
3780 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
3781 else:
3782 new_item = """<NetworkConnection network="{}">
3783 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
3784 <IsConnected>true</IsConnected>
3785 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
3786 <NetworkAdapterType>{}</NetworkAdapterType>
3787 </NetworkConnection>""".format(network_name, nicIndex,
3788 allocation_mode, nic_type)
3789 # Stub for ip_address feature
3790 if ip_address:
3791 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
3792 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
3793
3794 data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
3795
3796 headers = vca.vcloud_session.get_vcloud_headers()
3797 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
3798 response = Http.put(url=url_rest_call, headers=headers, data=data,
3799 verify=vca.verify,
3800 logger=vca.logger)
3801
3802 if response.status_code != 202:
3803 self.logger.error("REST call {} failed reason : {}"\
3804 "status code : {}".format(url_rest_call,
3805 response.content,
3806 response.status_code))
3807 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
3808 "network connection section")
3809 else:
3810 nic_task = taskType.parseString(response.content, True)
3811 if isinstance(nic_task, GenericTask):
3812 vca.block_until_completed(nic_task)
3813 self.logger.info("add_network_adapter_to_vms(): VM {} "\
3814 "conneced to NIC type {}".format(vm_id, nic_type))
3815 else:
3816 self.logger.error("add_network_adapter_to_vms(): VM {} "\
3817 "failed to connect NIC type {}".format(vm_id, nic_type))
3818 except Exception as exp:
3819 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
3820 "while adding Network adapter")
3821 raise vimconn.vimconnException(message=exp)
3822
3823
3824 def set_numa_affinity(self, vmuuid, paired_threads_id):
3825 """
3826 Method to assign numa affinity in vm configuration parammeters
3827 Args :
3828 vmuuid - vm uuid
3829 paired_threads_id - one or more virtual processor
3830 numbers
3831 Returns:
3832 return if True
3833 """
3834 try:
3835 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
3836 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
3837 context = None
3838 if hasattr(ssl, '_create_unverified_context'):
3839 context = ssl._create_unverified_context()
3840 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
3841 pwd=self.passwd, port=int(vm_vcenter_port),
3842 sslContext=context)
3843 atexit.register(Disconnect, vcenter_conect)
3844 content = vcenter_conect.RetrieveContent()
3845
3846 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
3847 if vm_obj:
3848 config_spec = vim.vm.ConfigSpec()
3849 config_spec.extraConfig = []
3850 opt = vim.option.OptionValue()
3851 opt.key = 'numa.nodeAffinity'
3852 opt.value = str(paired_threads_id)
3853 config_spec.extraConfig.append(opt)
3854 task = vm_obj.ReconfigVM_Task(config_spec)
3855 if task:
3856 result = self.wait_for_vcenter_task(task, vcenter_conect)
3857 extra_config = vm_obj.config.extraConfig
3858 flag = False
3859 for opts in extra_config:
3860 if 'numa.nodeAffinity' in opts.key:
3861 flag = True
3862 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
3863 "value {} for vm {}".format(opt.value, vm_obj))
3864 if flag:
3865 return
3866 else:
3867 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
3868 except Exception as exp:
3869 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
3870 "for VM {} : {}".format(vm_obj, vm_moref_id))
3871 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
3872 "affinity".format(exp))
3873
3874
3875
3876 def cloud_init(self, vapp, cloud_config):
3877 """
3878 Method to inject ssh-key
3879 vapp - vapp object
3880 cloud_config a dictionary with:
3881 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
3882 'users': (optional) list of users to be inserted, each item is a dict with:
3883 'name': (mandatory) user name,
3884 'key-pairs': (optional) list of strings with the public key to be inserted to the user
3885 'user-data': (optional) string is a text script to be passed directly to cloud-init
3886 'config-files': (optional). List of files to be transferred. Each item is a dict with:
3887 'dest': (mandatory) string with the destination absolute path
3888 'encoding': (optional, by default text). Can be one of:
3889 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
3890 'content' (mandatory): string with the content of the file
3891 'permissions': (optional) string with file permissions, typically octal notation '0644'
3892 'owner': (optional) file owner, string with the format 'owner:group'
3893 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
3894 """
3895 vca = self.connect()
3896 if not vca:
3897 raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
3898
3899 try:
3900 if isinstance(cloud_config, dict):
3901 key_pairs = []
3902 userdata = []
3903 if "key-pairs" in cloud_config:
3904 key_pairs = cloud_config["key-pairs"]
3905
3906 if "users" in cloud_config:
3907 userdata = cloud_config["users"]
3908
3909 for key in key_pairs:
3910 for user in userdata:
3911 if 'name' in user: user_name = user['name']
3912 if 'key-pairs' in user and len(user['key-pairs']) > 0:
3913 for user_key in user['key-pairs']:
3914 customize_script = """
3915 #!/bin/bash
3916 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
3917 if [ "$1" = "precustomization" ];then
3918 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
3919 if [ ! -d /root/.ssh ];then
3920 mkdir /root/.ssh
3921 chown root:root /root/.ssh
3922 chmod 700 /root/.ssh
3923 touch /root/.ssh/authorized_keys
3924 chown root:root /root/.ssh/authorized_keys
3925 chmod 600 /root/.ssh/authorized_keys
3926 # make centos with selinux happy
3927 which restorecon && restorecon -Rv /root/.ssh
3928 echo '{key}' >> /root/.ssh/authorized_keys
3929 else
3930 touch /root/.ssh/authorized_keys
3931 chown root:root /root/.ssh/authorized_keys
3932 chmod 600 /root/.ssh/authorized_keys
3933 echo '{key}' >> /root/.ssh/authorized_keys
3934 fi
3935 if [ -d /home/{user_name} ];then
3936 if [ ! -d /home/{user_name}/.ssh ];then
3937 mkdir /home/{user_name}/.ssh
3938 chown {user_name}:{user_name} /home/{user_name}/.ssh
3939 chmod 700 /home/{user_name}/.ssh
3940 touch /home/{user_name}/.ssh/authorized_keys
3941 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
3942 chmod 600 /home/{user_name}/.ssh/authorized_keys
3943 # make centos with selinux happy
3944 which restorecon && restorecon -Rv /home/{user_name}/.ssh
3945 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
3946 else
3947 touch /home/{user_name}/.ssh/authorized_keys
3948 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
3949 chmod 600 /home/{user_name}/.ssh/authorized_keys
3950 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
3951 fi
3952 fi
3953 fi""".format(key=key, user_name=user_name, user_key=user_key)
3954
3955 for vm in vapp._get_vms():
3956 vm_name = vm.name
3957 task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
3958 if isinstance(task, GenericTask):
3959 vca.block_until_completed(task)
3960 self.logger.info("cloud_init : customized guest os task "\
3961 "completed for VM {}".format(vm_name))
3962 else:
3963 self.logger.error("cloud_init : task for customized guest os"\
3964 "failed for VM {}".format(vm_name))
3965 except Exception as exp:
3966 self.logger.error("cloud_init : exception occurred while injecting "\
3967 "ssh-key")
3968 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
3969 "ssh-key".format(exp))
3970
3971
3972 def add_new_disk(self, vca, vapp_uuid, disk_size):
3973 """
3974 Method to create an empty vm disk
3975
3976 Args:
3977 vapp_uuid - is vapp identifier.
3978 disk_size - size of disk to be created in GB
3979
3980 Returns:
3981 None
3982 """
3983 status = False
3984 vm_details = None
3985 try:
3986 #Disk size in GB, convert it into MB
3987 if disk_size is not None:
3988 disk_size_mb = int(disk_size) * 1024
3989 vm_details = self.get_vapp_details_rest(vapp_uuid)
3990
3991 if vm_details and "vm_virtual_hardware" in vm_details:
3992 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
3993 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3994 status = self.add_new_disk_rest(vca, disk_href, disk_size_mb)
3995
3996 except Exception as exp:
3997 msg = "Error occurred while creating new disk {}.".format(exp)
3998 self.rollback_newvm(vapp_uuid, msg)
3999
4000 if status:
4001 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4002 else:
4003 #If failed to add disk, delete VM
4004 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4005 self.rollback_newvm(vapp_uuid, msg)
4006
4007
4008 def add_new_disk_rest(self, vca, disk_href, disk_size_mb):
4009 """
4010 Retrives vApp Disks section & add new empty disk
4011
4012 Args:
4013 disk_href: Disk section href to addd disk
4014 disk_size_mb: Disk size in MB
4015
4016 Returns: Status of add new disk task
4017 """
4018 status = False
4019 if vca.vcloud_session and vca.vcloud_session.organization:
4020 response = Http.get(url=disk_href,
4021 headers=vca.vcloud_session.get_vcloud_headers(),
4022 verify=vca.verify,
4023 logger=vca.logger)
4024
4025 if response.status_code != requests.codes.ok:
4026 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
4027 .format(disk_href, response.status_code))
4028 return status
4029 try:
4030 #Find but type & max of instance IDs assigned to disks
4031 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4032 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4033 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4034 instance_id = 0
4035 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4036 if item.find("rasd:Description",namespaces).text == "Hard disk":
4037 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
4038 if inst_id > instance_id:
4039 instance_id = inst_id
4040 disk_item = item.find("rasd:HostResource" ,namespaces)
4041 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
4042 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
4043
4044 instance_id = instance_id + 1
4045 new_item = """<Item>
4046 <rasd:Description>Hard disk</rasd:Description>
4047 <rasd:ElementName>New disk</rasd:ElementName>
4048 <rasd:HostResource
4049 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
4050 vcloud:capacity="{}"
4051 vcloud:busSubType="{}"
4052 vcloud:busType="{}"></rasd:HostResource>
4053 <rasd:InstanceID>{}</rasd:InstanceID>
4054 <rasd:ResourceType>17</rasd:ResourceType>
4055 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
4056
4057 new_data = response.content
4058 #Add new item at the bottom
4059 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
4060
4061 # Send PUT request to modify virtual hardware section with new disk
4062 headers = vca.vcloud_session.get_vcloud_headers()
4063 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4064
4065 response = Http.put(url=disk_href,
4066 data=new_data,
4067 headers=headers,
4068 verify=vca.verify, logger=self.logger)
4069
4070 if response.status_code != 202:
4071 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
4072 .format(disk_href, response.status_code, response.content))
4073 else:
4074 add_disk_task = taskType.parseString(response.content, True)
4075 if type(add_disk_task) is GenericTask:
4076 status = vca.block_until_completed(add_disk_task)
4077 if not status:
4078 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
4079
4080 except Exception as exp:
4081 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
4082
4083 return status
4084
4085
4086 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
4087 """
4088 Method to add existing disk to vm
4089 Args :
4090 catalogs - List of VDC catalogs
4091 image_id - Catalog ID
4092 template_name - Name of template in catalog
4093 vapp_uuid - UUID of vApp
4094 Returns:
4095 None
4096 """
4097 disk_info = None
4098 vcenter_conect, content = self.get_vcenter_content()
4099 #find moref-id of vm in image
4100 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
4101 image_id=image_id,
4102 )
4103
4104 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
4105 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
4106 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
4107 if catalog_vm_moref_id:
4108 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
4109 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
4110 if catalog_vm_obj:
4111 #find existing disk
4112 disk_info = self.find_disk(catalog_vm_obj)
4113 else:
4114 exp_msg = "No VM with image id {} found".format(image_id)
4115 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4116 else:
4117 exp_msg = "No Image found with image ID {} ".format(image_id)
4118 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4119
4120 if disk_info:
4121 self.logger.info("Existing disk_info : {}".format(disk_info))
4122 #get VM
4123 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4124 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
4125 if vm_obj:
4126 status = self.add_disk(vcenter_conect=vcenter_conect,
4127 vm=vm_obj,
4128 disk_info=disk_info,
4129 size=size,
4130 vapp_uuid=vapp_uuid
4131 )
4132 if status:
4133 self.logger.info("Disk from image id {} added to {}".format(image_id,
4134 vm_obj.config.name)
4135 )
4136 else:
4137 msg = "No disk found with image id {} to add in VM {}".format(
4138 image_id,
4139 vm_obj.config.name)
4140 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4141
4142
4143 def find_disk(self, vm_obj):
4144 """
4145 Method to find details of existing disk in VM
4146 Args :
4147 vm_obj - vCenter object of VM
4148 image_id - Catalog ID
4149 Returns:
4150 disk_info : dict of disk details
4151 """
4152 disk_info = {}
4153 if vm_obj:
4154 try:
4155 devices = vm_obj.config.hardware.device
4156 for device in devices:
4157 if type(device) is vim.vm.device.VirtualDisk:
4158 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4159 disk_info["full_path"] = device.backing.fileName
4160 disk_info["datastore"] = device.backing.datastore
4161 disk_info["capacityKB"] = device.capacityInKB
4162 break
4163 except Exception as exp:
4164 self.logger.error("find_disk() : exception occurred while "\
4165 "getting existing disk details :{}".format(exp))
4166 return disk_info
4167
4168
4169 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4170 """
4171 Method to add existing disk in VM
4172 Args :
4173 vcenter_conect - vCenter content object
4174 vm - vCenter vm object
4175 disk_info : dict of disk details
4176 Returns:
4177 status : status of add disk task
4178 """
4179 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4180 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4181 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4182 if size is not None:
4183 #Convert size from GB to KB
4184 sizeKB = int(size) * 1024 * 1024
4185 #compare size of existing disk and user given size.Assign whicherver is greater
4186 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4187 sizeKB, capacityKB))
4188 if sizeKB > capacityKB:
4189 capacityKB = sizeKB
4190
4191 if datastore and fullpath and capacityKB:
4192 try:
4193 spec = vim.vm.ConfigSpec()
4194 # get all disks on a VM, set unit_number to the next available
4195 unit_number = 0
4196 for dev in vm.config.hardware.device:
4197 if hasattr(dev.backing, 'fileName'):
4198 unit_number = int(dev.unitNumber) + 1
4199 # unit_number 7 reserved for scsi controller
4200 if unit_number == 7:
4201 unit_number += 1
4202 if isinstance(dev, vim.vm.device.VirtualDisk):
4203 #vim.vm.device.VirtualSCSIController
4204 controller_key = dev.controllerKey
4205
4206 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4207 unit_number, controller_key))
4208 # add disk here
4209 dev_changes = []
4210 disk_spec = vim.vm.device.VirtualDeviceSpec()
4211 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4212 disk_spec.device = vim.vm.device.VirtualDisk()
4213 disk_spec.device.backing = \
4214 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4215 disk_spec.device.backing.thinProvisioned = True
4216 disk_spec.device.backing.diskMode = 'persistent'
4217 disk_spec.device.backing.datastore = datastore
4218 disk_spec.device.backing.fileName = fullpath
4219
4220 disk_spec.device.unitNumber = unit_number
4221 disk_spec.device.capacityInKB = capacityKB
4222 disk_spec.device.controllerKey = controller_key
4223 dev_changes.append(disk_spec)
4224 spec.deviceChange = dev_changes
4225 task = vm.ReconfigVM_Task(spec=spec)
4226 status = self.wait_for_vcenter_task(task, vcenter_conect)
4227 return status
4228 except Exception as exp:
4229 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4230 "{} to vm {}".format(exp,
4231 fullpath,
4232 vm.config.name)
4233 self.rollback_newvm(vapp_uuid, exp_msg)
4234 else:
4235 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4236 self.rollback_newvm(vapp_uuid, msg)
4237
4238
4239 def get_vcenter_content(self):
4240 """
4241 Get the vsphere content object
4242 """
4243 try:
4244 vm_vcenter_info = self.get_vm_vcenter_info()
4245 except Exception as exp:
4246 self.logger.error("Error occurred while getting vCenter infromationn"\
4247 " for VM : {}".format(exp))
4248 raise vimconn.vimconnException(message=exp)
4249
4250 context = None
4251 if hasattr(ssl, '_create_unverified_context'):
4252 context = ssl._create_unverified_context()
4253
4254 vcenter_conect = SmartConnect(
4255 host=vm_vcenter_info["vm_vcenter_ip"],
4256 user=vm_vcenter_info["vm_vcenter_user"],
4257 pwd=vm_vcenter_info["vm_vcenter_password"],
4258 port=int(vm_vcenter_info["vm_vcenter_port"]),
4259 sslContext=context
4260 )
4261 atexit.register(Disconnect, vcenter_conect)
4262 content = vcenter_conect.RetrieveContent()
4263 return vcenter_conect, content
4264
4265
4266 def get_vm_moref_id(self, vapp_uuid):
4267 """
4268 Get the moref_id of given VM
4269 """
4270 try:
4271 if vapp_uuid:
4272 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4273 if vm_details and "vm_vcenter_info" in vm_details:
4274 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4275
4276 return vm_moref_id
4277
4278 except Exception as exp:
4279 self.logger.error("Error occurred while getting VM moref ID "\
4280 " for VM : {}".format(exp))
4281 return None
4282
4283
4284 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
4285 """
4286 Method to get vApp template details
4287 Args :
4288 catalogs - list of VDC catalogs
4289 image_id - Catalog ID to find
4290 template_name : template name in catalog
4291 Returns:
4292 parsed_respond : dict of vApp tempalte details
4293 """
4294 parsed_response = {}
4295
4296 vca = self.connect_as_admin()
4297 if not vca:
4298 raise vimconn.vimconnConnectionException("self.connect() is failed")
4299
4300 try:
4301 catalog = self.get_catalog_obj(image_id, catalogs)
4302 if catalog:
4303 template_name = self.get_catalogbyid(image_id, catalogs)
4304 catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
4305 if len(catalog_items) == 1:
4306 response = Http.get(catalog_items[0].get_href(),
4307 headers=vca.vcloud_session.get_vcloud_headers(),
4308 verify=vca.verify,
4309 logger=vca.logger)
4310 catalogItem = XmlElementTree.fromstring(response.content)
4311 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
4312 vapp_tempalte_href = entity.get("href")
4313 #get vapp details and parse moref id
4314
4315 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4316 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4317 'vmw': 'http://www.vmware.com/schema/ovf',
4318 'vm': 'http://www.vmware.com/vcloud/v1.5',
4319 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4320 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
4321 'xmlns':"http://www.vmware.com/vcloud/v1.5"
4322 }
4323
4324 if vca.vcloud_session and vca.vcloud_session.organization:
4325 response = Http.get(url=vapp_tempalte_href,
4326 headers=vca.vcloud_session.get_vcloud_headers(),
4327 verify=vca.verify,
4328 logger=vca.logger
4329 )
4330
4331 if response.status_code != requests.codes.ok:
4332 self.logger.debug("REST API call {} failed. Return status code {}".format(
4333 vapp_tempalte_href, response.status_code))
4334
4335 else:
4336 xmlroot_respond = XmlElementTree.fromstring(response.content)
4337 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4338 if children_section is not None:
4339 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4340 if vCloud_extension_section is not None:
4341 vm_vcenter_info = {}
4342 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4343 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4344 if vmext is not None:
4345 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4346 parsed_response["vm_vcenter_info"]= vm_vcenter_info
4347
4348 except Exception as exp :
4349 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4350
4351 return parsed_response
4352
4353
4354 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
4355 """
4356 Method to delete vApp
4357 Args :
4358 vapp_uuid - vApp UUID
4359 msg - Error message to be logged
4360 exp_type : Exception type
4361 Returns:
4362 None
4363 """
4364 if vapp_uuid:
4365 status = self.delete_vminstance(vapp_uuid)
4366 else:
4367 msg = "No vApp ID"
4368 self.logger.error(msg)
4369 if exp_type == "Genric":
4370 raise vimconn.vimconnException(msg)
4371 elif exp_type == "NotFound":
4372 raise vimconn.vimconnNotFoundException(message=msg)
4373
4374 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4375 """
4376 Method to attach SRIOV adapters to VM
4377
4378 Args:
4379 vapp_uuid - uuid of vApp/VM
4380 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
4381 vmname_andid - vmname
4382
4383 Returns:
4384 The status of add SRIOV adapter task , vm object and
4385 vcenter_conect object
4386 """
4387 vm_obj = None
4388 vcenter_conect, content = self.get_vcenter_content()
4389 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4390
4391 if vm_moref_id:
4392 try:
4393 no_of_sriov_devices = len(sriov_nets)
4394 if no_of_sriov_devices > 0:
4395 #Get VM and its host
4396 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4397 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4398 if host_obj and vm_obj:
4399 #get SRIOV devies from host on which vapp is currently installed
4400 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4401 no_of_sriov_devices,
4402 )
4403
4404 if len(avilable_sriov_devices) == 0:
4405 #find other hosts with active pci devices
4406 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4407 content,
4408 no_of_sriov_devices,
4409 )
4410
4411 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4412 #Migrate vm to the host where SRIOV devices are available
4413 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4414 new_host_obj))
4415 task = self.relocate_vm(new_host_obj, vm_obj)
4416 if task is not None:
4417 result = self.wait_for_vcenter_task(task, vcenter_conect)
4418 self.logger.info("Migrate VM status: {}".format(result))
4419 host_obj = new_host_obj
4420 else:
4421 self.logger.info("Fail to migrate VM : {}".format(result))
4422 raise vimconn.vimconnNotFoundException(
4423 "Fail to migrate VM : {} to host {}".format(
4424 vmname_andid,
4425 new_host_obj)
4426 )
4427
4428 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4429 #Add SRIOV devices one by one
4430 for sriov_net in sriov_nets:
4431 network_name = sriov_net.get('net_id')
4432 dvs_portgr_name = self.create_dvPort_group(network_name)
4433 if sriov_net.get('type') == "VF":
4434 #add vlan ID ,Modify portgroup for vlan ID
4435 self.configure_vlanID(content, vcenter_conect, network_name)
4436
4437 task = self.add_sriov_to_vm(content,
4438 vm_obj,
4439 host_obj,
4440 network_name,
4441 avilable_sriov_devices[0]
4442 )
4443 if task:
4444 status= self.wait_for_vcenter_task(task, vcenter_conect)
4445 if status:
4446 self.logger.info("Added SRIOV {} to VM {}".format(
4447 no_of_sriov_devices,
4448 str(vm_obj)))
4449 else:
4450 self.logger.error("Fail to add SRIOV {} to VM {}".format(
4451 no_of_sriov_devices,
4452 str(vm_obj)))
4453 raise vimconn.vimconnUnexpectedResponse(
4454 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
4455 )
4456 return True, vm_obj, vcenter_conect
4457 else:
4458 self.logger.error("Currently there is no host with"\
4459 " {} number of avaialble SRIOV "\
4460 "VFs required for VM {}".format(
4461 no_of_sriov_devices,
4462 vmname_andid)
4463 )
4464 raise vimconn.vimconnNotFoundException(
4465 "Currently there is no host with {} "\
4466 "number of avaialble SRIOV devices required for VM {}".format(
4467 no_of_sriov_devices,
4468 vmname_andid))
4469 else:
4470 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
4471
4472 except vmodl.MethodFault as error:
4473 self.logger.error("Error occurred while adding SRIOV {} ",error)
4474 return None, vm_obj, vcenter_conect
4475
4476
4477 def get_sriov_devices(self,host, no_of_vfs):
4478 """
4479 Method to get the details of SRIOV devices on given host
4480 Args:
4481 host - vSphere host object
4482 no_of_vfs - number of VFs needed on host
4483
4484 Returns:
4485 array of SRIOV devices
4486 """
4487 sriovInfo=[]
4488 if host:
4489 for device in host.config.pciPassthruInfo:
4490 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4491 if device.numVirtualFunction >= no_of_vfs:
4492 sriovInfo.append(device)
4493 break
4494 return sriovInfo
4495
4496
4497 def get_host_and_sriov_devices(self, content, no_of_vfs):
4498 """
4499 Method to get the details of SRIOV devices infromation on all hosts
4500
4501 Args:
4502 content - vSphere host object
4503 no_of_vfs - number of pci VFs needed on host
4504
4505 Returns:
4506 array of SRIOV devices and host object
4507 """
4508 host_obj = None
4509 sriov_device_objs = None
4510 try:
4511 if content:
4512 container = content.viewManager.CreateContainerView(content.rootFolder,
4513 [vim.HostSystem], True)
4514 for host in container.view:
4515 devices = self.get_sriov_devices(host, no_of_vfs)
4516 if devices:
4517 host_obj = host
4518 sriov_device_objs = devices
4519 break
4520 except Exception as exp:
4521 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4522
4523 return host_obj,sriov_device_objs
4524
4525
4526 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
4527 """
4528 Method to add SRIOV adapter to vm
4529
4530 Args:
4531 host_obj - vSphere host object
4532 vm_obj - vSphere vm object
4533 content - vCenter content object
4534 network_name - name of distributed virtaul portgroup
4535 sriov_device - SRIOV device info
4536
4537 Returns:
4538 task object
4539 """
4540 devices = []
4541 vnic_label = "sriov nic"
4542 try:
4543 dvs_portgr = self.get_dvport_group(network_name)
4544 network_name = dvs_portgr.name
4545 nic = vim.vm.device.VirtualDeviceSpec()
4546 # VM device
4547 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4548 nic.device = vim.vm.device.VirtualSriovEthernetCard()
4549 nic.device.addressType = 'assigned'
4550 #nic.device.key = 13016
4551 nic.device.deviceInfo = vim.Description()
4552 nic.device.deviceInfo.label = vnic_label
4553 nic.device.deviceInfo.summary = network_name
4554 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
4555
4556 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
4557 nic.device.backing.deviceName = network_name
4558 nic.device.backing.useAutoDetect = False
4559 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
4560 nic.device.connectable.startConnected = True
4561 nic.device.connectable.allowGuestControl = True
4562
4563 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
4564 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
4565 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
4566
4567 devices.append(nic)
4568 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
4569 task = vm_obj.ReconfigVM_Task(vmconf)
4570 return task
4571 except Exception as exp:
4572 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
4573 return None
4574
4575
4576 def create_dvPort_group(self, network_name):
4577 """
4578 Method to create disributed virtual portgroup
4579
4580 Args:
4581 network_name - name of network/portgroup
4582
4583 Returns:
4584 portgroup key
4585 """
4586 try:
4587 new_network_name = [network_name, '-', str(uuid.uuid4())]
4588 network_name=''.join(new_network_name)
4589 vcenter_conect, content = self.get_vcenter_content()
4590
4591 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4592 if dv_switch:
4593 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4594 dv_pg_spec.name = network_name
4595
4596 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4597 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4598 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4599 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4600 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4601 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4602
4603 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4604 self.wait_for_vcenter_task(task, vcenter_conect)
4605
4606 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4607 if dvPort_group:
4608 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
4609 return dvPort_group.key
4610 else:
4611 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
4612
4613 except Exception as exp:
4614 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
4615 " : {}".format(network_name, exp))
4616 return None
4617
4618 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4619 """
4620 Method to reconfigure disributed virtual portgroup
4621
4622 Args:
4623 dvPort_group_name - name of disributed virtual portgroup
4624 content - vCenter content object
4625 config_info - disributed virtual portgroup configuration
4626
4627 Returns:
4628 task object
4629 """
4630 try:
4631 dvPort_group = self.get_dvport_group(dvPort_group_name)
4632 if dvPort_group:
4633 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4634 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4635 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4636 if "vlanID" in config_info:
4637 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4638 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4639
4640 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4641 return task
4642 else:
4643 return None
4644 except Exception as exp:
4645 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
4646 " : {}".format(dvPort_group_name, exp))
4647 return None
4648
4649
4650 def destroy_dvport_group(self , dvPort_group_name):
4651 """
4652 Method to destroy disributed virtual portgroup
4653
4654 Args:
4655 network_name - name of network/portgroup
4656
4657 Returns:
4658 True if portgroup successfully got deleted else false
4659 """
4660 vcenter_conect, content = self.get_vcenter_content()
4661 try:
4662 status = None
4663 dvPort_group = self.get_dvport_group(dvPort_group_name)
4664 if dvPort_group:
4665 task = dvPort_group.Destroy_Task()
4666 status = self.wait_for_vcenter_task(task, vcenter_conect)
4667 return status
4668 except vmodl.MethodFault as exp:
4669 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
4670 exp, dvPort_group_name))
4671 return None
4672
4673
4674 def get_dvport_group(self, dvPort_group_name):
4675 """
4676 Method to get disributed virtual portgroup
4677
4678 Args:
4679 network_name - name of network/portgroup
4680
4681 Returns:
4682 portgroup object
4683 """
4684 vcenter_conect, content = self.get_vcenter_content()
4685 dvPort_group = None
4686 try:
4687 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4688 for item in container.view:
4689 if item.key == dvPort_group_name:
4690 dvPort_group = item
4691 break
4692 return dvPort_group
4693 except vmodl.MethodFault as exp:
4694 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4695 exp, dvPort_group_name))
4696 return None
4697
4698 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4699 """
4700 Method to get disributed virtual portgroup vlanID
4701
4702 Args:
4703 network_name - name of network/portgroup
4704
4705 Returns:
4706 vlan ID
4707 """
4708 vlanId = None
4709 try:
4710 dvPort_group = self.get_dvport_group(dvPort_group_name)
4711 if dvPort_group:
4712 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4713 except vmodl.MethodFault as exp:
4714 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4715 exp, dvPort_group_name))
4716 return vlanId
4717
4718
4719 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4720 """
4721 Method to configure vlanID in disributed virtual portgroup vlanID
4722
4723 Args:
4724 network_name - name of network/portgroup
4725
4726 Returns:
4727 None
4728 """
4729 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4730 if vlanID == 0:
4731 #configure vlanID
4732 vlanID = self.genrate_vlanID(dvPort_group_name)
4733 config = {"vlanID":vlanID}
4734 task = self.reconfig_portgroup(content, dvPort_group_name,
4735 config_info=config)
4736 if task:
4737 status= self.wait_for_vcenter_task(task, vcenter_conect)
4738 if status:
4739 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4740 dvPort_group_name,vlanID))
4741 else:
4742 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
4743 dvPort_group_name, vlanID))
4744
4745
4746 def genrate_vlanID(self, network_name):
4747 """
4748 Method to get unused vlanID
4749 Args:
4750 network_name - name of network/portgroup
4751 Returns:
4752 vlanID
4753 """
4754 vlan_id = None
4755 used_ids = []
4756 if self.config.get('vlanID_range') == None:
4757 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4758 "at config value before creating sriov network with vlan tag")
4759 if "used_vlanIDs" not in self.persistent_info:
4760 self.persistent_info["used_vlanIDs"] = {}
4761 else:
4762 used_ids = self.persistent_info["used_vlanIDs"].values()
4763
4764 for vlanID_range in self.config.get('vlanID_range'):
4765 start_vlanid , end_vlanid = vlanID_range.split("-")
4766 if start_vlanid > end_vlanid:
4767 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4768 vlanID_range))
4769
4770 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4771 if id not in used_ids:
4772 vlan_id = id
4773 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4774 return vlan_id
4775 if vlan_id is None:
4776 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4777
4778
4779 def get_obj(self, content, vimtype, name):
4780 """
4781 Get the vsphere object associated with a given text name
4782 """
4783 obj = None
4784 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4785 for item in container.view:
4786 if item.name == name:
4787 obj = item
4788 break
4789 return obj
4790