Adds SFC CRUD interface to the VIM connector
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2017 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
vimconn_vmware implementation of an Abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import traceback
33 import itertools
34 import requests
35 import ssl
36 import atexit
37
38 from pyVmomi import vim, vmodl
39 from pyVim.connect import SmartConnect, Disconnect
40
41 from xml.etree import ElementTree as XmlElementTree
42 from lxml import etree as lxmlElementTree
43
44 import yaml
45 from pyvcloud import Http
46 from pyvcloud.vcloudair import VCA
47 from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
48 vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
49 networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
50 from xml.sax.saxutils import escape
51
52 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
53 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
54 from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
55 from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
56
57 import logging
58 import json
59 import time
60 import uuid
61 import httplib
62 import hashlib
63 import socket
64 import struct
65 import netaddr
66 import random
67
# connector type passed to pyvcloud VCA sessions (standalone vCloud Director)
STANDALONE = 'standalone'

# keys used in the in-memory flavor dicts (see vimconnector.flavorlist)
FLAVOR_RAM_KEY = 'ram'
FLAVOR_VCPUS_KEY = 'vcpus'
FLAVOR_DISK_KEY = 'disk'
# default IP profile values -- presumably applied when a network is created
# without an explicit ip_profile; the consumer (create_network) is not visible
# in this chunk -- TODO confirm
DEFAULT_IP_PROFILE = {'dhcp_count':50,
                      'dhcp_enabled':True,
                      'ip_version':"IPv4"
                      }
# polling interval / upper bound in seconds for wait loops -- consumers are
# outside this chunk
INTERVAL_TIME = 5
MAX_WAIT_TIME = 1800

# pyvcloud API version used for every VCA session created by this connector
VCAVERSION = '5.9'

__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
__date__ = "$12-Jan-2017 11:09:29$"
__version__ = '0.1'

# vCD vApp status codes, kept for reference:
# -1: "Could not be created",
# 0: "Unresolved",
# 1: "Resolved",
# 2: "Deployed",
# 3: "Suspended",
# 4: "Powered on",
# 5: "Waiting for user input",
# 6: "Unknown state",
# 7: "Unrecognized state",
# 8: "Powered off",
# 9: "Inconsistent state",
# 10: "Children do not all have the same status",
# 11: "Upload initiated, OVF descriptor pending",
# 12: "Upload initiated, copying contents",
# 13: "Upload initiated , disk contents pending",
# 14: "Upload has been quarantined",
# 15: "Upload quarantine period has expired"

# mapping vCD status to MANO
vcdStatusCode2manoFormat = {4: 'ACTIVE',
                            7: 'PAUSED',
                            3: 'SUSPENDED',
                            8: 'INACTIVE',
                            12: 'BUILD',
                            -1: 'ERROR',
                            14: 'DELETED'}

# mapping of network status strings to the MANO vocabulary (identity map)
netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
                        'ERROR': 'ERROR', 'DELETED': 'DELETED'
                        }
120
121 class vimconnector(vimconn.vimconnector):
122 # dict used to store flavor in memory
123 flavorlist = {}
124
125 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
126 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
127 """
128 Constructor create vmware connector to vCloud director.
129
130 By default construct doesn't validate connection state. So client can create object with None arguments.
131 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
132
133 a) It initialize organization UUID
134 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
135
136 Args:
137 uuid - is organization uuid.
138 name - is organization name that must be presented in vCloud director.
139 tenant_id - is VDC uuid it must be presented in vCloud director
140 tenant_name - is VDC name.
141 url - is hostname or ip address of vCloud director
142 url_admin - same as above.
143 user - is user that administrator for organization. Caller must make sure that
144 username has right privileges.
145
146 password - is password for a user.
147
148 VMware connector also requires PVDC administrative privileges and separate account.
149 This variables must be passed via config argument dict contains keys
150
151 dict['admin_username']
152 dict['admin_password']
153 config - Provide NSX and vCenter information
154
155 Returns:
156 Nothing.
157 """
158
159 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
160 url_admin, user, passwd, log_level, config)
161
162 self.logger = logging.getLogger('openmano.vim.vmware')
163 self.logger.setLevel(10)
164 self.persistent_info = persistent_info
165
166 self.name = name
167 self.id = uuid
168 self.url = url
169 self.url_admin = url_admin
170 self.tenant_id = tenant_id
171 self.tenant_name = tenant_name
172 self.user = user
173 self.passwd = passwd
174 self.config = config
175 self.admin_password = None
176 self.admin_user = None
177 self.org_name = ""
178 self.nsx_manager = None
179 self.nsx_user = None
180 self.nsx_password = None
181
182 if tenant_name is not None:
183 orgnameandtenant = tenant_name.split(":")
184 if len(orgnameandtenant) == 2:
185 self.tenant_name = orgnameandtenant[1]
186 self.org_name = orgnameandtenant[0]
187 else:
188 self.tenant_name = tenant_name
189 if "orgname" in config:
190 self.org_name = config['orgname']
191
192 if log_level:
193 self.logger.setLevel(getattr(logging, log_level))
194
195 try:
196 self.admin_user = config['admin_username']
197 self.admin_password = config['admin_password']
198 except KeyError:
199 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
200
201 try:
202 self.nsx_manager = config['nsx_manager']
203 self.nsx_user = config['nsx_user']
204 self.nsx_password = config['nsx_password']
205 except KeyError:
206 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
207
208 self.vcenter_ip = config.get("vcenter_ip", None)
209 self.vcenter_port = config.get("vcenter_port", None)
210 self.vcenter_user = config.get("vcenter_user", None)
211 self.vcenter_password = config.get("vcenter_password", None)
212
213 # ############# Stub code for SRIOV #################
214 # try:
215 # self.dvs_name = config['dv_switch_name']
216 # except KeyError:
217 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
218 #
219 # self.vlanID_range = config.get("vlanID_range", None)
220
221 self.org_uuid = None
222 self.vca = None
223
224 if not url:
225 raise vimconn.vimconnException('url param can not be NoneType')
226
227 if not self.url_admin: # try to use normal url
228 self.url_admin = self.url
229
230 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
231 self.tenant_id, self.tenant_name))
232 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
233 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
234
235 # initialize organization
236 if self.user is not None and self.passwd is not None and self.url:
237 self.init_organization()
238
239 def __getitem__(self, index):
240 if index == 'name':
241 return self.name
242 if index == 'tenant_id':
243 return self.tenant_id
244 if index == 'tenant_name':
245 return self.tenant_name
246 elif index == 'id':
247 return self.id
248 elif index == 'org_name':
249 return self.org_name
250 elif index == 'org_uuid':
251 return self.org_uuid
252 elif index == 'user':
253 return self.user
254 elif index == 'passwd':
255 return self.passwd
256 elif index == 'url':
257 return self.url
258 elif index == 'url_admin':
259 return self.url_admin
260 elif index == "config":
261 return self.config
262 else:
263 raise KeyError("Invalid key '%s'" % str(index))
264
265 def __setitem__(self, index, value):
266 if index == 'name':
267 self.name = value
268 if index == 'tenant_id':
269 self.tenant_id = value
270 if index == 'tenant_name':
271 self.tenant_name = value
272 elif index == 'id':
273 self.id = value
274 elif index == 'org_name':
275 self.org_name = value
276 elif index == 'org_uuid':
277 self.org_uuid = value
278 elif index == 'user':
279 self.user = value
280 elif index == 'passwd':
281 self.passwd = value
282 elif index == 'url':
283 self.url = value
284 elif index == 'url_admin':
285 self.url_admin = value
286 else:
287 raise KeyError("Invalid key '%s'" % str(index))
288
289 def connect_as_admin(self):
290 """ Method connect as pvdc admin user to vCloud director.
291 There are certain action that can be done only by provider vdc admin user.
292 Organization creation / provider network creation etc.
293
294 Returns:
295 The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
296 """
297
298 self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
299
300 vca_admin = VCA(host=self.url,
301 username=self.admin_user,
302 service_type=STANDALONE,
303 version=VCAVERSION,
304 verify=False,
305 log=False)
306 result = vca_admin.login(password=self.admin_password, org='System')
307 if not result:
308 raise vimconn.vimconnConnectionException(
309 "Can't connect to a vCloud director as: {}".format(self.admin_user))
310 result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
311 if result is True:
312 self.logger.info(
313 "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
314
315 return vca_admin
316
317 def connect(self):
318 """ Method connect as normal user to vCloud director.
319
320 Returns:
321 The return vca object that letter can be used to connect to vCloud director as admin for VDC
322 """
323
324 try:
325 self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
326 self.user,
327 self.org_name))
328 vca = VCA(host=self.url,
329 username=self.user,
330 service_type=STANDALONE,
331 version=VCAVERSION,
332 verify=False,
333 log=False)
334
335 result = vca.login(password=self.passwd, org=self.org_name)
336 if not result:
337 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
338 result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
339 if result is True:
340 self.logger.info(
341 "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
342
343 except:
344 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
345 "{} as user: {}".format(self.org_name, self.user))
346
347 return vca
348
    def init_organization(self):
        """Initialize the organization UUID and the VDC (tenant) parameters.

        At bare minimum the client must provide an organization name present in
        vCloud director and a VDC.  The VDC UUID (tenant_id) and the Org UUID
        are resolved at run time from the names (and vice versa for the tenant
        name when only tenant_id was given).

        Side effects: sets self.vca, self.org_uuid, self.tenant_id,
        self.tenant_name.  On any failure the error is logged and
        self.org_uuid is reset to None (best-effort, no exception escapes the
        try block below).
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")

        self.vca = vca
        try:
            if self.org_uuid is None:
                org_dict = self.get_org_list()
                for org in org_dict:
                    # we set org UUID at the init phase but we can do it only when we have valid credential.
                    if org_dict[org] == self.org_name:
                        self.org_uuid = org
                        self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
                        break
                else:
                    # for/else: no organization matched the configured name
                    raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))

            # if well good we require for org details
            org_details_dict = self.get_org(org_uuid=self.org_uuid)

            # we have two case if we want to initialize VDC ID or VDC name at run time
            # tenant_name provided but no tenant id
            if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
                vdcs_dict = org_details_dict['vdcs']
                for vdc in vdcs_dict:
                    if vdcs_dict[vdc] == self.tenant_name:
                        self.tenant_id = vdc
                        self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
                                                                                               self.org_name))
                        break
                else:
                    # for/else: tenant name given but no VDC carries that name
                    raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
                # case two we have tenant_id but we don't have tenant name so we find and set it.
            if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
                vdcs_dict = org_details_dict['vdcs']
                for vdc in vdcs_dict:
                    if vdc == self.tenant_id:
                        self.tenant_name = vdcs_dict[vdc]
                        self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
                                                                                               self.org_name))
                        break
                else:
                    # for/else: tenant id given but no VDC carries that uuid
                    raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
            self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
        except:
            # deliberate best-effort: any failure leaves the connector with
            # org_uuid None; callers must cope with an uninitialized org
            self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
            self.logger.debug(traceback.format_exc())
            self.org_uuid = None
408
409 def new_tenant(self, tenant_name=None, tenant_description=None):
410 """ Method adds a new tenant to VIM with this name.
411 This action requires access to create VDC action in vCloud director.
412
413 Args:
414 tenant_name is tenant_name to be created.
415 tenant_description not used for this call
416
417 Return:
418 returns the tenant identifier in UUID format.
419 If action is failed method will throw vimconn.vimconnException method
420 """
421 vdc_task = self.create_vdc(vdc_name=tenant_name)
422 if vdc_task is not None:
423 vdc_uuid, value = vdc_task.popitem()
424 self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
425 return vdc_uuid
426 else:
427 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
428
429 def delete_tenant(self, tenant_id=None):
430 """Delete a tenant from VIM"""
431 'Returns the tenant identifier'
432 raise vimconn.vimconnNotImplemented("Should have implemented this")
433
434 def get_tenant_list(self, filter_dict={}):
435 """Obtain tenants of VIM
436 filter_dict can contain the following keys:
437 name: filter by tenant name
438 id: filter by tenant uuid/id
439 <other VIM specific>
440 Returns the tenant list of dictionaries:
441 [{'name':'<name>, 'id':'<id>, ...}, ...]
442
443 """
444 org_dict = self.get_org(self.org_uuid)
445 vdcs_dict = org_dict['vdcs']
446
447 vdclist = []
448 try:
449 for k in vdcs_dict:
450 entry = {'name': vdcs_dict[k], 'id': k}
451 # if caller didn't specify dictionary we return all tenants.
452 if filter_dict is not None and filter_dict:
453 filtered_entry = entry.copy()
454 filtered_dict = set(entry.keys()) - set(filter_dict)
455 for unwanted_key in filtered_dict: del entry[unwanted_key]
456 if filter_dict == entry:
457 vdclist.append(filtered_entry)
458 else:
459 vdclist.append(entry)
460 except:
461 self.logger.debug("Error in get_tenant_list()")
462 self.logger.debug(traceback.format_exc())
463 raise vimconn.vimconnException("Incorrect state. {}")
464
465 return vdclist
466
467 def new_network(self, net_name, net_type, ip_profile=None, shared=False):
468 """Adds a tenant network to VIM
469 net_name is the name
470 net_type can be 'bridge','data'.'ptp'.
471 ip_profile is a dict containing the IP parameters of the network
472 shared is a boolean
473 Returns the network identifier"""
474
475 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
476 .format(net_name, net_type, ip_profile, shared))
477
478 isshared = 'false'
479 if shared:
480 isshared = 'true'
481
482 # ############# Stub code for SRIOV #################
483 # if net_type == "data" or net_type == "ptp":
484 # if self.config.get('dv_switch_name') == None:
485 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
486 # network_uuid = self.create_dvPort_group(net_name)
487
488 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
489 ip_profile=ip_profile, isshared=isshared)
490 if network_uuid is not None:
491 return network_uuid
492 else:
493 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
494
495 def get_vcd_network_list(self):
496 """ Method available organization for a logged in tenant
497
498 Returns:
499 The return vca object that letter can be used to connect to vcloud direct as admin
500 """
501
502 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
503
504 if not self.tenant_name:
505 raise vimconn.vimconnConnectionException("Tenant name is empty.")
506
507 vdc = self.get_vdc_details()
508 if vdc is None:
509 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
510
511 vdc_uuid = vdc.get_id().split(":")[3]
512 networks = self.vca.get_networks(vdc.get_name())
513 network_list = []
514 try:
515 for network in networks:
516 filter_dict = {}
517 netid = network.get_id().split(":")
518 if len(netid) != 4:
519 continue
520
521 filter_dict["name"] = network.get_name()
522 filter_dict["id"] = netid[3]
523 filter_dict["shared"] = network.get_IsShared()
524 filter_dict["tenant_id"] = vdc_uuid
525 if network.get_status() == 1:
526 filter_dict["admin_state_up"] = True
527 else:
528 filter_dict["admin_state_up"] = False
529 filter_dict["status"] = "ACTIVE"
530 filter_dict["type"] = "bridge"
531 network_list.append(filter_dict)
532 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
533 except:
534 self.logger.debug("Error in get_vcd_network_list")
535 self.logger.debug(traceback.format_exc())
536 pass
537
538 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
539 return network_list
540
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM.

        Filter_dict can be:
            name: network name OR/AND
            id: network uuid OR/AND
            shared: boolean OR/AND
            tenant_id: tenant OR/AND
            admin_state_up: boolean
            status: 'ACTIVE'

        Returns the network list of dictionaries:
            [{<the fields at Filter_dict plus some VIM specific>}, ...]
        List can be empty.  When filter_dict is given, an entry is returned
        only when every filtered key matches exactly.
        """

        self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))

        if not self.tenant_name:
            raise vimconn.vimconnConnectionException("Tenant name is empty.")

        vdc = self.get_vdc_details()
        if vdc is None:
            raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))

        try:
            vdcid = vdc.get_id().split(":")[3]
            networks = self.vca.get_networks(vdc.get_name())
            network_list = []

            for network in networks:
                filter_entry = {}
                net_uuid = network.get_id().split(":")
                # network id is expected as a 4-part urn:...:<uuid>; skip others
                if len(net_uuid) != 4:
                    continue
                else:
                    net_uuid = net_uuid[3]
                # create dict entry
                self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
                                                                                   vdcid,
                                                                                   network.get_name()))
                filter_entry["name"] = network.get_name()
                filter_entry["id"] = net_uuid
                filter_entry["shared"] = network.get_IsShared()
                filter_entry["tenant_id"] = vdcid
                if network.get_status() == 1:
                    filter_entry["admin_state_up"] = True
                else:
                    filter_entry["admin_state_up"] = False
                filter_entry["status"] = "ACTIVE"
                filter_entry["type"] = "bridge"
                # keep a full copy before stripping keys for the comparison
                filtered_entry = filter_entry.copy()

                if filter_dict is not None and filter_dict:
                    # we remove all the key : value we don't care and match only
                    # respected field
                    filtered_dict = set(filter_entry.keys()) - set(filter_dict)
                    for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
                    if filter_dict == filter_entry:
                        network_list.append(filtered_entry)
                else:
                    network_list.append(filtered_entry)
        except:
            # NOTE(review): errors are swallowed and partial results returned;
            # if the failure happens before network_list is assigned the final
            # return raises UnboundLocalError -- TODO confirm intended behavior
            self.logger.debug("Error in get_vcd_network_list")
            self.logger.debug(traceback.format_exc())

        self.logger.debug("Returning {}".format(network_list))
        return network_list
610
611 def get_network(self, net_id):
612 """Method obtains network details of net_id VIM network
613 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
614
615 try:
616 vdc = self.get_vdc_details()
617 vdc_id = vdc.get_id().split(":")[3]
618
619 networks = self.vca.get_networks(vdc.get_name())
620 filter_dict = {}
621
622 for network in networks:
623 vdc_network_id = network.get_id().split(":")
624 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
625 filter_dict["name"] = network.get_name()
626 filter_dict["id"] = vdc_network_id[3]
627 filter_dict["shared"] = network.get_IsShared()
628 filter_dict["tenant_id"] = vdc_id
629 if network.get_status() == 1:
630 filter_dict["admin_state_up"] = True
631 else:
632 filter_dict["admin_state_up"] = False
633 filter_dict["status"] = "ACTIVE"
634 filter_dict["type"] = "bridge"
635 self.logger.debug("Returning {}".format(filter_dict))
636 return filter_dict
637 except:
638 self.logger.debug("Error in get_network")
639 self.logger.debug(traceback.format_exc())
640
641 return filter_dict
642
643 def delete_network(self, net_id):
644 """
645 Method Deletes a tenant network from VIM, provide the network id.
646
647 Returns the network identifier or raise an exception
648 """
649
650 # ############# Stub code for SRIOV #################
651 # dvport_group = self.get_dvport_group(net_id)
652 # if dvport_group:
653 # #delete portgroup
654 # status = self.destroy_dvport_group(net_id)
655 # if status:
656 # # Remove vlanID from persistent info
657 # if net_id in self.persistent_info["used_vlanIDs"]:
658 # del self.persistent_info["used_vlanIDs"][net_id]
659 #
660 # return net_id
661
662 vcd_network = self.get_vcd_network(network_uuid=net_id)
663 if vcd_network is not None and vcd_network:
664 if self.delete_network_action(network_uuid=net_id):
665 return net_id
666 else:
667 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
668
669 def refresh_nets_status(self, net_list):
670 """Get the status of the networks
671 Params: the list of network identifiers
672 Returns a dictionary with:
673 net_id: #VIM id of this network
674 status: #Mandatory. Text with one of:
675 # DELETED (not found at vim)
676 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
677 # OTHER (Vim reported other status not understood)
678 # ERROR (VIM indicates an ERROR status)
679 # ACTIVE, INACTIVE, DOWN (admin down),
680 # BUILD (on building process)
681 #
682 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
683 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
684
685 """
686
687 dict_entry = {}
688 try:
689 for net in net_list:
690 errormsg = ''
691 vcd_network = self.get_vcd_network(network_uuid=net)
692 if vcd_network is not None and vcd_network:
693 if vcd_network['status'] == '1':
694 status = 'ACTIVE'
695 else:
696 status = 'DOWN'
697 else:
698 status = 'DELETED'
699 errormsg = 'Network not found.'
700
701 dict_entry[net] = {'status': status, 'error_msg': errormsg,
702 'vim_info': yaml.safe_dump(vcd_network)}
703 except:
704 self.logger.debug("Error in refresh_nets_status")
705 self.logger.debug(traceback.format_exc())
706
707 return dict_entry
708
709 def get_flavor(self, flavor_id):
710 """Obtain flavor details from the VIM
711 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
712 """
713 if flavor_id not in vimconnector.flavorlist:
714 raise vimconn.vimconnNotFoundException("Flavor not found.")
715 return vimconnector.flavorlist[flavor_id]
716
717 def new_flavor(self, flavor_data):
718 """Adds a tenant flavor to VIM
719 flavor_data contains a dictionary with information, keys:
720 name: flavor name
721 ram: memory (cloud type) in MBytes
722 vpcus: cpus (cloud type)
723 extended: EPA parameters
724 - numas: #items requested in same NUMA
725 memory: number of 1G huge pages memory
726 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
727 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
728 - name: interface name
729 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
730 bandwidth: X Gbps; requested guarantee bandwidth
731 vpci: requested virtual PCI address
732 disk: disk size
733 is_public:
734 #TODO to concrete
735 Returns the flavor identifier"""
736
737 # generate a new uuid put to internal dict and return it.
738 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
739 new_flavor=flavor_data
740 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
741 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
742 disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
743
744 if not isinstance(ram, int):
745 raise vimconn.vimconnException("Non-integer value for ram")
746 elif not isinstance(cpu, int):
747 raise vimconn.vimconnException("Non-integer value for cpu")
748 elif not isinstance(disk, int):
749 raise vimconn.vimconnException("Non-integer value for disk")
750
751 extended_flv = flavor_data.get("extended")
752 if extended_flv:
753 numas=extended_flv.get("numas")
754 if numas:
755 for numa in numas:
756 #overwrite ram and vcpus
757 ram = numa['memory']*1024
758 if 'paired-threads' in numa:
759 cpu = numa['paired-threads']*2
760 elif 'cores' in numa:
761 cpu = numa['cores']
762 elif 'threads' in numa:
763 cpu = numa['threads']
764
765 new_flavor[FLAVOR_RAM_KEY] = ram
766 new_flavor[FLAVOR_VCPUS_KEY] = cpu
767 new_flavor[FLAVOR_DISK_KEY] = disk
768 # generate a new uuid put to internal dict and return it.
769 flavor_id = uuid.uuid4()
770 vimconnector.flavorlist[str(flavor_id)] = new_flavor
771 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
772
773 return str(flavor_id)
774
775 def delete_flavor(self, flavor_id):
776 """Deletes a tenant flavor from VIM identify by its id
777
778 Returns the used id or raise an exception
779 """
780 if flavor_id not in vimconnector.flavorlist:
781 raise vimconn.vimconnNotFoundException("Flavor not found.")
782
783 vimconnector.flavorlist.pop(flavor_id, None)
784 return flavor_id
785
786 def new_image(self, image_dict):
787 """
788 Adds a tenant image to VIM
789 Returns:
790 200, image-id if the image is created
791 <0, message if there is an error
792 """
793
794 return self.get_image_id_from_path(image_dict['location'])
795
796 def delete_image(self, image_id):
797 """
798
799 :param image_id:
800 :return:
801 """
802
803 raise vimconn.vimconnNotImplemented("Should have implemented this")
804
805 def catalog_exists(self, catalog_name, catalogs):
806 """
807
808 :param catalog_name:
809 :param catalogs:
810 :return:
811 """
812 for catalog in catalogs:
813 if catalog.name == catalog_name:
814 return True
815 return False
816
817 def create_vimcatalog(self, vca=None, catalog_name=None):
818 """ Create new catalog entry in vCloud director.
819
820 Args
821 vca: vCloud director.
822 catalog_name catalog that client wish to create. Note no validation done for a name.
823 Client must make sure that provide valid string representation.
824
825 Return (bool) True if catalog created.
826
827 """
828 try:
829 task = vca.create_catalog(catalog_name, catalog_name)
830 result = vca.block_until_completed(task)
831 if not result:
832 return False
833 catalogs = vca.get_catalogs()
834 except:
835 return False
836 return self.catalog_exists(catalog_name, catalogs)
837
838 # noinspection PyIncorrectDocstring
839 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
840 description='', progress=False, chunk_bytes=128 * 1024):
841 """
842 Uploads a OVF file to a vCloud catalog
843
844 :param chunk_bytes:
845 :param progress:
846 :param description:
847 :param image_name:
848 :param vca:
849 :param catalog_name: (str): The name of the catalog to upload the media.
850 :param media_file_name: (str): The name of the local media file to upload.
851 :return: (bool) True if the media file was successfully uploaded, false otherwise.
852 """
853 os.path.isfile(media_file_name)
854 statinfo = os.stat(media_file_name)
855
856 # find a catalog entry where we upload OVF.
857 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
858 # status change.
859 # if VCD can parse OVF we upload VMDK file
860 try:
861 for catalog in vca.get_catalogs():
862 if catalog_name != catalog.name:
863 continue
864 link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
865 link.get_rel() == 'add', catalog.get_Link())
866 assert len(link) == 1
867 data = """
868 <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
869 """ % (escape(catalog_name), escape(description))
870 headers = vca.vcloud_session.get_vcloud_headers()
871 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
872 response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
873 if response.status_code == requests.codes.created:
874 catalogItem = XmlElementTree.fromstring(response.content)
875 entity = [child for child in catalogItem if
876 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
877 href = entity.get('href')
878 template = href
879 response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
880 verify=vca.verify, logger=self.logger)
881
882 if response.status_code == requests.codes.ok:
883 media = mediaType.parseString(response.content, True)
884 link = filter(lambda link: link.get_rel() == 'upload:default',
885 media.get_Files().get_File()[0].get_Link())[0]
886 headers = vca.vcloud_session.get_vcloud_headers()
887 headers['Content-Type'] = 'Content-Type text/xml'
888 response = Http.put(link.get_href(),
889 data=open(media_file_name, 'rb'),
890 headers=headers,
891 verify=vca.verify, logger=self.logger)
892 if response.status_code != requests.codes.ok:
893 self.logger.debug(
894 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
895 media_file_name))
896 return False
897
898 # TODO fix this with aync block
899 time.sleep(5)
900
901 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
902
903 # uploading VMDK file
904 # check status of OVF upload and upload remaining files.
905 response = Http.get(template,
906 headers=vca.vcloud_session.get_vcloud_headers(),
907 verify=vca.verify,
908 logger=self.logger)
909
910 if response.status_code == requests.codes.ok:
911 media = mediaType.parseString(response.content, True)
912 number_of_files = len(media.get_Files().get_File())
913 for index in xrange(0, number_of_files):
914 links_list = filter(lambda link: link.get_rel() == 'upload:default',
915 media.get_Files().get_File()[index].get_Link())
916 for link in links_list:
917 # we skip ovf since it already uploaded.
918 if 'ovf' in link.get_href():
919 continue
920 # The OVF file and VMDK must be in a same directory
921 head, tail = os.path.split(media_file_name)
922 file_vmdk = head + '/' + link.get_href().split("/")[-1]
923 if not os.path.isfile(file_vmdk):
924 return False
925 statinfo = os.stat(file_vmdk)
926 if statinfo.st_size == 0:
927 return False
928 hrefvmdk = link.get_href()
929
930 if progress:
931 print("Uploading file: {}".format(file_vmdk))
932 if progress:
933 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
934 FileTransferSpeed()]
935 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
936
937 bytes_transferred = 0
938 f = open(file_vmdk, 'rb')
939 while bytes_transferred < statinfo.st_size:
940 my_bytes = f.read(chunk_bytes)
941 if len(my_bytes) <= chunk_bytes:
942 headers = vca.vcloud_session.get_vcloud_headers()
943 headers['Content-Range'] = 'bytes %s-%s/%s' % (
944 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
945 headers['Content-Length'] = str(len(my_bytes))
946 response = Http.put(hrefvmdk,
947 headers=headers,
948 data=my_bytes,
949 verify=vca.verify,
950 logger=None)
951
952 if response.status_code == requests.codes.ok:
953 bytes_transferred += len(my_bytes)
954 if progress:
955 progress_bar.update(bytes_transferred)
956 else:
957 self.logger.debug(
958 'file upload failed with error: [%s] %s' % (response.status_code,
959 response.content))
960
961 f.close()
962 return False
963 f.close()
964 if progress:
965 progress_bar.finish()
966 time.sleep(10)
967 return True
968 else:
969 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
970 format(catalog_name, media_file_name))
971 return False
972 except Exception as exp:
973 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
974 .format(catalog_name,media_file_name, exp))
975 raise vimconn.vimconnException(
976 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
977 .format(catalog_name,media_file_name, exp))
978
979 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
980 return False
981
982 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
983 """Upload media file"""
984 # TODO add named parameters for readability
985
986 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
987 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
988
989 def validate_uuid4(self, uuid_string=None):
990 """ Method validate correct format of UUID.
991
992 Return: true if string represent valid uuid
993 """
994 try:
995 val = uuid.UUID(uuid_string, version=4)
996 except ValueError:
997 return False
998 return True
999
1000 def get_catalogid(self, catalog_name=None, catalogs=None):
1001 """ Method check catalog and return catalog ID in UUID format.
1002
1003 Args
1004 catalog_name: catalog name as string
1005 catalogs: list of catalogs.
1006
1007 Return: catalogs uuid
1008 """
1009
1010 for catalog in catalogs:
1011 if catalog.name == catalog_name:
1012 catalog_id = catalog.get_id().split(":")
1013 return catalog_id[3]
1014 return None
1015
1016 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1017 """ Method check catalog and return catalog name lookup done by catalog UUID.
1018
1019 Args
1020 catalog_name: catalog name as string
1021 catalogs: list of catalogs.
1022
1023 Return: catalogs name or None
1024 """
1025
1026 if not self.validate_uuid4(uuid_string=catalog_uuid):
1027 return None
1028
1029 for catalog in catalogs:
1030 catalog_id = catalog.get_id().split(":")[3]
1031 if catalog_id == catalog_uuid:
1032 return catalog.name
1033 return None
1034
1035 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1036 """ Method check catalog and return catalog name lookup done by catalog UUID.
1037
1038 Args
1039 catalog_name: catalog name as string
1040 catalogs: list of catalogs.
1041
1042 Return: catalogs name or None
1043 """
1044
1045 if not self.validate_uuid4(uuid_string=catalog_uuid):
1046 return None
1047
1048 for catalog in catalogs:
1049 catalog_id = catalog.get_id().split(":")[3]
1050 if catalog_id == catalog_uuid:
1051 return catalog
1052 return None
1053
1054 def get_image_id_from_path(self, path=None, progress=False):
1055 """ Method upload OVF image to vCloud director.
1056
1057 Each OVF image represented as single catalog entry in vcloud director.
1058 The method check for existing catalog entry. The check done by file name without file extension.
1059
1060 if given catalog name already present method will respond with existing catalog uuid otherwise
1061 it will create new catalog entry and upload OVF file to newly created catalog.
1062
1063 If method can't create catalog entry or upload a file it will throw exception.
1064
1065 Method accept boolean flag progress that will output progress bar. It useful method
1066 for standalone upload use case. In case to test large file upload.
1067
1068 Args
1069 path: - valid path to OVF file.
1070 progress - boolean progress bar show progress bar.
1071
1072 Return: if image uploaded correct method will provide image catalog UUID.
1073 """
1074
1075 if not path:
1076 raise vimconn.vimconnException("Image path can't be None.")
1077
1078 if not os.path.isfile(path):
1079 raise vimconn.vimconnException("Can't read file. File not found.")
1080
1081 if not os.access(path, os.R_OK):
1082 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1083
1084 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1085
1086 dirpath, filename = os.path.split(path)
1087 flname, file_extension = os.path.splitext(path)
1088 if file_extension != '.ovf':
1089 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1090 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1091
1092 catalog_name = os.path.splitext(filename)[0]
1093 catalog_md5_name = hashlib.md5(path).hexdigest()
1094 self.logger.debug("File name {} Catalog Name {} file path {} "
1095 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1096
1097 try:
1098 catalogs = self.vca.get_catalogs()
1099 except Exception as exp:
1100 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1101 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1102
1103 if len(catalogs) == 0:
1104 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1105 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1106 if not result:
1107 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1108 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1109 media_name=filename, medial_file_name=path, progress=progress)
1110 if not result:
1111 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1112 return self.get_catalogid(catalog_name, self.vca.get_catalogs())
1113 else:
1114 for catalog in catalogs:
1115 # search for existing catalog if we find same name we return ID
1116 # TODO optimize this
1117 if catalog.name == catalog_md5_name:
1118 self.logger.debug("Found existing catalog entry for {} "
1119 "catalog id {}".format(catalog_name,
1120 self.get_catalogid(catalog_md5_name, catalogs)))
1121 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1122
1123 # if we didn't find existing catalog we create a new one and upload image.
1124 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1125 result = self.create_vimcatalog(self.vca, catalog_md5_name)
1126 if not result:
1127 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1128
1129 result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
1130 media_name=filename, medial_file_name=path, progress=progress)
1131 if not result:
1132 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1133
1134 return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
1135
1136 def get_image_list(self, filter_dict={}):
1137 '''Obtain tenant images from VIM
1138 Filter_dict can be:
1139 name: image name
1140 id: image uuid
1141 checksum: image checksum
1142 location: image path
1143 Returns the image list of dictionaries:
1144 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1145 List can be empty
1146 '''
1147
1148 try:
1149 image_list = []
1150 catalogs = self.vca.get_catalogs()
1151 if len(catalogs) == 0:
1152 return image_list
1153 else:
1154 for catalog in catalogs:
1155 catalog_uuid = catalog.get_id().split(":")[3]
1156 name = catalog.name
1157 filtered_dict = {}
1158 if filter_dict.get("name") and filter_dict["name"] != name:
1159 continue
1160 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1161 continue
1162 filtered_dict ["name"] = name
1163 filtered_dict ["id"] = catalog_uuid
1164 image_list.append(filtered_dict)
1165
1166 self.logger.debug("List of already created catalog items: {}".format(image_list))
1167 return image_list
1168 except Exception as exp:
1169 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1170
1171 def get_vappid(self, vdc=None, vapp_name=None):
1172 """ Method takes vdc object and vApp name and returns vapp uuid or None
1173
1174 Args:
1175 vdc: The VDC object.
1176 vapp_name: is application vappp name identifier
1177
1178 Returns:
1179 The return vApp name otherwise None
1180 """
1181 if vdc is None or vapp_name is None:
1182 return None
1183 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1184 try:
1185 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1186 vdc.ResourceEntities.ResourceEntity)
1187 if len(refs) == 1:
1188 return refs[0].href.split("vapp")[1][1:]
1189 except Exception as e:
1190 self.logger.exception(e)
1191 return False
1192 return None
1193
1194 def check_vapp(self, vdc=None, vapp_uuid=None):
1195 """ Method Method returns True or False if vapp deployed in vCloud director
1196
1197 Args:
1198 vca: Connector to VCA
1199 vdc: The VDC object.
1200 vappid: vappid is application identifier
1201
1202 Returns:
1203 The return True if vApp deployed
1204 :param vdc:
1205 :param vapp_uuid:
1206 """
1207 try:
1208 refs = filter(lambda ref:
1209 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1210 vdc.ResourceEntities.ResourceEntity)
1211 for ref in refs:
1212 vappid = ref.href.split("vapp")[1][1:]
1213 # find vapp with respected vapp uuid
1214 if vappid == vapp_uuid:
1215 return True
1216 except Exception as e:
1217 self.logger.exception(e)
1218 return False
1219 return False
1220
1221 def get_namebyvappid(self, vdc=None, vapp_uuid=None):
1222 """Method returns vApp name from vCD and lookup done by vapp_id.
1223
1224 Args:
1225 vca: Connector to VCA
1226 vdc: The VDC object.
1227 vapp_uuid: vappid is application identifier
1228
1229 Returns:
1230 The return vApp name otherwise None
1231 """
1232
1233 try:
1234 refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1235 vdc.ResourceEntities.ResourceEntity)
1236 for ref in refs:
1237 # we care only about UUID the rest doesn't matter
1238 vappid = ref.href.split("vapp")[1][1:]
1239 if vappid == vapp_uuid:
1240 response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
1241 logger=self.logger)
1242
1243 #Retry login if session expired & retry sending request
1244 if response.status_code == 403:
1245 response = self.retry_rest('GET', ref.href)
1246
1247 tree = XmlElementTree.fromstring(response.content)
1248 return tree.attrib['name']
1249 except Exception as e:
1250 self.logger.exception(e)
1251 return None
1252 return None
1253
    def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
                       cloud_config=None, disk_list=None):
        """Adds a VM instance to VIM

        Creates a vApp in vCloud director from the catalog template matching
        image_id, sizes it from the flavor, attaches disks and NICs, then
        deploys and powers it on, polling until the vApp reports deployed.

        NOTE(review): net_list uses a mutable default argument ({}); it is
        only read here, but callers should always pass their own list.

        Params:
            start: indicates if VM must start or boot in pause mode. Ignored
            image_id,flavor_id: image and flavor uuid
            net_list: list of interfaces, each one is a dictionary with:
                name:
                net_id: network uuid to connect
                vpci: virtual vcpi to assign
                model: interface model, virtio, e1000, ...
                mac_address:
                use: 'data', 'bridge', 'mgmt'
                type: 'virtual', 'PF', 'VF', 'VFnotShared'
                vim_id: filled/added by this function
            cloud_config: can be a text script to be passed directly to cloud-init,
                or an object to inject users and ssh keys with format:
                    key-pairs: [] list of keys to install to the default user
                    users: [{ name, key-pairs: []}] list of users to add with their key-pair
            #TODO ip, security groups
        Returns >=0, the instance identifier
            <0, error_text
        """

        self.logger.info("Creating new instance for entry {}".format(name))
        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
                                    description, start, image_id, flavor_id, net_list, cloud_config, disk_list))

        #new vm name = vmname + tenant_id + uuid
        new_vm_name = [name, '-', str(uuid.uuid4())]
        vmname_andid = ''.join(new_vm_name)

        # if vm already deployed we return existing uuid
        # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
        # if vapp_uuid is not None:
        #     return vapp_uuid

        # we check for presence of VDC, Catalog entry and Flavor.
        vdc = self.get_vdc_details()
        if vdc is None:
            raise vimconn.vimconnNotFoundException(
                "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
        catalogs = self.vca.get_catalogs()
        if catalogs is None:
            #Retry once, if failed by refreshing token
            self.get_token()
            catalogs = self.vca.get_catalogs()
            if catalogs is None:
                raise vimconn.vimconnNotFoundException(
                    "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))

        catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
        if catalog_hash_name:
            self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
        else:
            raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
                                                   "(Failed retrieve catalog information {})".format(name, image_id))


        # Set vCPU and Memory based on flavor.
        vm_cpus = None
        vm_memory = None
        vm_disk = None
        numas = None

        if flavor_id is not None:
            if flavor_id not in vimconnector.flavorlist:
                raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
                                                       "Failed retrieve flavor information "
                                                       "flavor id {}".format(name, flavor_id))
            else:
                try:
                    flavor = vimconnector.flavorlist[flavor_id]
                    vm_cpus = flavor[FLAVOR_VCPUS_KEY]
                    vm_memory = flavor[FLAVOR_RAM_KEY]
                    vm_disk = flavor[FLAVOR_DISK_KEY]
                    extended = flavor.get("extended", None)
                    if extended:
                        numas=extended.get("numas", None)

                except Exception as exp:
                    raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))

        # image upload creates template name as catalog name space Template.
        templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
        # NOTE(review): power_on is computed here but never passed to
        # create_vapp below; power-on happens explicitly at the end -- TODO confirm
        power_on = 'false'
        if start:
            power_on = 'true'

        # client must provide at least one entry in net_list if not we report error
        #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
        #If no mgmt, then the 1st NN in netlist is considered as primary net.
        primary_net = None
        primary_netname = None
        network_mode = 'bridged'
        if net_list is not None and len(net_list) > 0:
            for net in net_list:
                if 'use' in net and net['use'] == 'mgmt':
                    primary_net = net
            if primary_net is None:
                primary_net = net_list[0]

            try:
                primary_net_id = primary_net['net_id']
                network_dict = self.get_vcd_network(network_uuid=primary_net_id)
                if 'name' in network_dict:
                    primary_netname = network_dict['name']

            except KeyError:
                raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
        else:
            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))

        # use: 'data', 'bridge', 'mgmt'
        # create vApp.  Set vcpu and ram based on flavor id.
        try:
            # second iteration only happens when the first attempt failed,
            # presumably because the session token expired
            for retry in (1,2):
                vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
                                                self.get_catalogbyid(image_id, catalogs),
                                                network_name=None,  # None while creating vapp
                                                network_mode=network_mode,
                                                vm_name=vmname_andid,
                                                vm_cpus=vm_cpus,  # can be None if flavor is None
                                                vm_memory=vm_memory)  # can be None if flavor is None

                if not vapptask and retry==1:
                    self.get_token() # Retry getting token
                    continue
                else:
                    break

            if vapptask is None or vapptask is False:
                raise vimconn.vimconnUnexpectedResponse(
                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
            if type(vapptask) is VappTask:
                self.vca.block_until_completed(vapptask)

        except Exception as exp :
            raise vimconn.vimconnUnexpectedResponse(
                "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))

        # we should have now vapp in undeployed state.
        try:
            vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)

        except Exception as exp:
            raise vimconn.vimconnUnexpectedResponse(
                "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
                .format(vmname_andid, exp))

        if vapp_uuid is None:
            raise vimconn.vimconnUnexpectedResponse(
                "new_vminstance(): Failed to retrieve vApp {} after creation".format(
                    vmname_andid))

        #Add PCI passthrough/SRIOV configrations
        vm_obj = None
        pci_devices_info = []
        sriov_net_info = []
        reserve_memory = False

        # split requested interfaces into PCI passthrough and SR-IOV groups
        for net in net_list:
            if net["type"]=="PF":
                pci_devices_info.append(net)
            elif (net["type"]=="VF" or net["type"]=="VFnotShared") and 'net_id'in net:
                sriov_net_info.append(net)

        #Add PCI
        if len(pci_devices_info) > 0:
            self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
                                                                            vmname_andid ))
            PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
                                                                              pci_devices_info,
                                                                              vmname_andid)
            if PCI_devices_status:
                self.logger.info("Added PCI devives {} to VM {}".format(
                                                            pci_devices_info,
                                                            vmname_andid)
                                 )
                # PCI passthrough requires full memory reservation (done below)
                reserve_memory = True
            else:
                self.logger.info("Fail to add PCI devives {} to VM {}".format(
                                                            pci_devices_info,
                                                            vmname_andid)
                                 )

        vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
        # Modify vm disk
        if vm_disk:
            #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
            result = self.modify_vm_disk(vapp_uuid, vm_disk)
            if result :
                self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))

        #Add new or existing disks to vApp
        if disk_list:
            added_existing_disk = False
            for disk in disk_list:
                if 'device_type' in disk and disk['device_type'] == 'cdrom':
                    image_id = disk['image_id']
                    # Adding CD-ROM to VM
                    # will revisit code once specification ready to support this feature
                    self.insert_media_to_vm(vapp, image_id)
                elif "image_id" in disk and disk["image_id"] is not None:
                    self.logger.debug("Adding existing disk from image {} to vm {} ".format(
                                                                    disk["image_id"] , vapp_uuid))
                    self.add_existing_disk(catalogs=catalogs,
                                           image_id=disk["image_id"],
                                           size = disk["size"],
                                           template_name=templateName,
                                           vapp_uuid=vapp_uuid
                                           )
                    added_existing_disk = True
                else:
                    #Wait till added existing disk gets reflected into vCD database/API
                    if added_existing_disk:
                        time.sleep(5)
                        added_existing_disk = False
                    self.add_new_disk(vapp_uuid, disk['size'])

        if numas:
            # Assigning numa affinity setting
            for numa in numas:
                if 'paired-threads-id' in numa:
                    paired_threads_id = numa['paired-threads-id']
                    self.set_numa_affinity(vapp_uuid, paired_threads_id)

        # add NICs & connect to networks in netlist
        try:
            self.logger.info("Request to connect VM to a network: {}".format(net_list))
            nicIndex = 0
            primary_nic_index = 0
            for net in net_list:
                # openmano uses network id in UUID format.
                # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
                # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
                #   'vpci': '0000:00:11.0', 'name': 'eth0'}]

                if 'net_id' not in net:
                    continue

                interface_net_id = net['net_id']
                interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
                interface_network_mode = net['use']

                if interface_network_mode == 'mgmt':
                    primary_nic_index = nicIndex

                """- POOL (A static IP address is allocated automatically from a pool of addresses.)
                 - DHCP (The IP address is obtained from a DHCP service.)
                 - MANUAL (The IP address is assigned manually in the IpAddress element.)
                 - NONE (No IP addressing mode specified.)"""

                if primary_netname is not None:
                    # NOTE(review): filter() result is indexed and len()'d below;
                    # this is Python-2-only code -- confirm before a py3 migration
                    nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
                    if len(nets) == 1:
                        self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))

                        vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
                        task = vapp.connect_to_network(nets[0].name, nets[0].href)
                        if type(task) is GenericTask:
                            self.vca.block_until_completed(task)
                        # connect network to VM - with all DHCP by default

                        type_list = ['PF','VF','VFnotShared']
                        if 'type' in net and net['type'] not in type_list:
                            # fetching nic type from vnf
                            if 'model' in net:
                                nic_type = net['model']
                                self.logger.info("new_vminstance(): adding network adapter "\
                                                          "to a network {}".format(nets[0].name))
                                self.add_network_adapter_to_vms(vapp, nets[0].name,
                                                                primary_nic_index,
                                                                nicIndex,
                                                                net,
                                                                nic_type=nic_type)
                            else:
                                self.logger.info("new_vminstance(): adding network adapter "\
                                                         "to a network {}".format(nets[0].name))
                                self.add_network_adapter_to_vms(vapp, nets[0].name,
                                                                primary_nic_index,
                                                                nicIndex,
                                                                net)
                nicIndex += 1

            vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
            # cloud-init for ssh-key injection
            if cloud_config:
                self.cloud_init(vapp,cloud_config)

            # deploy and power on vm
            self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
            deploytask = vapp.deploy(powerOn=False)
            if type(deploytask) is GenericTask:
                self.vca.block_until_completed(deploytask)

            # ############# Stub code for SRIOV #################
            #Add SRIOV
            # if len(sriov_net_info) > 0:
            #     self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
            #                                                                        vmname_andid ))
            #     sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
            #                                                           sriov_net_info,
            #                                                           vmname_andid)
            #     if sriov_status:
            #         self.logger.info("Added SRIOV {} to VM {}".format(
            #                                                     sriov_net_info,
            #                                                     vmname_andid)
            #                          )
            #         reserve_memory = True
            #     else:
            #         self.logger.info("Fail to add SRIOV {} to VM {}".format(
            #                                                     sriov_net_info,
            #                                                     vmname_andid)
            #                          )

            # If VM has PCI devices or SRIOV reserve memory for VM
            if reserve_memory:
                memReserve = vm_obj.config.hardware.memoryMB
                spec = vim.vm.ConfigSpec()
                spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
                task = vm_obj.ReconfigVM_Task(spec=spec)
                if task:
                    result = self.wait_for_vcenter_task(task, vcenter_conect)
                    self.logger.info("Reserved memmoery {} MB for "\
                                     "VM VM status: {}".format(str(memReserve),result))
                else:
                    self.logger.info("Fail to reserved memmoery {} to VM {}".format(
                                                                str(memReserve),str(vm_obj)))

            self.logger.debug("new_vminstance(): power on vApp {} ".format(name))

            vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
            poweron_task = vapp.poweron()
            if type(poweron_task) is GenericTask:
                self.vca.block_until_completed(poweron_task)

        except Exception as exp :
            # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
            self.logger.debug("new_vminstance(): Failed create new vm instance {} with exception {}"
                              .format(name, exp))
            raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
                                           .format(name, exp))

        # check if vApp deployed and if that the case return vApp UUID otherwise -1
        wait_time = 0
        vapp_uuid = None
        while wait_time <= MAX_WAIT_TIME:
            try:
                vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
            except Exception as exp:
                raise vimconn.vimconnUnexpectedResponse(
                        "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
                        .format(vmname_andid, exp))

            if vapp and vapp.me.deployed:
                vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
                break
            else:
                self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
                time.sleep(INTERVAL_TIME)

            wait_time +=INTERVAL_TIME

        if vapp_uuid is not None:
            return vapp_uuid
        else:
            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
1622
1623 ##
1624 ##
1625 ## based on current discussion
1626 ##
1627 ##
1628 ## server:
1629 # created: '2016-09-08T11:51:58'
1630 # description: simple-instance.linux1.1
1631 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
1632 # hostId: e836c036-74e7-11e6-b249-0800273e724c
1633 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
1634 # status: ACTIVE
1635 # error_msg:
1636 # interfaces: …
1637 #
1638 def get_vminstance(self, vim_vm_uuid=None):
1639 """Returns the VM instance information from VIM"""
1640
1641 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
1642
1643 vdc = self.get_vdc_details()
1644 if vdc is None:
1645 raise vimconn.vimconnConnectionException(
1646 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1647
1648 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
1649 if not vm_info_dict:
1650 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1651 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
1652
1653 status_key = vm_info_dict['status']
1654 error = ''
1655 try:
1656 vm_dict = {'created': vm_info_dict['created'],
1657 'description': vm_info_dict['name'],
1658 'status': vcdStatusCode2manoFormat[int(status_key)],
1659 'hostId': vm_info_dict['vmuuid'],
1660 'error_msg': error,
1661 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1662
1663 if 'interfaces' in vm_info_dict:
1664 vm_dict['interfaces'] = vm_info_dict['interfaces']
1665 else:
1666 vm_dict['interfaces'] = []
1667 except KeyError:
1668 vm_dict = {'created': '',
1669 'description': '',
1670 'status': vcdStatusCode2manoFormat[int(-1)],
1671 'hostId': vm_info_dict['vmuuid'],
1672 'error_msg': "Inconsistency state",
1673 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
1674
1675 return vm_dict
1676
1677 def delete_vminstance(self, vm__vim_uuid):
1678 """Method poweroff and remove VM instance from vcloud director network.
1679
1680 Args:
1681 vm__vim_uuid: VM UUID
1682
1683 Returns:
1684 Returns the instance identifier
1685 """
1686
1687 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
1688
1689 vdc = self.get_vdc_details()
1690 if vdc is None:
1691 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
1692 self.tenant_name))
1693 raise vimconn.vimconnException(
1694 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
1695
1696 try:
1697 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1698 if vapp_name is None:
1699 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1700 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1701 else:
1702 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1703
1704 # Delete vApp and wait for status change if task executed and vApp is None.
1705 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1706
1707 if vapp:
1708 if vapp.me.deployed:
1709 self.logger.info("Powering off vApp {}".format(vapp_name))
1710 #Power off vApp
1711 powered_off = False
1712 wait_time = 0
1713 while wait_time <= MAX_WAIT_TIME:
1714 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1715 if not vapp:
1716 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1717 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1718
1719 power_off_task = vapp.poweroff()
1720 if type(power_off_task) is GenericTask:
1721 result = self.vca.block_until_completed(power_off_task)
1722 if result:
1723 powered_off = True
1724 break
1725 else:
1726 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
1727 time.sleep(INTERVAL_TIME)
1728
1729 wait_time +=INTERVAL_TIME
1730 if not powered_off:
1731 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
1732 else:
1733 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
1734
1735 #Undeploy vApp
1736 self.logger.info("Undeploy vApp {}".format(vapp_name))
1737 wait_time = 0
1738 undeployed = False
1739 while wait_time <= MAX_WAIT_TIME:
1740 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1741 if not vapp:
1742 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1743 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1744 undeploy_task = vapp.undeploy(action='powerOff')
1745
1746 if type(undeploy_task) is GenericTask:
1747 result = self.vca.block_until_completed(undeploy_task)
1748 if result:
1749 undeployed = True
1750 break
1751 else:
1752 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
1753 time.sleep(INTERVAL_TIME)
1754
1755 wait_time +=INTERVAL_TIME
1756
1757 if not undeployed:
1758 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
1759
1760 # delete vapp
1761 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
1762 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1763
1764 if vapp is not None:
1765 wait_time = 0
1766 result = False
1767
1768 while wait_time <= MAX_WAIT_TIME:
1769 vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
1770 if not vapp:
1771 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1772 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
1773
1774 delete_task = vapp.delete()
1775
1776 if type(delete_task) is GenericTask:
1777 self.vca.block_until_completed(delete_task)
1778 result = self.vca.block_until_completed(delete_task)
1779 if result:
1780 break
1781 else:
1782 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
1783 time.sleep(INTERVAL_TIME)
1784
1785 wait_time +=INTERVAL_TIME
1786
1787 if not result:
1788 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
1789
1790 except:
1791 self.logger.debug(traceback.format_exc())
1792 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1793
1794 if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
1795 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
1796 return vm__vim_uuid
1797 else:
1798 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
1799
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
           Params: the list of VM identifiers
           Returns a dictionary with:
                vm_id:          #VIM id of this Virtual Machine
                    status:     #Mandatory. Text with one of:
                                #  DELETED (not found at vim)
                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                #  OTHER (Vim reported other status not understood)
                                #  ERROR (VIM indicates an ERROR status)
                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                                #  CREATING (on building process), ERROR
                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                                #
                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                    interfaces:
                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
                        vim_net_id:       #network id where this interface is connected
                        vim_interface_id: #interface/port VIM id
                        ip_address:       #null, or text with IPv4, IPv6 address
        """

        self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))

        vdc = self.get_vdc_details()
        if vdc is None:
            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))

        vms_dict = {}
        # NSX edge ids are cached across all VMs of this refresh; they are only
        # fetched the first time a VM interface is missing its IP address
        nsx_edge_list = []
        for vmuuid in vm_list:
            vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
            if vmname is not None:

                try:
                    vm_pci_details = self.get_vm_pci_details(vmuuid)
                    the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
                    vm_info = the_vapp.get_vms_details()
                    vm_status = vm_info[0]['status']
                    # merge PCI passthrough info into the raw VM details dumped as vim_info
                    vm_info[0].update(vm_pci_details)

                    # NOTE(review): status and error_msg both carry the mapped vCD
                    # status string — presumably no separate error detail is
                    # available at this layer; confirm against vimconn contract
                    vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                               'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                               'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}

                    # get networks
                    vm_app_networks = the_vapp.get_vms_network_info()
                    for vapp_network in vm_app_networks:
                        for vm_network in vapp_network:
                            if vm_network['name'] == vmname:
                                #Assign IP Address based on MAC Address in NSX DHCP lease info
                                if vm_network['ip'] is None:
                                    if not nsx_edge_list:
                                        nsx_edge_list = self.get_edge_details()
                                        if nsx_edge_list is None:
                                            raise vimconn.vimconnException("refresh_vms_status:"\
                                                                           "Failed to get edge details from NSX Manager")
                                    if vm_network['mac'] is not None:
                                        vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])

                                vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
                                interface = {"mac_address": vm_network['mac'],
                                             "vim_net_id": vm_net_id,
                                             "vim_interface_id": vm_net_id,
                                             'ip_address': vm_network['ip']}
                                # interface['vim_info'] = yaml.safe_dump(vm_network)
                                vm_dict["interfaces"].append(interface)
                    # add a vm to vm dict
                    vms_dict.setdefault(vmuuid, vm_dict)
                except Exception as exp:
                    # best-effort refresh: a failing VM is logged and simply left
                    # out of the returned dictionary
                    self.logger.debug("Error in response {}".format(exp))
                    self.logger.debug(traceback.format_exc())

        return vms_dict
1876
1877
    def get_edge_details(self):
        """Get the NSX edge list from NSX Manager
           Returns list of NSX edge ids, or None when the NSX Manager request
           does not return HTTP 200.
           Raises vimconnException when the request fails or no edges exist.
        """
        edge_list = []
        rheaders = {'Content-Type': 'application/xml'}
        nsx_api_url = '/api/4.0/edges'

        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))

        try:
            resp = requests.get(self.nsx_manager + nsx_api_url,
                                auth = (self.nsx_user, self.nsx_password),
                                verify = False, headers = rheaders)
            if resp.status_code == requests.codes.ok:
                paged_Edge_List = XmlElementTree.fromstring(resp.text)
                for edge_pages in paged_Edge_List:
                    if edge_pages.tag == 'edgePage':
                        for edge_summary in edge_pages:
                            # an explicit totalCount of 0 means NSX knows no edges at all
                            if edge_summary.tag == 'pagingInfo':
                                for element in edge_summary:
                                    if element.tag == 'totalCount' and element.text == '0':
                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
                                                                       .format(self.nsx_manager))

                            if edge_summary.tag == 'edgeSummary':
                                for element in edge_summary:
                                    if element.tag == 'id':
                                        edge_list.append(element.text)
                    else:
                        # any top-level child other than <edgePage> is treated as
                        # an unexpected response format
                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
                                                       .format(self.nsx_manager))

                if not edge_list:
                    raise vimconn.vimconnException("get_edge_details: "\
                                                   "No NSX edge details found: {}"
                                                   .format(self.nsx_manager))
                else:
                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
                    return edge_list
            else:
                self.logger.debug("get_edge_details: "
                                  "Failed to get NSX edge details from NSX Manager: {}"
                                  .format(resp.content))
                return None

        except Exception as exp:
            # NOTE: the vimconnExceptions raised above are also caught here and
            # re-raised wrapped, so callers always see the generic message below
            self.logger.debug("get_edge_details: "\
                              "Failed to get NSX edge details from NSX Manager: {}"
                              .format(exp))
            raise vimconn.vimconnException("get_edge_details: "\
                                           "Failed to get NSX edge details from NSX Manager: {}"
                                           .format(exp))
1931
1932
1933 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
1934 """Get IP address details from NSX edges, using the MAC address
1935 PARAMS: nsx_edges : List of NSX edges
1936 mac_address : Find IP address corresponding to this MAC address
1937 Returns: IP address corrresponding to the provided MAC address
1938 """
1939
1940 ip_addr = None
1941 rheaders = {'Content-Type': 'application/xml'}
1942
1943 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
1944
1945 try:
1946 for edge in nsx_edges:
1947 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
1948
1949 resp = requests.get(self.nsx_manager + nsx_api_url,
1950 auth = (self.nsx_user, self.nsx_password),
1951 verify = False, headers = rheaders)
1952
1953 if resp.status_code == requests.codes.ok:
1954 dhcp_leases = XmlElementTree.fromstring(resp.text)
1955 for child in dhcp_leases:
1956 if child.tag == 'dhcpLeaseInfo':
1957 dhcpLeaseInfo = child
1958 for leaseInfo in dhcpLeaseInfo:
1959 for elem in leaseInfo:
1960 if (elem.tag)=='macAddress':
1961 edge_mac_addr = elem.text
1962 if (elem.tag)=='ipAddress':
1963 ip_addr = elem.text
1964 if edge_mac_addr is not None:
1965 if edge_mac_addr == mac_address:
1966 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
1967 .format(ip_addr, mac_address,edge))
1968 return ip_addr
1969 else:
1970 self.logger.debug("get_ipaddr_from_NSXedge: "\
1971 "Error occurred while getting DHCP lease info from NSX Manager: {}"
1972 .format(resp.content))
1973
1974 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
1975 return None
1976
1977 except XmlElementTree.ParseError as Err:
1978 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
1979
1980
1981 def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
1982 """Send and action over a VM instance from VIM
1983 Returns the vm_id if the action was successfully sent to the VIM"""
1984
1985 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
1986 if vm__vim_uuid is None or action_dict is None:
1987 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
1988
1989 vdc = self.get_vdc_details()
1990 if vdc is None:
1991 return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)
1992
1993 vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
1994 if vapp_name is None:
1995 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1996 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
1997 else:
1998 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
1999
2000 try:
2001 the_vapp = self.vca.get_vapp(vdc, vapp_name)
2002 # TODO fix all status
2003 if "start" in action_dict:
2004 vm_info = the_vapp.get_vms_details()
2005 vm_status = vm_info[0]['status']
2006 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
2007 if vm_status == "Suspended" or vm_status == "Powered off":
2008 power_on_task = the_vapp.poweron()
2009 result = self.vca.block_until_completed(power_on_task)
2010 self.instance_actions_result("start", result, vapp_name)
2011 elif "rebuild" in action_dict:
2012 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
2013 rebuild_task = the_vapp.deploy(powerOn=True)
2014 result = self.vca.block_until_completed(rebuild_task)
2015 self.instance_actions_result("rebuild", result, vapp_name)
2016 elif "pause" in action_dict:
2017 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
2018 pause_task = the_vapp.undeploy(action='suspend')
2019 result = self.vca.block_until_completed(pause_task)
2020 self.instance_actions_result("pause", result, vapp_name)
2021 elif "resume" in action_dict:
2022 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
2023 power_task = the_vapp.poweron()
2024 result = self.vca.block_until_completed(power_task)
2025 self.instance_actions_result("resume", result, vapp_name)
2026 elif "shutoff" in action_dict or "shutdown" in action_dict:
2027 action_name , value = action_dict.items()[0]
2028 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
2029 power_off_task = the_vapp.undeploy(action='powerOff')
2030 result = self.vca.block_until_completed(power_off_task)
2031 if action_name == "shutdown":
2032 self.instance_actions_result("shutdown", result, vapp_name)
2033 else:
2034 self.instance_actions_result("shutoff", result, vapp_name)
2035 elif "forceOff" in action_dict:
2036 result = the_vapp.undeploy(action='force')
2037 self.instance_actions_result("forceOff", result, vapp_name)
2038 elif "reboot" in action_dict:
2039 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
2040 reboot_task = the_vapp.reboot()
2041 else:
2042 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
2043 return vm__vim_uuid
2044 except Exception as exp :
2045 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
2046 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
2047
2048 def instance_actions_result(self, action, result, vapp_name):
2049 if result:
2050 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
2051 else:
2052 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
2053
2054 def get_vminstance_console(self, vm_id, console_type="vnc"):
2055 """
2056 Get a console for the virtual machine
2057 Params:
2058 vm_id: uuid of the VM
2059 console_type, can be:
2060 "novnc" (by default), "xvpvnc" for VNC types,
2061 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2062 Returns dict with the console parameters:
2063 protocol: ssh, ftp, http, https, ...
2064 server: usually ip address
2065 port: the http, ssh, ... port
2066 suffix: extra text, e.g. the http path and query string
2067 """
2068 raise vimconn.vimconnNotImplemented("Should have implemented this")
2069
2070 # NOT USED METHODS in current version
2071
2072 def host_vim2gui(self, host, server_dict):
2073 """Transform host dictionary from VIM format to GUI format,
2074 and append to the server_dict
2075 """
2076 raise vimconn.vimconnNotImplemented("Should have implemented this")
2077
2078 def get_hosts_info(self):
2079 """Get the information of deployed hosts
2080 Returns the hosts content"""
2081 raise vimconn.vimconnNotImplemented("Should have implemented this")
2082
2083 def get_hosts(self, vim_tenant):
2084 """Get the hosts and deployed instances
2085 Returns the hosts content"""
2086 raise vimconn.vimconnNotImplemented("Should have implemented this")
2087
2088 def get_processor_rankings(self):
2089 """Get the processor rankings in the VIM database"""
2090 raise vimconn.vimconnNotImplemented("Should have implemented this")
2091
2092 def new_host(self, host_data):
2093 """Adds a new host to VIM"""
2094 '''Returns status code of the VIM response'''
2095 raise vimconn.vimconnNotImplemented("Should have implemented this")
2096
2097 def new_external_port(self, port_data):
2098 """Adds a external port to VIM"""
2099 '''Returns the port identifier'''
2100 raise vimconn.vimconnNotImplemented("Should have implemented this")
2101
2102 def new_external_network(self, net_name, net_type):
2103 """Adds a external network to VIM (shared)"""
2104 '''Returns the network identifier'''
2105 raise vimconn.vimconnNotImplemented("Should have implemented this")
2106
2107 def connect_port_network(self, port_id, network_id, admin=False):
2108 """Connects a external port to a network"""
2109 '''Returns status code of the VIM response'''
2110 raise vimconn.vimconnNotImplemented("Should have implemented this")
2111
2112 def new_vminstancefromJSON(self, vm_data):
2113 """Adds a VM instance to VIM"""
2114 '''Returns the instance identifier'''
2115 raise vimconn.vimconnNotImplemented("Should have implemented this")
2116
2117 def get_network_name_by_id(self, network_uuid=None):
2118 """Method gets vcloud director network named based on supplied uuid.
2119
2120 Args:
2121 network_uuid: network_id
2122
2123 Returns:
2124 The return network name.
2125 """
2126
2127 if not network_uuid:
2128 return None
2129
2130 try:
2131 org_dict = self.get_org(self.org_uuid)
2132 if 'networks' in org_dict:
2133 org_network_dict = org_dict['networks']
2134 for net_uuid in org_network_dict:
2135 if net_uuid == network_uuid:
2136 return org_network_dict[net_uuid]
2137 except:
2138 self.logger.debug("Exception in get_network_name_by_id")
2139 self.logger.debug(traceback.format_exc())
2140
2141 return None
2142
2143 def get_network_id_by_name(self, network_name=None):
2144 """Method gets vcloud director network uuid based on supplied name.
2145
2146 Args:
2147 network_name: network_name
2148 Returns:
2149 The return network uuid.
2150 network_uuid: network_id
2151 """
2152
2153 if not network_name:
2154 self.logger.debug("get_network_id_by_name() : Network name is empty")
2155 return None
2156
2157 try:
2158 org_dict = self.get_org(self.org_uuid)
2159 if org_dict and 'networks' in org_dict:
2160 org_network_dict = org_dict['networks']
2161 for net_uuid,net_name in org_network_dict.iteritems():
2162 if net_name == network_name:
2163 return net_uuid
2164
2165 except KeyError as exp:
2166 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
2167
2168 return None
2169
    def list_org_action(self):
        """
        Method leverages vCloud director and query for available organization for particular user

        Args:
            vca - is active VCA connection.
            vdc_name - is a vdc name that will be used to query vms action

        Returns:
            The return XML respond (raw response content), or None when there
            is no active vCloud session or the request does not succeed.
        """
        url_list = [self.vca.host, '/api/org']
        vm_list_rest_call = ''.join(url_list)

        # double negative: proceed only when both the vcloud session and its
        # organization are available
        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
            response = Http.get(url=vm_list_rest_call,
                                headers=self.vca.vcloud_session.get_vcloud_headers(),
                                verify=self.vca.verify,
                                logger=self.vca.logger)

            # 403: session expired -> re-login and retry the request once
            if response.status_code == 403:
                response = self.retry_rest('GET', vm_list_rest_call)

            if response.status_code == requests.codes.ok:
                return response.content

        return None
2198
    def get_org_action(self, org_uuid=None):
        """
        Method leverages vCloud director and retrieve available object for organization.

        Args:
            org_uuid - organization uuid to query.

        Returns:
            The return XML respond (raw response content), or None when
            org_uuid is None, there is no active session, or the request fails.
        """
        if org_uuid is None:
            return None

        url_list = [self.vca.host, '/api/org/', org_uuid]
        vm_list_rest_call = ''.join(url_list)

        # proceed only when both the vcloud session and its organization exist
        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
            response = Http.get(url=vm_list_rest_call,
                                headers=self.vca.vcloud_session.get_vcloud_headers(),
                                verify=self.vca.verify,
                                logger=self.vca.logger)

            #Retry login if session expired & retry sending request
            if response.status_code == 403:
                response = self.retry_rest('GET', vm_list_rest_call)

            if response.status_code == requests.codes.ok:
                return response.content

        return None
2231
2232 def get_org(self, org_uuid=None):
2233 """
2234 Method retrieves available organization in vCloud Director
2235
2236 Args:
2237 org_uuid - is a organization uuid.
2238
2239 Returns:
2240 The return dictionary with following key
2241 "network" - for network list under the org
2242 "catalogs" - for network list under the org
2243 "vdcs" - for vdc list under org
2244 """
2245
2246 org_dict = {}
2247
2248 if org_uuid is None:
2249 return org_dict
2250
2251 content = self.get_org_action(org_uuid=org_uuid)
2252 try:
2253 vdc_list = {}
2254 network_list = {}
2255 catalog_list = {}
2256 vm_list_xmlroot = XmlElementTree.fromstring(content)
2257 for child in vm_list_xmlroot:
2258 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
2259 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2260 org_dict['vdcs'] = vdc_list
2261 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
2262 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2263 org_dict['networks'] = network_list
2264 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
2265 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
2266 org_dict['catalogs'] = catalog_list
2267 except:
2268 pass
2269
2270 return org_dict
2271
2272 def get_org_list(self):
2273 """
2274 Method retrieves available organization in vCloud Director
2275
2276 Args:
2277 vca - is active VCA connection.
2278
2279 Returns:
2280 The return dictionary and key for each entry VDC UUID
2281 """
2282
2283 org_dict = {}
2284
2285 content = self.list_org_action()
2286 try:
2287 vm_list_xmlroot = XmlElementTree.fromstring(content)
2288 for vm_xml in vm_list_xmlroot:
2289 if vm_xml.tag.split("}")[1] == 'Org':
2290 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
2291 org_dict[org_uuid[0]] = vm_xml.attrib['name']
2292 except:
2293 pass
2294
2295 return org_dict
2296
    def vms_view_action(self, vdc_name=None):
        """ Method leverages vCloud director vms query call

        Args:
            vca - is active VCA connection.
            vdc_name - is a vdc name that will be used to query vms action

        Returns:
            The return XML respond (raw response content), or None when
            vdc_name is None, the session is missing, the vdc reference is not
            unique, or the request fails.
        """
        vca = self.connect()
        if vdc_name is None:
            return None

        url_list = [vca.host, '/api/vms/query']
        vm_list_rest_call = ''.join(url_list)

        # proceed only when both the vcloud session and its organization exist
        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
            # python2: filter() returns a list here, so len() below is valid
            refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
                          vca.vcloud_session.organization.Link)
            # query only when exactly one vdc reference matches the given name
            if len(refs) == 1:
                response = Http.get(url=vm_list_rest_call,
                                    headers=vca.vcloud_session.get_vcloud_headers(),
                                    verify=vca.verify,
                                    logger=vca.logger)
                if response.status_code == requests.codes.ok:
                    return response.content

        return None
2326
2327 def get_vapp_list(self, vdc_name=None):
2328 """
2329 Method retrieves vApp list deployed vCloud director and returns a dictionary
2330 contains a list of all vapp deployed for queried VDC.
2331 The key for a dictionary is vApp UUID
2332
2333
2334 Args:
2335 vca - is active VCA connection.
2336 vdc_name - is a vdc name that will be used to query vms action
2337
2338 Returns:
2339 The return dictionary and key for each entry vapp UUID
2340 """
2341
2342 vapp_dict = {}
2343 if vdc_name is None:
2344 return vapp_dict
2345
2346 content = self.vms_view_action(vdc_name=vdc_name)
2347 try:
2348 vm_list_xmlroot = XmlElementTree.fromstring(content)
2349 for vm_xml in vm_list_xmlroot:
2350 if vm_xml.tag.split("}")[1] == 'VMRecord':
2351 if vm_xml.attrib['isVAppTemplate'] == 'true':
2352 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
2353 if 'vappTemplate-' in rawuuid[0]:
2354 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2355 # vm and use raw UUID as key
2356 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
2357 except:
2358 pass
2359
2360 return vapp_dict
2361
2362 def get_vm_list(self, vdc_name=None):
2363 """
2364 Method retrieves VM's list deployed vCloud director. It returns a dictionary
2365 contains a list of all VM's deployed for queried VDC.
2366 The key for a dictionary is VM UUID
2367
2368
2369 Args:
2370 vca - is active VCA connection.
2371 vdc_name - is a vdc name that will be used to query vms action
2372
2373 Returns:
2374 The return dictionary and key for each entry vapp UUID
2375 """
2376 vm_dict = {}
2377
2378 if vdc_name is None:
2379 return vm_dict
2380
2381 content = self.vms_view_action(vdc_name=vdc_name)
2382 try:
2383 vm_list_xmlroot = XmlElementTree.fromstring(content)
2384 for vm_xml in vm_list_xmlroot:
2385 if vm_xml.tag.split("}")[1] == 'VMRecord':
2386 if vm_xml.attrib['isVAppTemplate'] == 'false':
2387 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2388 if 'vm-' in rawuuid[0]:
2389 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
2390 # vm and use raw UUID as key
2391 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2392 except:
2393 pass
2394
2395 return vm_dict
2396
2397 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
2398 """
2399 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
2400 contains a list of all VM's deployed for queried VDC.
2401 The key for a dictionary is VM UUID
2402
2403
2404 Args:
2405 vca - is active VCA connection.
2406 vdc_name - is a vdc name that will be used to query vms action
2407
2408 Returns:
2409 The return dictionary and key for each entry vapp UUID
2410 """
2411 vm_dict = {}
2412 vca = self.connect()
2413 if not vca:
2414 raise vimconn.vimconnConnectionException("self.connect() is failed")
2415
2416 if vdc_name is None:
2417 return vm_dict
2418
2419 content = self.vms_view_action(vdc_name=vdc_name)
2420 try:
2421 vm_list_xmlroot = XmlElementTree.fromstring(content)
2422 for vm_xml in vm_list_xmlroot:
2423 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
2424 # lookup done by UUID
2425 if isuuid:
2426 if vapp_name in vm_xml.attrib['container']:
2427 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2428 if 'vm-' in rawuuid[0]:
2429 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2430 break
2431 # lookup done by Name
2432 else:
2433 if vapp_name in vm_xml.attrib['name']:
2434 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
2435 if 'vm-' in rawuuid[0]:
2436 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
2437 break
2438 except:
2439 pass
2440
2441 return vm_dict
2442
    def get_network_action(self, network_uuid=None):
        """
        Method leverages vCloud director and query network based on network uuid

        Args:
            vca - is active VCA connection.
            network_uuid - is a network uuid

        Returns:
            The return XML respond (raw response content), or None when
            network_uuid is None, the session is missing, or the request fails.
        """
        if network_uuid is None:
            return None

        url_list = [self.vca.host, '/api/network/', network_uuid]
        vm_list_rest_call = ''.join(url_list)

        # proceed only when both the vcloud session and its organization exist
        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
            response = Http.get(url=vm_list_rest_call,
                                headers=self.vca.vcloud_session.get_vcloud_headers(),
                                verify=self.vca.verify,
                                logger=self.vca.logger)

            #Retry login if session expired & retry sending request
            if response.status_code == 403:
                response = self.retry_rest('GET', vm_list_rest_call)

            if response.status_code == requests.codes.ok:
                return response.content

        return None
2475
    def get_vcd_network(self, network_uuid=None):
        """
        Method retrieves available network from vCloud Director

        Args:
            network_uuid - is VCD network UUID

        Each element serialized as key : value pair

        Following keys available for access.    network_configuration['Gateway'}
        <Configuration>
          <IpScopes>
            <IpScope>
                <IsInherited>true</IsInherited>
                <Gateway>172.16.252.100</Gateway>
                <Netmask>255.255.255.0</Netmask>
                <Dns1>172.16.254.201</Dns1>
                <Dns2>172.16.254.202</Dns2>
                <DnsSuffix>vmwarelab.edu</DnsSuffix>
                <IsEnabled>true</IsEnabled>
                <IpRanges>
                    <IpRange>
                        <StartAddress>172.16.252.1</StartAddress>
                        <EndAddress>172.16.252.99</EndAddress>
                    </IpRange>
                </IpRanges>
            </IpScope>
          </IpScopes>
          <FenceMode>bridged</FenceMode>

        Returns:
            Dictionary of network attributes, or None when network_uuid is None.
            Raises vimconnException on any parsing/request failure.
        """
        network_configuration = {}
        if network_uuid is None:
            # NOTE: returns the argument itself, i.e. None
            return network_uuid

        try:
            content = self.get_network_action(network_uuid=network_uuid)
            vm_list_xmlroot = XmlElementTree.fromstring(content)

            network_configuration['status'] = vm_list_xmlroot.get("status")
            network_configuration['name'] = vm_list_xmlroot.get("name")
            # id is a URN like urn:vcloud:network:<uuid>; keep only the uuid
            network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]

            for child in vm_list_xmlroot:
                if child.tag.split("}")[1] == 'IsShared':
                    network_configuration['isShared'] = child.text.strip()
                if child.tag.split("}")[1] == 'Configuration':
                    # flatten every non-empty tag under <Configuration> into the dict
                    for configuration in child.iter():
                        tagKey = configuration.tag.split("}")[1].strip()
                        if tagKey != "":
                            network_configuration[tagKey] = configuration.text.strip()
            return network_configuration
        except Exception as exp :
            self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
            raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))

        # NOTE(review): unreachable — the try block returns and the except
        # always raises; kept for reference only
        return network_configuration
2536
    def delete_network_action(self, network_uuid=None):
        """
        Method delete given network from vCloud director

        Args:
            network_uuid - is a network uuid that client wish to delete

        Returns:
            True when vCD accepts the delete (HTTP 202), False otherwise.
        Raises:
            vimconnConnectionException when the admin connection fails.
        """
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if network_uuid is None:
            return False

        url_list = [vca.host, '/api/admin/network/', network_uuid]
        vm_list_rest_call = ''.join(url_list)

        # proceed only when both the admin session and its organization exist
        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
            response = Http.delete(url=vm_list_rest_call,
                                   headers=vca.vcloud_session.get_vcloud_headers(),
                                   verify=vca.verify,
                                   logger=vca.logger)

            # 202 Accepted: vCD queued the delete task
            if response.status_code == 202:
                return True

        return False
2567
2568 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2569 ip_profile=None, isshared='true'):
2570 """
2571 Method create network in vCloud director
2572
2573 Args:
2574 network_name - is network name to be created.
2575 net_type - can be 'bridge','data','ptp','mgmt'.
2576 ip_profile is a dict containing the IP parameters of the network
2577 isshared - is a boolean
2578 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2579 It optional attribute. by default if no parent network indicate the first available will be used.
2580
2581 Returns:
2582 The return network uuid or return None
2583 """
2584
2585 new_network_name = [network_name, '-', str(uuid.uuid4())]
2586 content = self.create_network_rest(network_name=''.join(new_network_name),
2587 ip_profile=ip_profile,
2588 net_type=net_type,
2589 parent_network_uuid=parent_network_uuid,
2590 isshared=isshared)
2591 if content is None:
2592 self.logger.debug("Failed create network {}.".format(network_name))
2593 return None
2594
2595 try:
2596 vm_list_xmlroot = XmlElementTree.fromstring(content)
2597 vcd_uuid = vm_list_xmlroot.get('id').split(":")
2598 if len(vcd_uuid) == 4:
2599 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
2600 return vcd_uuid[3]
2601 except:
2602 self.logger.debug("Failed create network {}".format(network_name))
2603 return None
2604
2605 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
2606 ip_profile=None, isshared='true'):
2607 """
2608 Method create network in vCloud director
2609
2610 Args:
2611 network_name - is network name to be created.
2612 net_type - can be 'bridge','data','ptp','mgmt'.
2613 ip_profile is a dict containing the IP parameters of the network
2614 isshared - is a boolean
2615 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2616 It optional attribute. by default if no parent network indicate the first available will be used.
2617
2618 Returns:
2619 The return network uuid or return None
2620 """
2621
2622 vca = self.connect_as_admin()
2623 if not vca:
2624 raise vimconn.vimconnConnectionException("self.connect() is failed.")
2625 if network_name is None:
2626 return None
2627
2628 url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
2629 vm_list_rest_call = ''.join(url_list)
2630 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
2631 response = Http.get(url=vm_list_rest_call,
2632 headers=vca.vcloud_session.get_vcloud_headers(),
2633 verify=vca.verify,
2634 logger=vca.logger)
2635
2636 provider_network = None
2637 available_networks = None
2638 add_vdc_rest_url = None
2639
2640 if response.status_code != requests.codes.ok:
2641 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2642 response.status_code))
2643 return None
2644 else:
2645 try:
2646 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2647 for child in vm_list_xmlroot:
2648 if child.tag.split("}")[1] == 'ProviderVdcReference':
2649 provider_network = child.attrib.get('href')
2650 # application/vnd.vmware.admin.providervdc+xml
2651 if child.tag.split("}")[1] == 'Link':
2652 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
2653 and child.attrib.get('rel') == 'add':
2654 add_vdc_rest_url = child.attrib.get('href')
2655 except:
2656 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2657 self.logger.debug("Respond body {}".format(response.content))
2658 return None
2659
2660 # find pvdc provided available network
2661 response = Http.get(url=provider_network,
2662 headers=vca.vcloud_session.get_vcloud_headers(),
2663 verify=vca.verify,
2664 logger=vca.logger)
2665 if response.status_code != requests.codes.ok:
2666 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
2667 response.status_code))
2668 return None
2669
2670 # available_networks.split("/")[-1]
2671
2672 if parent_network_uuid is None:
2673 try:
2674 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2675 for child in vm_list_xmlroot.iter():
2676 if child.tag.split("}")[1] == 'AvailableNetworks':
2677 for networks in child.iter():
2678 # application/vnd.vmware.admin.network+xml
2679 if networks.attrib.get('href') is not None:
2680 available_networks = networks.attrib.get('href')
2681 break
2682 except:
2683 return None
2684
2685 try:
2686 #Configure IP profile of the network
2687 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
2688
2689 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
2690 subnet_rand = random.randint(0, 255)
2691 ip_base = "192.168.{}.".format(subnet_rand)
2692 ip_profile['subnet_address'] = ip_base + "0/24"
2693 else:
2694 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
2695
2696 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
2697 ip_profile['gateway_address']=ip_base + "1"
2698 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
2699 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
2700 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
2701 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
2702 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
2703 ip_profile['dhcp_start_address']=ip_base + "3"
2704 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
2705 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
2706 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
2707 ip_profile['dns_address']=ip_base + "2"
2708
2709 gateway_address=ip_profile['gateway_address']
2710 dhcp_count=int(ip_profile['dhcp_count'])
2711 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
2712
2713 if ip_profile['dhcp_enabled']==True:
2714 dhcp_enabled='true'
2715 else:
2716 dhcp_enabled='false'
2717 dhcp_start_address=ip_profile['dhcp_start_address']
2718
2719 #derive dhcp_end_address from dhcp_start_address & dhcp_count
2720 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
2721 end_ip_int += dhcp_count - 1
2722 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
2723
2724 ip_version=ip_profile['ip_version']
2725 dns_address=ip_profile['dns_address']
2726 except KeyError as exp:
2727 self.logger.debug("Create Network REST: Key error {}".format(exp))
2728 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
2729
2730 # either use client provided UUID or search for a first available
2731 # if both are not defined we return none
2732 if parent_network_uuid is not None:
2733 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
2734 add_vdc_rest_url = ''.join(url_list)
2735
2736 #Creating all networks as Direct Org VDC type networks.
2737 #Unused in case of Underlay (data/ptp) network interface.
2738 fence_mode="bridged"
2739 is_inherited='false'
2740 dns_list = dns_address.split(";")
2741 dns1 = dns_list[0]
2742 dns2_text = ""
2743 if len(dns_list) >= 2:
2744 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
2745 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2746 <Description>Openmano created</Description>
2747 <Configuration>
2748 <IpScopes>
2749 <IpScope>
2750 <IsInherited>{1:s}</IsInherited>
2751 <Gateway>{2:s}</Gateway>
2752 <Netmask>{3:s}</Netmask>
2753 <Dns1>{4:s}</Dns1>{5:s}
2754 <IsEnabled>{6:s}</IsEnabled>
2755 <IpRanges>
2756 <IpRange>
2757 <StartAddress>{7:s}</StartAddress>
2758 <EndAddress>{8:s}</EndAddress>
2759 </IpRange>
2760 </IpRanges>
2761 </IpScope>
2762 </IpScopes>
2763 <ParentNetwork href="{9:s}"/>
2764 <FenceMode>{10:s}</FenceMode>
2765 </Configuration>
2766 <IsShared>{11:s}</IsShared>
2767 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
2768 subnet_address, dns1, dns2_text, dhcp_enabled,
2769 dhcp_start_address, dhcp_end_address, available_networks,
2770 fence_mode, isshared)
2771
2772 headers = vca.vcloud_session.get_vcloud_headers()
2773 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
2774 try:
2775 response = Http.post(url=add_vdc_rest_url,
2776 headers=headers,
2777 data=data,
2778 verify=vca.verify,
2779 logger=vca.logger)
2780
2781 if response.status_code != 201:
2782 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
2783 .format(response.status_code,response.content))
2784 else:
2785 network = networkType.parseString(response.content, True)
2786 create_nw_task = network.get_Tasks().get_Task()[0]
2787
2788 # if we all ok we respond with content after network creation completes
2789 # otherwise by default return None
2790 if create_nw_task is not None:
2791 self.logger.debug("Create Network REST : Waiting for Network creation complete")
2792 status = vca.block_until_completed(create_nw_task)
2793 if status:
2794 return response.content
2795 else:
2796 self.logger.debug("create_network_rest task failed. Network Create response : {}"
2797 .format(response.content))
2798 except Exception as exp:
2799 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
2800
2801 return None
2802
2803 def convert_cidr_to_netmask(self, cidr_ip=None):
2804 """
2805 Method sets convert CIDR netmask address to normal IP format
2806 Args:
2807 cidr_ip : CIDR IP address
2808 Returns:
2809 netmask : Converted netmask
2810 """
2811 if cidr_ip is not None:
2812 if '/' in cidr_ip:
2813 network, net_bits = cidr_ip.split('/')
2814 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
2815 else:
2816 netmask = cidr_ip
2817 return netmask
2818 return None
2819
2820 def get_provider_rest(self, vca=None):
2821 """
2822 Method gets provider vdc view from vcloud director
2823
2824 Args:
2825 network_name - is network name to be created.
2826 parent_network_uuid - is parent provider vdc network that will be used for mapping.
2827 It optional attribute. by default if no parent network indicate the first available will be used.
2828
2829 Returns:
2830 The return xml content of respond or None
2831 """
2832
2833 url_list = [vca.host, '/api/admin']
2834 response = Http.get(url=''.join(url_list),
2835 headers=vca.vcloud_session.get_vcloud_headers(),
2836 verify=vca.verify,
2837 logger=vca.logger)
2838
2839 if response.status_code == requests.codes.ok:
2840 return response.content
2841 return None
2842
2843 def create_vdc(self, vdc_name=None):
2844
2845 vdc_dict = {}
2846
2847 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
2848 if xml_content is not None:
2849 try:
2850 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
2851 for child in task_resp_xmlroot:
2852 if child.tag.split("}")[1] == 'Owner':
2853 vdc_id = child.attrib.get('href').split("/")[-1]
2854 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
2855 return vdc_dict
2856 except:
2857 self.logger.debug("Respond body {}".format(xml_content))
2858
2859 return None
2860
2861 def create_vdc_from_tmpl_rest(self, vdc_name=None):
2862 """
2863 Method create vdc in vCloud director based on VDC template.
2864 it uses pre-defined template that must be named openmano
2865
2866 Args:
2867 vdc_name - name of a new vdc.
2868
2869 Returns:
2870 The return xml content of respond or None
2871 """
2872
2873 self.logger.info("Creating new vdc {}".format(vdc_name))
2874 vca = self.connect()
2875 if not vca:
2876 raise vimconn.vimconnConnectionException("self.connect() is failed")
2877 if vdc_name is None:
2878 return None
2879
2880 url_list = [vca.host, '/api/vdcTemplates']
2881 vm_list_rest_call = ''.join(url_list)
2882 response = Http.get(url=vm_list_rest_call,
2883 headers=vca.vcloud_session.get_vcloud_headers(),
2884 verify=vca.verify,
2885 logger=vca.logger)
2886
2887 # container url to a template
2888 vdc_template_ref = None
2889 try:
2890 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
2891 for child in vm_list_xmlroot:
2892 # application/vnd.vmware.admin.providervdc+xml
2893 # we need find a template from witch we instantiate VDC
2894 if child.tag.split("}")[1] == 'VdcTemplate':
2895 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml' and child.attrib.get(
2896 'name') == 'openmano':
2897 vdc_template_ref = child.attrib.get('href')
2898 except:
2899 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2900 self.logger.debug("Respond body {}".format(response.content))
2901 return None
2902
2903 # if we didn't found required pre defined template we return None
2904 if vdc_template_ref is None:
2905 return None
2906
2907 try:
2908 # instantiate vdc
2909 url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
2910 vm_list_rest_call = ''.join(url_list)
2911 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
2912 <Source href="{1:s}"></Source>
2913 <Description>opnemano</Description>
2914 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
2915 headers = vca.vcloud_session.get_vcloud_headers()
2916 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
2917 response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
2918 logger=vca.logger)
2919 # if we all ok we respond with content otherwise by default None
2920 if response.status_code >= 200 and response.status_code < 300:
2921 return response.content
2922 return None
2923 except:
2924 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
2925 self.logger.debug("Respond body {}".format(response.content))
2926
2927 return None
2928
    def create_vdc_rest(self, vdc_name=None):
        """
        Create a new VDC in vCloud director through the admin REST API.

        Args:
            vdc_name - name of the vdc to be created (also used as its description).

        Returns:
            The raw XML content of the CreateVdcParams POST response when the
            creation was accepted (HTTP 201), otherwise None.

        Raises:
            vimconnConnectionException when the admin connection fails.
        """

        self.logger.info("Creating new vdc {}".format(vdc_name))

        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vdc_name is None:
            return None

        url_list = [vca.host, '/api/admin/org/', self.org_uuid]
        vm_list_rest_call = ''.join(url_list)
        # NOTE(review): if this session check is False, 'response' below is used
        # unassigned (NameError). Presumably connect_as_admin() always yields a
        # valid session here - confirm.
        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
            response = Http.get(url=vm_list_rest_call,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

        provider_vdc_ref = None
        add_vdc_rest_url = None
        available_networks = None

        if response.status_code != requests.codes.ok:
            self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                      response.status_code))
            return None
        else:
            try:
                vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                for child in vm_list_xmlroot:
                    # application/vnd.vmware.admin.providervdc+xml
                    # look for the org's "add VDC" link
                    if child.tag.split("}")[1] == 'Link':
                        if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
                                and child.attrib.get('rel') == 'add':
                            add_vdc_rest_url = child.attrib.get('href')
            except:
                self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                self.logger.debug("Respond body {}".format(response.content))
                return None

        # pick a provider VDC reference; the loop keeps the LAST one found
        response = self.get_provider_rest(vca=vca)
        try:
            vm_list_xmlroot = XmlElementTree.fromstring(response)
            for child in vm_list_xmlroot:
                if child.tag.split("}")[1] == 'ProviderVdcReferences':
                    for sub_child in child:
                        provider_vdc_ref = sub_child.attrib.get('href')
        except:
            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
            self.logger.debug("Respond body {}".format(response))
            return None

        if add_vdc_rest_url is not None and provider_vdc_ref is not None:
            data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
                    <AllocationModel>ReservationPool</AllocationModel>
                    <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
                    <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
                    </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
                    <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
                    <ProviderVdcReference
                    name="Main Provider"
                    href="{2:s}" />
                    <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
                                                                                                  escape(vdc_name),
                                                                                                  provider_vdc_ref)

            headers = vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
            response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
                                 logger=vca.logger)

            # if we all ok we respond with content otherwise by default None
            if response.status_code == 201:
                return response.content
        return None
3015
    def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
        """
        Retrieve and parse vApp details from vCloud director.

        Args:
            vapp_uuid - vApp identifier.
            need_admin_access - when True, query with an admin session
                                obtained via connect_as_admin().

        Returns:
            dict with the parsed vApp attributes (ovfDescriptorUploaded,
            created, networkname, IP-scope fields, name, status, vmuuid,
            interfaces, vm_vcenter_info, vm_virtual_hardware, ...);
            empty dict on REST failure, None when vapp_uuid is missing.

        Raises:
            vimconnConnectionException when no connection is available.
        """

        parsed_respond = {}
        vca = None

        if need_admin_access:
            vca = self.connect_as_admin()
        else:
            vca = self.vca

        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vapp_uuid is None:
            return None

        url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
        get_vapp_restcall = ''.join(url_list)

        if vca.vcloud_session and vca.vcloud_session.organization:
            response = Http.get(url=get_vapp_restcall,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code == 403:
                # on 403 retry once via retry_rest, but only for the
                # non-admin session
                if need_admin_access == False:
                    response = self.retry_rest('GET', get_vapp_restcall)

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
                                                                                          response.status_code))
                return parsed_respond

            try:
                xmlroot_respond = XmlElementTree.fromstring(response.content)
                parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']

                namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                              'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                              'vmw': 'http://www.vmware.com/schema/ovf',
                              'vm': 'http://www.vmware.com/vcloud/v1.5',
                              'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                              "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
                              "xmlns":"http://www.vmware.com/vcloud/v1.5"
                              }

                created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
                if created_section is not None:
                    parsed_respond['created'] = created_section.text

                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
                if network_section is not None and 'networkName' in network_section.attrib:
                    parsed_respond['networkname'] = network_section.attrib['networkName']

                # flatten every IpScope child (Gateway, Netmask, ...) into the
                # result dict; IP range bounds get their own keys
                ipscopes_section = \
                    xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
                                         namespaces)
                if ipscopes_section is not None:
                    for ipscope in ipscopes_section:
                        for scope in ipscope:
                            tag_key = scope.tag.split("}")[1]
                            if tag_key == 'IpRanges':
                                ip_ranges = scope.getchildren()
                                for ipblock in ip_ranges:
                                    for block in ipblock:
                                        parsed_respond[block.tag.split("}")[1]] = block.text
                            else:
                                parsed_respond[tag_key] = scope.text

                # parse children section for other attrib
                children_section = xmlroot_respond.find('vm:Children/', namespaces)
                if children_section is not None:
                    parsed_respond['name'] = children_section.attrib['name']
                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
                        if "nestedHypervisorEnabled" in children_section.attrib else None
                    parsed_respond['deployed'] = children_section.attrib['deployed']
                    parsed_respond['status'] = children_section.attrib['status']
                    parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
                    nic_list = []
                    for adapters in network_adapter:
                        adapter_key = adapters.tag.split("}")[1]
                        if adapter_key == 'PrimaryNetworkConnectionIndex':
                            parsed_respond['primarynetwork'] = adapters.text
                        if adapter_key == 'NetworkConnection':
                            vnic = {}
                            if 'network' in adapters.attrib:
                                vnic['network'] = adapters.attrib['network']
                            for adapter in adapters:
                                setting_key = adapter.tag.split("}")[1]
                                vnic[setting_key] = adapter.text
                            nic_list.append(vnic)

                    # console-ticket links for this VM
                    for link in children_section:
                        if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                            if link.attrib['rel'] == 'screen:acquireTicket':
                                parsed_respond['acquireTicket'] = link.attrib
                            if link.attrib['rel'] == 'screen:acquireMksTicket':
                                parsed_respond['acquireMksTicket'] = link.attrib

                    parsed_respond['interfaces'] = nic_list
                    vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                    if vCloud_extension_section is not None:
                        vm_vcenter_info = {}
                        vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                        vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                        if vmext is not None:
                            # managed-object ref of the VM inside vCenter
                            vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                        parsed_respond["vm_vcenter_info"]= vm_vcenter_info

                    virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
                    vm_virtual_hardware_info = {}
                    if virtual_hardware_section is not None:
                        # only the first "Hard disk" item is reported
                        for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
                            if item.find("rasd:Description",namespaces).text == "Hard disk":
                                disk_size = item.find("rasd:HostResource" ,namespaces
                                                      ).attrib["{"+namespaces['vm']+"}capacity"]

                                vm_virtual_hardware_info["disk_size"]= disk_size
                                break

                        for link in virtual_hardware_section:
                            if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                                if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
                                    vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
                                    break

                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
            except Exception as exp :
                self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
        return parsed_respond
3156
3157 def acuire_console(self, vm_uuid=None):
3158
3159 if vm_uuid is None:
3160 return None
3161
3162 if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
3163 vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid)
3164 console_dict = vm_dict['acquireTicket']
3165 console_rest_call = console_dict['href']
3166
3167 response = Http.post(url=console_rest_call,
3168 headers=self.vca.vcloud_session.get_vcloud_headers(),
3169 verify=self.vca.verify,
3170 logger=self.vca.logger)
3171 if response.status_code == 403:
3172 response = self.retry_rest('POST', console_rest_call)
3173
3174 if response.status_code == requests.codes.ok:
3175 return response.content
3176
3177 return None
3178
3179 def modify_vm_disk(self, vapp_uuid, flavor_disk):
3180 """
3181 Method retrieve vm disk details
3182
3183 Args:
3184 vapp_uuid - is vapp identifier.
3185 flavor_disk - disk size as specified in VNFD (flavor)
3186
3187 Returns:
3188 The return network uuid or return None
3189 """
3190 status = None
3191 try:
3192 #Flavor disk is in GB convert it into MB
3193 flavor_disk = int(flavor_disk) * 1024
3194 vm_details = self.get_vapp_details_rest(vapp_uuid)
3195 if vm_details:
3196 vm_name = vm_details["name"]
3197 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
3198
3199 if vm_details and "vm_virtual_hardware" in vm_details:
3200 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
3201 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
3202
3203 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
3204
3205 if flavor_disk > vm_disk:
3206 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
3207 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
3208 vm_disk, flavor_disk ))
3209 else:
3210 status = True
3211 self.logger.info("No need to modify disk of VM {}".format(vm_name))
3212
3213 return status
3214 except Exception as exp:
3215 self.logger.info("Error occurred while modifing disk size {}".format(exp))
3216
3217
    def modify_vm_disk_rest(self, disk_href , disk_size):
        """
        Modify the VM disk capacity through the vCloud director disks endpoint
        (GET the RASD items list, patch the "Hard disk" capacity, PUT it back).

        Args:
            disk_href - vCD API URL to GET and PUT disk data
            disk_size - new disk size in MB

        Returns:
            The completion status of the modify-disk task, or None on failure.
        """
        if disk_href is None or disk_size is None:
            return None

        # NOTE(review): if this session check is False, 'response' below is used
        # unassigned (NameError) - presumably the session is always valid here;
        # confirm.
        if self.vca.vcloud_session and self.vca.vcloud_session.organization:
            response = Http.get(url=disk_href,
                                headers=self.vca.vcloud_session.get_vcloud_headers(),
                                verify=self.vca.verify,
                                logger=self.vca.logger)

        if response.status_code == 403:
            response = self.retry_rest('GET', disk_href)

        if response.status_code != requests.codes.ok:
            self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
                                                                                          response.status_code))
            return None
        try:
            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
            # Python-2-only dict.iteritems(); build prefix->uri map skipping the
            # default (None) prefix, then register it explicitly as "xmlns"
            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

            # patch the capacity attribute of the first "Hard disk" item in place
            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                if item.find("rasd:Description",namespaces).text == "Hard disk":
                    disk_item = item.find("rasd:HostResource" ,namespaces )
                    if disk_item is not None:
                        disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
                        break

            data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
                                            xml_declaration=True)

            #Send PUT request to modify disk size
            headers = self.vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

            response = Http.put(url=disk_href,
                                data=data,
                                headers=headers,
                                verify=self.vca.verify, logger=self.logger)

            if response.status_code == 403:
                add_headers = {'Content-Type': headers['Content-Type']}
                response = self.retry_rest('PUT', disk_href, add_headers, data)

            # vCD answers 202 Accepted with an async task for the resize
            if response.status_code != 202:
                self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
                                                                                              response.status_code))
            else:
                modify_disk_task = taskType.parseString(response.content, True)
                if type(modify_disk_task) is GenericTask:
                    status = self.vca.block_until_completed(modify_disk_task)
                    return status

            return None

        except Exception as exp :
            self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
            return None
3287
    def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
        """
        Method to attach pci devices to VM

        Args:
            vapp_uuid - uuid of vApp/VM
            pci_devices - pci devices infromation as specified in VNFD (flavor)
            vmname_andid - VM name/id used in log and error messages

        Returns:
            The status of add pci device task , vm object and
            vcenter_conect object

        Raises:
            vimconnNotFoundException when no host can supply the devices or
            the migration to such a host fails.
        """
        vm_obj = None
        self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
        vcenter_conect, content = self.get_vcenter_content()
        vm_moref_id = self.get_vm_moref_id(vapp_uuid)

        if vm_moref_id:
            try:
                no_of_pci_devices = len(pci_devices)
                if no_of_pci_devices > 0:
                    #Get VM and its host
                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
                    if host_obj and vm_obj:
                        #get PCI devies from host on which vapp is currently installed
                        avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)

                        if avilable_pci_devices is None:
                            #find other hosts with active pci devices
                            new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
                                                                content,
                                                                no_of_pci_devices
                                                                )

                            if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
                                #Migrate vm to the host where PCI devices are availble
                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
                                task = self.relocate_vm(new_host_obj, vm_obj)
                                if task is not None:
                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
                                    self.logger.info("Migrate VM status: {}".format(result))
                                    host_obj = new_host_obj
                                else:
                                    # NOTE(review): 'result' is unbound on this path
                                    # (task is None), so this log call would raise
                                    # NameError before the exception below - confirm
                                    # intended message.
                                    self.logger.info("Fail to migrate VM : {}".format(result))
                                    raise vimconn.vimconnNotFoundException(
                                        "Fail to migrate VM : {} to host {}".format(
                                                        vmname_andid,
                                                        new_host_obj)
                                        )

                        if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
                            #Add PCI devices one by one
                            for pci_device in avilable_pci_devices:
                                task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
                                if task:
                                    status= self.wait_for_vcenter_task(task, vcenter_conect)
                                    if status:
                                        self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
                                else:
                                    self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
                            return True, vm_obj, vcenter_conect
                        else:
                            self.logger.error("Currently there is no host with"\
                                              " {} number of avaialble PCI devices required for VM {}".format(
                                                                            no_of_pci_devices,
                                                                            vmname_andid)
                                              )
                            raise vimconn.vimconnNotFoundException(
                                    "Currently there is no host with {} "\
                                    "number of avaialble PCI devices required for VM {}".format(
                                                                            no_of_pci_devices,
                                                                            vmname_andid))
                else:
                    self.logger.debug("No infromation about PCI devices {} ",pci_devices)

            except vmodl.MethodFault as error:
                self.logger.error("Error occurred while adding PCI devices {} ",error)
        return None, vm_obj, vcenter_conect
3367
3368 def get_vm_obj(self, content, mob_id):
3369 """
3370 Method to get the vsphere VM object associated with a given morf ID
3371 Args:
3372 vapp_uuid - uuid of vApp/VM
3373 content - vCenter content object
3374 mob_id - mob_id of VM
3375
3376 Returns:
3377 VM and host object
3378 """
3379 vm_obj = None
3380 host_obj = None
3381 try :
3382 container = content.viewManager.CreateContainerView(content.rootFolder,
3383 [vim.VirtualMachine], True
3384 )
3385 for vm in container.view:
3386 mobID = vm._GetMoId()
3387 if mobID == mob_id:
3388 vm_obj = vm
3389 host_obj = vm_obj.runtime.host
3390 break
3391 except Exception as exp:
3392 self.logger.error("Error occurred while finding VM object : {}".format(exp))
3393 return host_obj, vm_obj
3394
3395 def get_pci_devices(self, host, need_devices):
3396 """
3397 Method to get the details of pci devices on given host
3398 Args:
3399 host - vSphere host object
3400 need_devices - number of pci devices needed on host
3401
3402 Returns:
3403 array of pci devices
3404 """
3405 all_devices = []
3406 all_device_ids = []
3407 used_devices_ids = []
3408
3409 try:
3410 if host:
3411 pciPassthruInfo = host.config.pciPassthruInfo
3412 pciDevies = host.hardware.pciDevice
3413
3414 for pci_status in pciPassthruInfo:
3415 if pci_status.passthruActive:
3416 for device in pciDevies:
3417 if device.id == pci_status.id:
3418 all_device_ids.append(device.id)
3419 all_devices.append(device)
3420
3421 #check if devices are in use
3422 avalible_devices = all_devices
3423 for vm in host.vm:
3424 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
3425 vm_devices = vm.config.hardware.device
3426 for device in vm_devices:
3427 if type(device) is vim.vm.device.VirtualPCIPassthrough:
3428 if device.backing.id in all_device_ids:
3429 for use_device in avalible_devices:
3430 if use_device.id == device.backing.id:
3431 avalible_devices.remove(use_device)
3432 used_devices_ids.append(device.backing.id)
3433 self.logger.debug("Device {} from devices {}"\
3434 "is in use".format(device.backing.id,
3435 device)
3436 )
3437 if len(avalible_devices) < need_devices:
3438 self.logger.debug("Host {} don't have {} number of active devices".format(host,
3439 need_devices))
3440 self.logger.debug("found only {} devives {}".format(len(avalible_devices),
3441 avalible_devices))
3442 return None
3443 else:
3444 required_devices = avalible_devices[:need_devices]
3445 self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
3446 len(avalible_devices),
3447 host,
3448 need_devices))
3449 self.logger.info("Retruning {} devices as {}".format(need_devices,
3450 required_devices ))
3451 return required_devices
3452
3453 except Exception as exp:
3454 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
3455
3456 return None
3457
3458 def get_host_and_PCIdevices(self, content, need_devices):
3459 """
3460 Method to get the details of pci devices infromation on all hosts
3461
3462 Args:
3463 content - vSphere host object
3464 need_devices - number of pci devices needed on host
3465
3466 Returns:
3467 array of pci devices and host object
3468 """
3469 host_obj = None
3470 pci_device_objs = None
3471 try:
3472 if content:
3473 container = content.viewManager.CreateContainerView(content.rootFolder,
3474 [vim.HostSystem], True)
3475 for host in container.view:
3476 devices = self.get_pci_devices(host, need_devices)
3477 if devices:
3478 host_obj = host
3479 pci_device_objs = devices
3480 break
3481 except Exception as exp:
3482 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
3483
3484 return host_obj,pci_device_objs
3485
3486 def relocate_vm(self, dest_host, vm) :
3487 """
3488 Method to get the relocate VM to new host
3489
3490 Args:
3491 dest_host - vSphere host object
3492 vm - vSphere VM object
3493
3494 Returns:
3495 task object
3496 """
3497 task = None
3498 try:
3499 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
3500 task = vm.Relocate(relocate_spec)
3501 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
3502 except Exception as exp:
3503 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
3504 dest_host, vm, exp))
3505 return task
3506
3507 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
3508 """
3509 Waits and provides updates on a vSphere task
3510 """
3511 while task.info.state == vim.TaskInfo.State.running:
3512 time.sleep(2)
3513
3514 if task.info.state == vim.TaskInfo.State.success:
3515 if task.info.result is not None and not hideResult:
3516 self.logger.info('{} completed successfully, result: {}'.format(
3517 actionName,
3518 task.info.result))
3519 else:
3520 self.logger.info('Task {} completed successfully.'.format(actionName))
3521 else:
3522 self.logger.error('{} did not complete successfully: {} '.format(
3523 actionName,
3524 task.info.error)
3525 )
3526
3527 return task.info.result
3528
3529 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
3530 """
3531 Method to add pci device in given VM
3532
3533 Args:
3534 host_object - vSphere host object
3535 vm_object - vSphere VM object
3536 host_pci_dev - host_pci_dev must be one of the devices from the
3537 host_object.hardware.pciDevice list
3538 which is configured as a PCI passthrough device
3539
3540 Returns:
3541 task object
3542 """
3543 task = None
3544 if vm_object and host_object and host_pci_dev:
3545 try :
3546 #Add PCI device to VM
3547 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
3548 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
3549
3550 if host_pci_dev.id not in systemid_by_pciid:
3551 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
3552 return None
3553
3554 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
3555 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
3556 id=host_pci_dev.id,
3557 systemId=systemid_by_pciid[host_pci_dev.id],
3558 vendorId=host_pci_dev.vendorId,
3559 deviceName=host_pci_dev.deviceName)
3560
3561 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
3562
3563 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
3564 new_device_config.operation = "add"
3565 vmConfigSpec = vim.vm.ConfigSpec()
3566 vmConfigSpec.deviceChange = [new_device_config]
3567
3568 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
3569 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
3570 host_pci_dev, vm_object, host_object)
3571 )
3572 except Exception as exp:
3573 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
3574 host_pci_dev,
3575 vm_object,
3576 exp))
3577 return task
3578
3579 def get_vm_vcenter_info(self):
3580 """
3581 Method to get details of vCenter and vm
3582
3583 Args:
3584 vapp_uuid - uuid of vApp or VM
3585
3586 Returns:
3587 Moref Id of VM and deails of vCenter
3588 """
3589 vm_vcenter_info = {}
3590
3591 if self.vcenter_ip is not None:
3592 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
3593 else:
3594 raise vimconn.vimconnException(message="vCenter IP is not provided."\
3595 " Please provide vCenter IP while attaching datacenter to tenant in --config")
3596 if self.vcenter_port is not None:
3597 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
3598 else:
3599 raise vimconn.vimconnException(message="vCenter port is not provided."\
3600 " Please provide vCenter port while attaching datacenter to tenant in --config")
3601 if self.vcenter_user is not None:
3602 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
3603 else:
3604 raise vimconn.vimconnException(message="vCenter user is not provided."\
3605 " Please provide vCenter user while attaching datacenter to tenant in --config")
3606
3607 if self.vcenter_password is not None:
3608 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
3609 else:
3610 raise vimconn.vimconnException(message="vCenter user password is not provided."\
3611 " Please provide vCenter user password while attaching datacenter to tenant in --config")
3612
3613 return vm_vcenter_info
3614
3615
3616 def get_vm_pci_details(self, vmuuid):
3617 """
3618 Method to get VM PCI device details from vCenter
3619
3620 Args:
3621 vm_obj - vSphere VM object
3622
3623 Returns:
3624 dict of PCI devives attached to VM
3625
3626 """
3627 vm_pci_devices_info = {}
3628 try:
3629 vcenter_conect, content = self.get_vcenter_content()
3630 vm_moref_id = self.get_vm_moref_id(vmuuid)
3631 if vm_moref_id:
3632 #Get VM and its host
3633 if content:
3634 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
3635 if host_obj and vm_obj:
3636 vm_pci_devices_info["host_name"]= host_obj.name
3637 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
3638 for device in vm_obj.config.hardware.device:
3639 if type(device) == vim.vm.device.VirtualPCIPassthrough:
3640 device_details={'devide_id':device.backing.id,
3641 'pciSlotNumber':device.slotInfo.pciSlotNumber,
3642 }
3643 vm_pci_devices_info[device.deviceInfo.label] = device_details
3644 else:
3645 self.logger.error("Can not connect to vCenter while getting "\
3646 "PCI devices infromationn")
3647 return vm_pci_devices_info
3648 except Exception as exp:
3649 self.logger.error("Error occurred while getting VM infromationn"\
3650 " for VM : {}".format(exp))
3651 raise vimconn.vimconnException(message=exp)
3652
    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
        """
        Method to add network adapter type to vm
        Args :
            vapp - vApp object whose VMs receive the new network connection
            network_name - name of network
            primary_nic_index - int value for primary nic index
            nicIndex - int value for nic index
            net - per-NIC network dict; optional keys 'floating_ip' (bool)
                  and 'ip_address' (str) select the IP allocation mode
            nic_type - specify model name to which add to vm
        Returns:
            None
        Raises:
            vimconn.vimconnException when a REST call to vCloud Director fails
        """

        try:
            ip_address = None
            floating_ip = False
            if 'floating_ip' in net: floating_ip = net['floating_ip']

            # Stub for ip_address feature
            if 'ip_address' in net: ip_address = net['ip_address']

            # Allocation mode: POOL for a floating IP, MANUAL when a fixed
            # address was requested, otherwise fall back to DHCP.
            if floating_ip:
                allocation_mode = "POOL"
            elif ip_address:
                allocation_mode = "MANUAL"
            else:
                allocation_mode = "DHCP"

            if not nic_type:
                # No adapter model requested: insert the connection without a
                # NetworkAdapterType element (vCD picks its default model).
                for vms in vapp._get_vms():
                    vm_id = (vms.id).split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)

                    # Fetch the VM's current NetworkConnectionSection XML
                    response = Http.get(url=url_rest_call,
                                        headers=self.vca.vcloud_session.get_vcloud_headers(),
                                        verify=self.vca.verify,
                                        logger=self.vca.logger)

                    if response.status_code == 403:
                        # Session token expired - re-login and retry once
                        response = self.retry_rest('GET', url_rest_call)

                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                       "network connection section")

                    data = response.content
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        # First NIC on this VM: a primary-index element must be
                        # inserted together with the connection.
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                               allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                    else:
                        # VM already has a primary NIC: append only the new
                        # NetworkConnection element.
                        new_item = """<NetworkConnection network="{}">
                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                    <IsConnected>true</IsConnected>
                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                    </NetworkConnection>""".format(network_name, nicIndex,
                                                                   allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))

                    # PUT the modified section back and wait for the async task
                    headers = self.vca.vcloud_session.get_vcloud_headers()
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                    response = Http.put(url=url_rest_call, headers=headers, data=data,
                                        verify=self.vca.verify,
                                        logger=self.vca.logger)

                    if response.status_code == 403:
                        add_headers = {'Content-Type': headers['Content-Type']}
                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)

                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {} ".format(url_rest_call,
                                                                     response.content,
                                                                     response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                       "network connection section")
                    else:
                        nic_task = taskType.parseString(response.content, True)
                        if isinstance(nic_task, GenericTask):
                            self.vca.block_until_completed(nic_task)
                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
                                             "default NIC type".format(vm_id))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
                                              "connect NIC type".format(vm_id))
            else:
                # Same flow as above, but the inserted XML carries the
                # requested NetworkAdapterType (e.g. VMXNET3, E1000).
                for vms in vapp._get_vms():
                    vm_id = (vms.id).split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)

                    response = Http.get(url=url_rest_call,
                                        headers=self.vca.vcloud_session.get_vcloud_headers(),
                                        verify=self.vca.verify,
                                        logger=self.vca.logger)

                    if response.status_code == 403:
                        response = self.retry_rest('GET', url_rest_call)

                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                       "network connection section")
                    data = response.content
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                <NetworkAdapterType>{}</NetworkAdapterType>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                               allocation_mode, nic_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                    else:
                        new_item = """<NetworkConnection network="{}">
                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                    <IsConnected>true</IsConnected>
                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                    <NetworkAdapterType>{}</NetworkAdapterType>
                                    </NetworkConnection>""".format(network_name, nicIndex,
                                                                   allocation_mode, nic_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))

                    headers = self.vca.vcloud_session.get_vcloud_headers()
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                    response = Http.put(url=url_rest_call, headers=headers, data=data,
                                        verify=self.vca.verify,
                                        logger=self.vca.logger)

                    if response.status_code == 403:
                        add_headers = {'Content-Type': headers['Content-Type']}
                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)

                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                          "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                                    response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                       "network connection section")
                    else:
                        nic_task = taskType.parseString(response.content, True)
                        if isinstance(nic_task, GenericTask):
                            self.vca.block_until_completed(nic_task)
                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
                                             "conneced to NIC type {}".format(vm_id, nic_type))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
                                              "failed to connect NIC type {}".format(vm_id, nic_type))
        except Exception as exp:
            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
                              "while adding Network adapter")
            raise vimconn.vimconnException(message=exp)
3839
3840
3841 def set_numa_affinity(self, vmuuid, paired_threads_id):
3842 """
3843 Method to assign numa affinity in vm configuration parammeters
3844 Args :
3845 vmuuid - vm uuid
3846 paired_threads_id - one or more virtual processor
3847 numbers
3848 Returns:
3849 return if True
3850 """
3851 try:
3852 vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
3853 if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
3854 context = None
3855 if hasattr(ssl, '_create_unverified_context'):
3856 context = ssl._create_unverified_context()
3857 vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
3858 pwd=self.passwd, port=int(vm_vcenter_port),
3859 sslContext=context)
3860 atexit.register(Disconnect, vcenter_conect)
3861 content = vcenter_conect.RetrieveContent()
3862
3863 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
3864 if vm_obj:
3865 config_spec = vim.vm.ConfigSpec()
3866 config_spec.extraConfig = []
3867 opt = vim.option.OptionValue()
3868 opt.key = 'numa.nodeAffinity'
3869 opt.value = str(paired_threads_id)
3870 config_spec.extraConfig.append(opt)
3871 task = vm_obj.ReconfigVM_Task(config_spec)
3872 if task:
3873 result = self.wait_for_vcenter_task(task, vcenter_conect)
3874 extra_config = vm_obj.config.extraConfig
3875 flag = False
3876 for opts in extra_config:
3877 if 'numa.nodeAffinity' in opts.key:
3878 flag = True
3879 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
3880 "value {} for vm {}".format(opt.value, vm_obj))
3881 if flag:
3882 return
3883 else:
3884 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
3885 except Exception as exp:
3886 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
3887 "for VM {} : {}".format(vm_obj, vm_moref_id))
3888 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
3889 "affinity".format(exp))
3890
3891
3892
3893 def cloud_init(self, vapp, cloud_config):
3894 """
3895 Method to inject ssh-key
3896 vapp - vapp object
3897 cloud_config a dictionary with:
3898 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
3899 'users': (optional) list of users to be inserted, each item is a dict with:
3900 'name': (mandatory) user name,
3901 'key-pairs': (optional) list of strings with the public key to be inserted to the user
3902 'user-data': (optional) string is a text script to be passed directly to cloud-init
3903 'config-files': (optional). List of files to be transferred. Each item is a dict with:
3904 'dest': (mandatory) string with the destination absolute path
3905 'encoding': (optional, by default text). Can be one of:
3906 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
3907 'content' (mandatory): string with the content of the file
3908 'permissions': (optional) string with file permissions, typically octal notation '0644'
3909 'owner': (optional) file owner, string with the format 'owner:group'
3910 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
3911 """
3912
3913 try:
3914 if isinstance(cloud_config, dict):
3915 key_pairs = []
3916 userdata = []
3917 if "key-pairs" in cloud_config:
3918 key_pairs = cloud_config["key-pairs"]
3919
3920 if "users" in cloud_config:
3921 userdata = cloud_config["users"]
3922
3923 for key in key_pairs:
3924 for user in userdata:
3925 if 'name' in user: user_name = user['name']
3926 if 'key-pairs' in user and len(user['key-pairs']) > 0:
3927 for user_key in user['key-pairs']:
3928 customize_script = """
3929 #!/bin/bash
3930 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
3931 if [ "$1" = "precustomization" ];then
3932 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
3933 if [ ! -d /root/.ssh ];then
3934 mkdir /root/.ssh
3935 chown root:root /root/.ssh
3936 chmod 700 /root/.ssh
3937 touch /root/.ssh/authorized_keys
3938 chown root:root /root/.ssh/authorized_keys
3939 chmod 600 /root/.ssh/authorized_keys
3940 # make centos with selinux happy
3941 which restorecon && restorecon -Rv /root/.ssh
3942 echo '{key}' >> /root/.ssh/authorized_keys
3943 else
3944 touch /root/.ssh/authorized_keys
3945 chown root:root /root/.ssh/authorized_keys
3946 chmod 600 /root/.ssh/authorized_keys
3947 echo '{key}' >> /root/.ssh/authorized_keys
3948 fi
3949 if [ -d /home/{user_name} ];then
3950 if [ ! -d /home/{user_name}/.ssh ];then
3951 mkdir /home/{user_name}/.ssh
3952 chown {user_name}:{user_name} /home/{user_name}/.ssh
3953 chmod 700 /home/{user_name}/.ssh
3954 touch /home/{user_name}/.ssh/authorized_keys
3955 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
3956 chmod 600 /home/{user_name}/.ssh/authorized_keys
3957 # make centos with selinux happy
3958 which restorecon && restorecon -Rv /home/{user_name}/.ssh
3959 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
3960 else
3961 touch /home/{user_name}/.ssh/authorized_keys
3962 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
3963 chmod 600 /home/{user_name}/.ssh/authorized_keys
3964 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
3965 fi
3966 fi
3967 fi""".format(key=key, user_name=user_name, user_key=user_key)
3968
3969 for vm in vapp._get_vms():
3970 vm_name = vm.name
3971 task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
3972 if isinstance(task, GenericTask):
3973 self.vca.block_until_completed(task)
3974 self.logger.info("cloud_init : customized guest os task "\
3975 "completed for VM {}".format(vm_name))
3976 else:
3977 self.logger.error("cloud_init : task for customized guest os"\
3978 "failed for VM {}".format(vm_name))
3979 except Exception as exp:
3980 self.logger.error("cloud_init : exception occurred while injecting "\
3981 "ssh-key")
3982 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
3983 "ssh-key".format(exp))
3984
3985
3986 def add_new_disk(self, vapp_uuid, disk_size):
3987 """
3988 Method to create an empty vm disk
3989
3990 Args:
3991 vapp_uuid - is vapp identifier.
3992 disk_size - size of disk to be created in GB
3993
3994 Returns:
3995 None
3996 """
3997 status = False
3998 vm_details = None
3999 try:
4000 #Disk size in GB, convert it into MB
4001 if disk_size is not None:
4002 disk_size_mb = int(disk_size) * 1024
4003 vm_details = self.get_vapp_details_rest(vapp_uuid)
4004
4005 if vm_details and "vm_virtual_hardware" in vm_details:
4006 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4007 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4008 status = self.add_new_disk_rest(disk_href, disk_size_mb)
4009
4010 except Exception as exp:
4011 msg = "Error occurred while creating new disk {}.".format(exp)
4012 self.rollback_newvm(vapp_uuid, msg)
4013
4014 if status:
4015 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
4016 else:
4017 #If failed to add disk, delete VM
4018 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
4019 self.rollback_newvm(vapp_uuid, msg)
4020
4021
    def add_new_disk_rest(self, disk_href, disk_size_mb):
        """
        Retrives vApp Disks section & add new empty disk

        Args:
            disk_href: Disk section href to addd disk
            disk_size_mb: Disk size in MB

        Returns: Status of add new disk task (True when the vCD task
                 completed successfully, False otherwise)
        """
        status = False
        if self.vca.vcloud_session and self.vca.vcloud_session.organization:
            response = Http.get(url=disk_href,
                                headers=self.vca.vcloud_session.get_vcloud_headers(),
                                verify=self.vca.verify,
                                logger=self.vca.logger)

        # NOTE(review): 'response' is unbound when there is no active vcloud
        # session - confirm callers always hold a session at this point.
        if response.status_code == 403:
            # Session token expired - re-login and retry once
            response = self.retry_rest('GET', disk_href)

        if response.status_code != requests.codes.ok:
            self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
                              .format(disk_href, response.status_code))
            return status
        try:
            #Find but type & max of instance IDs assigned to disks
            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
            # dict.iteritems(): this module targets Python 2
            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
            instance_id = 0
            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                if item.find("rasd:Description",namespaces).text == "Hard disk":
                    inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
                    if inst_id > instance_id:
                        instance_id = inst_id
                    # Reuse the bus type/subtype of the existing hard disks
                    # for the new one.
                    disk_item = item.find("rasd:HostResource" ,namespaces)
                    bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
                    bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]

            # The new disk gets the next free instance id
            instance_id = instance_id + 1
            new_item = """<Item>
                            <rasd:Description>Hard disk</rasd:Description>
                            <rasd:ElementName>New disk</rasd:ElementName>
                            <rasd:HostResource
                                xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
                                vcloud:capacity="{}"
                                vcloud:busSubType="{}"
                                vcloud:busType="{}"></rasd:HostResource>
                            <rasd:InstanceID>{}</rasd:InstanceID>
                            <rasd:ResourceType>17</rasd:ResourceType>
                        </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)

            new_data = response.content
            #Add new item at the bottom
            new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))

            # Send PUT request to modify virtual hardware section with new disk
            headers = self.vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

            response = Http.put(url=disk_href,
                                data=new_data,
                                headers=headers,
                                verify=self.vca.verify, logger=self.logger)

            if response.status_code == 403:
                add_headers = {'Content-Type': headers['Content-Type']}
                response = self.retry_rest('PUT', disk_href, add_headers, new_data)

            if response.status_code != 202:
                self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
                                  .format(disk_href, response.status_code, response.content))
            else:
                # 202 Accepted: wait for the asynchronous vCD task
                add_disk_task = taskType.parseString(response.content, True)
                if type(add_disk_task) is GenericTask:
                    status = self.vca.block_until_completed(add_disk_task)
                    if not status:
                        self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))

        except Exception as exp:
            self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))

        return status
4105
4106
4107 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
4108 """
4109 Method to add existing disk to vm
4110 Args :
4111 catalogs - List of VDC catalogs
4112 image_id - Catalog ID
4113 template_name - Name of template in catalog
4114 vapp_uuid - UUID of vApp
4115 Returns:
4116 None
4117 """
4118 disk_info = None
4119 vcenter_conect, content = self.get_vcenter_content()
4120 #find moref-id of vm in image
4121 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
4122 image_id=image_id,
4123 )
4124
4125 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
4126 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
4127 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
4128 if catalog_vm_moref_id:
4129 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
4130 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
4131 if catalog_vm_obj:
4132 #find existing disk
4133 disk_info = self.find_disk(catalog_vm_obj)
4134 else:
4135 exp_msg = "No VM with image id {} found".format(image_id)
4136 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4137 else:
4138 exp_msg = "No Image found with image ID {} ".format(image_id)
4139 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
4140
4141 if disk_info:
4142 self.logger.info("Existing disk_info : {}".format(disk_info))
4143 #get VM
4144 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4145 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
4146 if vm_obj:
4147 status = self.add_disk(vcenter_conect=vcenter_conect,
4148 vm=vm_obj,
4149 disk_info=disk_info,
4150 size=size,
4151 vapp_uuid=vapp_uuid
4152 )
4153 if status:
4154 self.logger.info("Disk from image id {} added to {}".format(image_id,
4155 vm_obj.config.name)
4156 )
4157 else:
4158 msg = "No disk found with image id {} to add in VM {}".format(
4159 image_id,
4160 vm_obj.config.name)
4161 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
4162
4163
4164 def find_disk(self, vm_obj):
4165 """
4166 Method to find details of existing disk in VM
4167 Args :
4168 vm_obj - vCenter object of VM
4169 image_id - Catalog ID
4170 Returns:
4171 disk_info : dict of disk details
4172 """
4173 disk_info = {}
4174 if vm_obj:
4175 try:
4176 devices = vm_obj.config.hardware.device
4177 for device in devices:
4178 if type(device) is vim.vm.device.VirtualDisk:
4179 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
4180 disk_info["full_path"] = device.backing.fileName
4181 disk_info["datastore"] = device.backing.datastore
4182 disk_info["capacityKB"] = device.capacityInKB
4183 break
4184 except Exception as exp:
4185 self.logger.error("find_disk() : exception occurred while "\
4186 "getting existing disk details :{}".format(exp))
4187 return disk_info
4188
4189
4190 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
4191 """
4192 Method to add existing disk in VM
4193 Args :
4194 vcenter_conect - vCenter content object
4195 vm - vCenter vm object
4196 disk_info : dict of disk details
4197 Returns:
4198 status : status of add disk task
4199 """
4200 datastore = disk_info["datastore"] if "datastore" in disk_info else None
4201 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
4202 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
4203 if size is not None:
4204 #Convert size from GB to KB
4205 sizeKB = int(size) * 1024 * 1024
4206 #compare size of existing disk and user given size.Assign whicherver is greater
4207 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
4208 sizeKB, capacityKB))
4209 if sizeKB > capacityKB:
4210 capacityKB = sizeKB
4211
4212 if datastore and fullpath and capacityKB:
4213 try:
4214 spec = vim.vm.ConfigSpec()
4215 # get all disks on a VM, set unit_number to the next available
4216 unit_number = 0
4217 for dev in vm.config.hardware.device:
4218 if hasattr(dev.backing, 'fileName'):
4219 unit_number = int(dev.unitNumber) + 1
4220 # unit_number 7 reserved for scsi controller
4221 if unit_number == 7:
4222 unit_number += 1
4223 if isinstance(dev, vim.vm.device.VirtualDisk):
4224 #vim.vm.device.VirtualSCSIController
4225 controller_key = dev.controllerKey
4226
4227 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
4228 unit_number, controller_key))
4229 # add disk here
4230 dev_changes = []
4231 disk_spec = vim.vm.device.VirtualDeviceSpec()
4232 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4233 disk_spec.device = vim.vm.device.VirtualDisk()
4234 disk_spec.device.backing = \
4235 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
4236 disk_spec.device.backing.thinProvisioned = True
4237 disk_spec.device.backing.diskMode = 'persistent'
4238 disk_spec.device.backing.datastore = datastore
4239 disk_spec.device.backing.fileName = fullpath
4240
4241 disk_spec.device.unitNumber = unit_number
4242 disk_spec.device.capacityInKB = capacityKB
4243 disk_spec.device.controllerKey = controller_key
4244 dev_changes.append(disk_spec)
4245 spec.deviceChange = dev_changes
4246 task = vm.ReconfigVM_Task(spec=spec)
4247 status = self.wait_for_vcenter_task(task, vcenter_conect)
4248 return status
4249 except Exception as exp:
4250 exp_msg = "add_disk() : exception {} occurred while adding disk "\
4251 "{} to vm {}".format(exp,
4252 fullpath,
4253 vm.config.name)
4254 self.rollback_newvm(vapp_uuid, exp_msg)
4255 else:
4256 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
4257 self.rollback_newvm(vapp_uuid, msg)
4258
4259
4260 def get_vcenter_content(self):
4261 """
4262 Get the vsphere content object
4263 """
4264 try:
4265 vm_vcenter_info = self.get_vm_vcenter_info()
4266 except Exception as exp:
4267 self.logger.error("Error occurred while getting vCenter infromationn"\
4268 " for VM : {}".format(exp))
4269 raise vimconn.vimconnException(message=exp)
4270
4271 context = None
4272 if hasattr(ssl, '_create_unverified_context'):
4273 context = ssl._create_unverified_context()
4274
4275 vcenter_conect = SmartConnect(
4276 host=vm_vcenter_info["vm_vcenter_ip"],
4277 user=vm_vcenter_info["vm_vcenter_user"],
4278 pwd=vm_vcenter_info["vm_vcenter_password"],
4279 port=int(vm_vcenter_info["vm_vcenter_port"]),
4280 sslContext=context
4281 )
4282 atexit.register(Disconnect, vcenter_conect)
4283 content = vcenter_conect.RetrieveContent()
4284 return vcenter_conect, content
4285
4286
4287 def get_vm_moref_id(self, vapp_uuid):
4288 """
4289 Get the moref_id of given VM
4290 """
4291 try:
4292 if vapp_uuid:
4293 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
4294 if vm_details and "vm_vcenter_info" in vm_details:
4295 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
4296
4297 return vm_moref_id
4298
4299 except Exception as exp:
4300 self.logger.error("Error occurred while getting VM moref ID "\
4301 " for VM : {}".format(exp))
4302 return None
4303
4304
    def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
        """
        Method to get vApp template details
        Args :
            catalogs - list of VDC catalogs
            image_id - Catalog ID to find
            template_name : template name in catalog
        Returns:
            parsed_respond : dict of vApp tempalte details; contains key
            'vm_vcenter_info' (with 'vm_moref_id') when the template exposes
            its backing vCenter VM, empty dict otherwise
        """
        parsed_response = {}

        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")

        try:
            catalog = self.get_catalog_obj(image_id, catalogs)
            if catalog:
                # Resolve the template name from the catalog id and pick the
                # single matching catalog item.
                template_name = self.get_catalogbyid(image_id, catalogs)
                # NOTE: len() on filter() only works on Python 2, where
                # filter() returns a list.
                catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
                if len(catalog_items) == 1:
                    response = Http.get(catalog_items[0].get_href(),
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                    catalogItem = XmlElementTree.fromstring(response.content)
                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                    vapp_tempalte_href = entity.get("href")
                    #get vapp details and parse moref id

                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                                  'vmw': 'http://www.vmware.com/schema/ovf',
                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
                                  }

                    if vca.vcloud_session and vca.vcloud_session.organization:
                        response = Http.get(url=vapp_tempalte_href,
                                            headers=vca.vcloud_session.get_vcloud_headers(),
                                            verify=vca.verify,
                                            logger=vca.logger
                                            )

                        if response.status_code != requests.codes.ok:
                            self.logger.debug("REST API call {} failed. Return status code {}".format(
                                vapp_tempalte_href, response.status_code))

                        else:
                            # Walk the template XML down to the vCloud
                            # extension section that stores the vCenter VM
                            # moref id.
                            xmlroot_respond = XmlElementTree.fromstring(response.content)
                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
                            if children_section is not None:
                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                                if vCloud_extension_section is not None:
                                    vm_vcenter_info = {}
                                    vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                                    vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                                    if vmext is not None:
                                        vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                                    parsed_response["vm_vcenter_info"]= vm_vcenter_info

        except Exception as exp :
            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))

        return parsed_response
4373
4374
4375 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
4376 """
4377 Method to delete vApp
4378 Args :
4379 vapp_uuid - vApp UUID
4380 msg - Error message to be logged
4381 exp_type : Exception type
4382 Returns:
4383 None
4384 """
4385 if vapp_uuid:
4386 status = self.delete_vminstance(vapp_uuid)
4387 else:
4388 msg = "No vApp ID"
4389 self.logger.error(msg)
4390 if exp_type == "Genric":
4391 raise vimconn.vimconnException(msg)
4392 elif exp_type == "NotFound":
4393 raise vimconn.vimconnNotFoundException(message=msg)
4394
4395 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
4396 """
4397 Method to attach SRIOV adapters to VM
4398
4399 Args:
4400 vapp_uuid - uuid of vApp/VM
4401 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
4402 vmname_andid - vmname
4403
4404 Returns:
4405 The status of add SRIOV adapter task , vm object and
4406 vcenter_conect object
4407 """
4408 vm_obj = None
4409 vcenter_conect, content = self.get_vcenter_content()
4410 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4411
4412 if vm_moref_id:
4413 try:
4414 no_of_sriov_devices = len(sriov_nets)
4415 if no_of_sriov_devices > 0:
4416 #Get VM and its host
4417 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4418 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4419 if host_obj and vm_obj:
4420 #get SRIOV devies from host on which vapp is currently installed
4421 avilable_sriov_devices = self.get_sriov_devices(host_obj,
4422 no_of_sriov_devices,
4423 )
4424
4425 if len(avilable_sriov_devices) == 0:
4426 #find other hosts with active pci devices
4427 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
4428 content,
4429 no_of_sriov_devices,
4430 )
4431
4432 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
4433 #Migrate vm to the host where SRIOV devices are available
4434 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
4435 new_host_obj))
4436 task = self.relocate_vm(new_host_obj, vm_obj)
4437 if task is not None:
4438 result = self.wait_for_vcenter_task(task, vcenter_conect)
4439 self.logger.info("Migrate VM status: {}".format(result))
4440 host_obj = new_host_obj
4441 else:
4442 self.logger.info("Fail to migrate VM : {}".format(result))
4443 raise vimconn.vimconnNotFoundException(
4444 "Fail to migrate VM : {} to host {}".format(
4445 vmname_andid,
4446 new_host_obj)
4447 )
4448
4449 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
4450 #Add SRIOV devices one by one
4451 for sriov_net in sriov_nets:
4452 network_name = sriov_net.get('net_id')
4453 dvs_portgr_name = self.create_dvPort_group(network_name)
4454 if sriov_net.get('type') == "VF":
4455 #add vlan ID ,Modify portgroup for vlan ID
4456 self.configure_vlanID(content, vcenter_conect, network_name)
4457
4458 task = self.add_sriov_to_vm(content,
4459 vm_obj,
4460 host_obj,
4461 network_name,
4462 avilable_sriov_devices[0]
4463 )
4464 if task:
4465 status= self.wait_for_vcenter_task(task, vcenter_conect)
4466 if status:
4467 self.logger.info("Added SRIOV {} to VM {}".format(
4468 no_of_sriov_devices,
4469 str(vm_obj)))
4470 else:
4471 self.logger.error("Fail to add SRIOV {} to VM {}".format(
4472 no_of_sriov_devices,
4473 str(vm_obj)))
4474 raise vimconn.vimconnUnexpectedResponse(
4475 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
4476 )
4477 return True, vm_obj, vcenter_conect
4478 else:
4479 self.logger.error("Currently there is no host with"\
4480 " {} number of avaialble SRIOV "\
4481 "VFs required for VM {}".format(
4482 no_of_sriov_devices,
4483 vmname_andid)
4484 )
4485 raise vimconn.vimconnNotFoundException(
4486 "Currently there is no host with {} "\
4487 "number of avaialble SRIOV devices required for VM {}".format(
4488 no_of_sriov_devices,
4489 vmname_andid))
4490 else:
4491 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
4492
4493 except vmodl.MethodFault as error:
4494 self.logger.error("Error occurred while adding SRIOV {} ",error)
4495 return None, vm_obj, vcenter_conect
4496
4497
4498 def get_sriov_devices(self,host, no_of_vfs):
4499 """
4500 Method to get the details of SRIOV devices on given host
4501 Args:
4502 host - vSphere host object
4503 no_of_vfs - number of VFs needed on host
4504
4505 Returns:
4506 array of SRIOV devices
4507 """
4508 sriovInfo=[]
4509 if host:
4510 for device in host.config.pciPassthruInfo:
4511 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
4512 if device.numVirtualFunction >= no_of_vfs:
4513 sriovInfo.append(device)
4514 break
4515 return sriovInfo
4516
4517
4518 def get_host_and_sriov_devices(self, content, no_of_vfs):
4519 """
4520 Method to get the details of SRIOV devices infromation on all hosts
4521
4522 Args:
4523 content - vSphere host object
4524 no_of_vfs - number of pci VFs needed on host
4525
4526 Returns:
4527 array of SRIOV devices and host object
4528 """
4529 host_obj = None
4530 sriov_device_objs = None
4531 try:
4532 if content:
4533 container = content.viewManager.CreateContainerView(content.rootFolder,
4534 [vim.HostSystem], True)
4535 for host in container.view:
4536 devices = self.get_sriov_devices(host, no_of_vfs)
4537 if devices:
4538 host_obj = host
4539 sriov_device_objs = devices
4540 break
4541 except Exception as exp:
4542 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
4543
4544 return host_obj,sriov_device_objs
4545
4546
4547 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
4548 """
4549 Method to add SRIOV adapter to vm
4550
4551 Args:
4552 host_obj - vSphere host object
4553 vm_obj - vSphere vm object
4554 content - vCenter content object
4555 network_name - name of distributed virtaul portgroup
4556 sriov_device - SRIOV device info
4557
4558 Returns:
4559 task object
4560 """
4561 devices = []
4562 vnic_label = "sriov nic"
4563 try:
4564 dvs_portgr = self.get_dvport_group(network_name)
4565 network_name = dvs_portgr.name
4566 nic = vim.vm.device.VirtualDeviceSpec()
4567 # VM device
4568 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
4569 nic.device = vim.vm.device.VirtualSriovEthernetCard()
4570 nic.device.addressType = 'assigned'
4571 #nic.device.key = 13016
4572 nic.device.deviceInfo = vim.Description()
4573 nic.device.deviceInfo.label = vnic_label
4574 nic.device.deviceInfo.summary = network_name
4575 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
4576
4577 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
4578 nic.device.backing.deviceName = network_name
4579 nic.device.backing.useAutoDetect = False
4580 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
4581 nic.device.connectable.startConnected = True
4582 nic.device.connectable.allowGuestControl = True
4583
4584 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
4585 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
4586 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
4587
4588 devices.append(nic)
4589 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
4590 task = vm_obj.ReconfigVM_Task(vmconf)
4591 return task
4592 except Exception as exp:
4593 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
4594 return None
4595
4596
4597 def create_dvPort_group(self, network_name):
4598 """
4599 Method to create disributed virtual portgroup
4600
4601 Args:
4602 network_name - name of network/portgroup
4603
4604 Returns:
4605 portgroup key
4606 """
4607 try:
4608 new_network_name = [network_name, '-', str(uuid.uuid4())]
4609 network_name=''.join(new_network_name)
4610 vcenter_conect, content = self.get_vcenter_content()
4611
4612 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
4613 if dv_switch:
4614 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4615 dv_pg_spec.name = network_name
4616
4617 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
4618 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4619 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
4620 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
4621 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
4622 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
4623
4624 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
4625 self.wait_for_vcenter_task(task, vcenter_conect)
4626
4627 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
4628 if dvPort_group:
4629 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
4630 return dvPort_group.key
4631 else:
4632 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
4633
4634 except Exception as exp:
4635 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
4636 " : {}".format(network_name, exp))
4637 return None
4638
4639 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
4640 """
4641 Method to reconfigure disributed virtual portgroup
4642
4643 Args:
4644 dvPort_group_name - name of disributed virtual portgroup
4645 content - vCenter content object
4646 config_info - disributed virtual portgroup configuration
4647
4648 Returns:
4649 task object
4650 """
4651 try:
4652 dvPort_group = self.get_dvport_group(dvPort_group_name)
4653 if dvPort_group:
4654 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
4655 dv_pg_spec.configVersion = dvPort_group.config.configVersion
4656 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
4657 if "vlanID" in config_info:
4658 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
4659 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
4660
4661 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
4662 return task
4663 else:
4664 return None
4665 except Exception as exp:
4666 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
4667 " : {}".format(dvPort_group_name, exp))
4668 return None
4669
4670
4671 def destroy_dvport_group(self , dvPort_group_name):
4672 """
4673 Method to destroy disributed virtual portgroup
4674
4675 Args:
4676 network_name - name of network/portgroup
4677
4678 Returns:
4679 True if portgroup successfully got deleted else false
4680 """
4681 vcenter_conect, content = self.get_vcenter_content()
4682 try:
4683 status = None
4684 dvPort_group = self.get_dvport_group(dvPort_group_name)
4685 if dvPort_group:
4686 task = dvPort_group.Destroy_Task()
4687 status = self.wait_for_vcenter_task(task, vcenter_conect)
4688 return status
4689 except vmodl.MethodFault as exp:
4690 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
4691 exp, dvPort_group_name))
4692 return None
4693
4694
4695 def get_dvport_group(self, dvPort_group_name):
4696 """
4697 Method to get disributed virtual portgroup
4698
4699 Args:
4700 network_name - name of network/portgroup
4701
4702 Returns:
4703 portgroup object
4704 """
4705 vcenter_conect, content = self.get_vcenter_content()
4706 dvPort_group = None
4707 try:
4708 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
4709 for item in container.view:
4710 if item.key == dvPort_group_name:
4711 dvPort_group = item
4712 break
4713 return dvPort_group
4714 except vmodl.MethodFault as exp:
4715 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4716 exp, dvPort_group_name))
4717 return None
4718
4719 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
4720 """
4721 Method to get disributed virtual portgroup vlanID
4722
4723 Args:
4724 network_name - name of network/portgroup
4725
4726 Returns:
4727 vlan ID
4728 """
4729 vlanId = None
4730 try:
4731 dvPort_group = self.get_dvport_group(dvPort_group_name)
4732 if dvPort_group:
4733 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
4734 except vmodl.MethodFault as exp:
4735 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
4736 exp, dvPort_group_name))
4737 return vlanId
4738
4739
4740 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
4741 """
4742 Method to configure vlanID in disributed virtual portgroup vlanID
4743
4744 Args:
4745 network_name - name of network/portgroup
4746
4747 Returns:
4748 None
4749 """
4750 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
4751 if vlanID == 0:
4752 #configure vlanID
4753 vlanID = self.genrate_vlanID(dvPort_group_name)
4754 config = {"vlanID":vlanID}
4755 task = self.reconfig_portgroup(content, dvPort_group_name,
4756 config_info=config)
4757 if task:
4758 status= self.wait_for_vcenter_task(task, vcenter_conect)
4759 if status:
4760 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
4761 dvPort_group_name,vlanID))
4762 else:
4763 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
4764 dvPort_group_name, vlanID))
4765
4766
4767 def genrate_vlanID(self, network_name):
4768 """
4769 Method to get unused vlanID
4770 Args:
4771 network_name - name of network/portgroup
4772 Returns:
4773 vlanID
4774 """
4775 vlan_id = None
4776 used_ids = []
4777 if self.config.get('vlanID_range') == None:
4778 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
4779 "at config value before creating sriov network with vlan tag")
4780 if "used_vlanIDs" not in self.persistent_info:
4781 self.persistent_info["used_vlanIDs"] = {}
4782 else:
4783 used_ids = self.persistent_info["used_vlanIDs"].values()
4784
4785 for vlanID_range in self.config.get('vlanID_range'):
4786 start_vlanid , end_vlanid = vlanID_range.split("-")
4787 if start_vlanid > end_vlanid:
4788 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
4789 vlanID_range))
4790
4791 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
4792 if id not in used_ids:
4793 vlan_id = id
4794 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
4795 return vlan_id
4796 if vlan_id is None:
4797 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
4798
4799
4800 def get_obj(self, content, vimtype, name):
4801 """
4802 Get the vsphere object associated with a given text name
4803 """
4804 obj = None
4805 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
4806 for item in container.view:
4807 if item.name == name:
4808 obj = item
4809 break
4810 return obj
4811
4812
4813 def insert_media_to_vm(self, vapp, image_id):
4814 """
4815 Method to insert media CD-ROM (ISO image) from catalog to vm.
4816 vapp - vapp object to get vm id
4817 Image_id - image id for cdrom to be inerted to vm
4818 """
4819 # create connection object
4820 vca = self.connect()
4821 try:
4822 # fetching catalog details
4823 rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
4824 response = Http.get(url=rest_url,
4825 headers=vca.vcloud_session.get_vcloud_headers(),
4826 verify=vca.verify,
4827 logger=vca.logger)
4828
4829 if response.status_code != 200:
4830 self.logger.error("REST call {} failed reason : {}"\
4831 "status code : {}".format(url_rest_call,
4832 response.content,
4833 response.status_code))
4834 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
4835 "catalog details")
4836 # searching iso name and id
4837 iso_name,media_id = self.get_media_details(vca, response.content)
4838
4839 if iso_name and media_id:
4840 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
4841 <ns6:MediaInsertOrEjectParams
4842 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
4843 <ns6:Media
4844 type="application/vnd.vmware.vcloud.media+xml"
4845 name="{}.iso"
4846 id="urn:vcloud:media:{}"
4847 href="https://{}/api/media/{}"/>
4848 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
4849 vca.host,media_id)
4850
4851 for vms in vapp._get_vms():
4852 vm_id = (vms.id).split(':')[-1]
4853
4854 headers = vca.vcloud_session.get_vcloud_headers()
4855 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
4856 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
4857
4858 response = Http.post(url=rest_url,
4859 headers=headers,
4860 data=data,
4861 verify=vca.verify,
4862 logger=vca.logger)
4863
4864 if response.status_code != 202:
4865 self.logger.error("Failed to insert CD-ROM to vm")
4866 raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
4867 "ISO image to vm")
4868 else:
4869 task = taskType.parseString(response.content, True)
4870 if isinstance(task, GenericTask):
4871 vca.block_until_completed(task)
4872 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
4873 " image to vm {}".format(vm_id))
4874 except Exception as exp:
4875 self.logger.error("insert_media_to_vm() : exception occurred "\
4876 "while inserting media CD-ROM")
4877 raise vimconn.vimconnException(message=exp)
4878
4879
    def get_media_details(self, vca, content):
        """
        Method to get catalog item details
        vca - connection object
        content - Catalog details (XML payload of a catalog GET)
        Return - Media name, media id ; (False, False) when no media entry
        is found in the catalog
        """
        cataloghref_list = []
        try:
            if content:
                # collect hrefs of every CatalogItem element in the catalog
                vm_list_xmlroot = XmlElementTree.fromstring(content)
                for child in vm_list_xmlroot.iter():
                    if 'CatalogItem' in child.tag:
                        cataloghref_list.append(child.attrib.get('href'))
                if cataloghref_list is not None:
                    for href in cataloghref_list:
                        if href:
                            # fetch each catalog item and look for a media entity
                            response = Http.get(url=href,
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=vca.logger)
                            if response.status_code != 200:
                                self.logger.error("REST call {} failed reason : {}"\
                                        "status code : {}".format(href,
                                                           response.content,
                                                      response.status_code))
                                raise vimconn.vimconnException("get_media_details : Failed to get "\
                                                         "catalogitem details")
                            list_xmlroot = XmlElementTree.fromstring(response.content)
                            for child in list_xmlroot.iter():
                                if 'Entity' in child.tag:
                                    if 'media' in child.attrib.get('href'):
                                        # first media entity wins; its id is the
                                        # last path segment of the href
                                        name = child.attrib.get('name')
                                        media_id = child.attrib.get('href').split('/').pop()
                                        return name,media_id
            else:
                self.logger.debug("Media name and id not found")
                return False,False
        except Exception as exp:
            self.logger.error("get_media_details : exception occurred "\
                              "getting media details")
            raise vimconn.vimconnException(message=exp)
4922
4923
4924 def retry_rest(self, api, url, add_headers=None, data=None):
4925 """ Method to get Token & retry respective REST request
4926 Args:
4927 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
4928 url - request url to be used
4929 add_headers - Additional headers (optional)
4930 data - Request payload data to be passed in request
4931 Returns:
4932 response - Response of request
4933 """
4934 response = None
4935
4936 #Get token
4937 self.get_token()
4938
4939 headers=self.vca.vcloud_session.get_vcloud_headers()
4940
4941 if add_headers:
4942 headers.update(add_headers)
4943
4944 if api == 'GET':
4945 response = Http.get(url=url,
4946 headers=headers,
4947 verify=self.vca.verify,
4948 logger=self.vca.logger)
4949 return response
4950 elif api == 'PUT':
4951 if headers:
4952 headers.append
4953 response = Http.put(url=url,
4954 data=data,
4955 headers=headers,
4956 verify=self.vca.verify, logger=self.logger)
4957 return response
4958 elif api == 'POST':
4959 response = Http.post(url=url,
4960 headers=headers,
4961 verify=self.vca.verify,
4962 logger=self.vca.logger)
4963
    def get_token(self):
        """ Generate a new token if expired

            Performs a two-step login (password, then token) against vCloud
            director and stores the fresh connection in self.vca.

            Returns:
                The return vca object that letter can be used to connect to vCloud director as admin for VDC

            Raises:
                vimconnConnectionException when either login step fails
        """
        vca = None

        try:
            self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
                                                                                      self.user,
                                                                                      self.org_name))
            vca = VCA(host=self.url,
                      username=self.user,
                      service_type=STANDALONE,
                      version=VCAVERSION,
                      verify=False,
                      log=False)

            # first login with password obtains a token; second login with
            # that token binds the session to the org URL
            result = vca.login(password=self.passwd, org=self.org_name)
            if result is True:
                result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
                if result is True:
                    self.logger.info(
                        "Successfully generated token for vcloud direct org: {} as user: {}".format(self.org_name, self.user))
                    #Update vca
                    self.vca = vca
                    return

        # NOTE(review): bare except swallows every error class here,
        # including KeyboardInterrupt - consider narrowing to Exception
        except:
            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
                                                     "{} as user: {}".format(self.org_name, self.user))

        # reached only when a login step returned False without raising
        if not vca or not result:
            raise vimconn.vimconnConnectionException("self.connect() is failed while reconnecting")
4999
5000
5001 def get_vdc_details(self):
5002 """ Get VDC details using pyVcloud Lib
5003
5004 Returns vdc object
5005 """
5006 vdc = self.vca.get_vdc(self.tenant_name)
5007
5008 #Retry once, if failed by refreshing token
5009 if vdc is None:
5010 self.get_token()
5011 vdc = self.vca.get_vdc(self.tenant_name)
5012
5013 return vdc
5014