[osm/RO.git] / RO-VIM-vmware / osm_rovim_vmware / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implements an abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 from osm_ro import vimconn
31 import os
32 import shutil
33 import subprocess
34 import tempfile
35 import traceback
36 import itertools
37 import requests
38 import ssl
39 import atexit
40
41 from pyVmomi import vim, vmodl
42 from pyVim.connect import SmartConnect, Disconnect
43
44 from xml.etree import ElementTree as XmlElementTree
45 from lxml import etree as lxmlElementTree
46
47 import yaml
48 from pyvcloud.vcd.client import BasicLoginCredentials, Client, VcdTaskException
49 from pyvcloud.vcd.vdc import VDC
50 from pyvcloud.vcd.org import Org
51 import re
52 from pyvcloud.vcd.vapp import VApp
53 from xml.sax.saxutils import escape
54 import logging
55 import json
56 import time
57 import uuid
58 # import httplib
59 #For python3
60 #import http.client # TODO py3 check
61 import hashlib
62 import socket
63 import struct
64 import netaddr
65 import random
66
67 # global variable for vcd connector type
68 STANDALONE = 'standalone'
69
70 # key for flavor dicts
71 FLAVOR_RAM_KEY = 'ram'
72 FLAVOR_VCPUS_KEY = 'vcpus'
73 FLAVOR_DISK_KEY = 'disk'
74 DEFAULT_IP_PROFILE = {'dhcp_count':50,
75 'dhcp_enabled':True,
76 'ip_version':"IPv4"
77 }
78 # global variable for wait time
79 INTERVAL_TIME = 5
80 MAX_WAIT_TIME = 1800
81
82 API_VERSION = '27.0'
83
84 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
85 __date__ = "$09-Mar-2018 11:09:29$"
86 __version__ = '0.2'
87
88 # -1: "Could not be created",
89 # 0: "Unresolved",
90 # 1: "Resolved",
91 # 2: "Deployed",
92 # 3: "Suspended",
93 # 4: "Powered on",
94 # 5: "Waiting for user input",
95 # 6: "Unknown state",
96 # 7: "Unrecognized state",
97 # 8: "Powered off",
98 # 9: "Inconsistent state",
99 # 10: "Children do not all have the same status",
100 # 11: "Upload initiated, OVF descriptor pending",
101 # 12: "Upload initiated, copying contents",
102 # 13: "Upload initiated , disk contents pending",
103 # 14: "Upload has been quarantined",
104 # 15: "Upload quarantine period has expired"
105
106 # mapping vCD status to MANO
107 vcdStatusCode2manoFormat = {4: 'ACTIVE',
108 7: 'PAUSED',
109 3: 'SUSPENDED',
110 8: 'INACTIVE',
111 12: 'BUILD',
112 -1: 'ERROR',
113 14: 'DELETED'}
114
115 #
116 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
117 'ERROR': 'ERROR', 'DELETED': 'DELETED'
118 }
119
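# --- Illustrative example (added for documentation only; not used by the connector) ---
# A minimal sketch showing how the mapping tables above translate a raw vCD status code into the
# MANO status string. The 'OTHER' fallback for unlisted codes is an assumption, not part of the
# original mapping.
def _example_map_vcd_status(vcd_status_code):
    """Return the MANO status string for a raw vCD status code (sketch only)."""
    return vcdStatusCode2manoFormat.get(vcd_status_code, 'OTHER')
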
120 class vimconnector(vimconn.vimconnector):
121 # dict used to store flavor in memory
122 flavorlist = {}
123
124 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
125 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
126 """
127         Constructor creates a VMware connector to vCloud Director.
128
129         By default the constructor doesn't validate the connection state, so the client can create an object with None arguments.
130         If the client specifies username, password, host and VDC name, the connector initializes the other missing attributes.
131
132         a) It initializes the organization UUID.
133         b) It initializes tenant_id/VDC ID (this information is derived from the tenant name).
134
135         Args:
136             uuid - organization uuid.
137             name - organization name; it must be present in vCloud Director.
138             tenant_id - VDC uuid; it must be present in vCloud Director.
139             tenant_name - VDC name.
140             url - hostname or IP address of vCloud Director.
141             url_admin - same as above.
142             user - user that is administrator for the organization. The caller must make sure that
143                 the username has the right privileges.
144
145             password - password for the user.
146
147             The VMware connector also requires PVDC administrative privileges and a separate account.
148             These variables must be passed via the config argument, a dict containing the keys
149
150             dict['admin_username']
151             dict['admin_password']
152             config - provides NSX and vCenter information
153
154         Returns:
155             Nothing.
156 """
157
158 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
159 url_admin, user, passwd, log_level, config)
160
161 self.logger = logging.getLogger('openmano.vim.vmware')
162 self.logger.setLevel(10)
163 self.persistent_info = persistent_info
164
165 self.name = name
166 self.id = uuid
167 self.url = url
168 self.url_admin = url_admin
169 self.tenant_id = tenant_id
170 self.tenant_name = tenant_name
171 self.user = user
172 self.passwd = passwd
173 self.config = config
174 self.admin_password = None
175 self.admin_user = None
176 self.org_name = ""
177 self.nsx_manager = None
178 self.nsx_user = None
179 self.nsx_password = None
180 self.availability_zone = None
181
182 # Disable warnings from self-signed certificates.
183 requests.packages.urllib3.disable_warnings()
184
185 if tenant_name is not None:
186 orgnameandtenant = tenant_name.split(":")
187 if len(orgnameandtenant) == 2:
188 self.tenant_name = orgnameandtenant[1]
189 self.org_name = orgnameandtenant[0]
190 else:
191 self.tenant_name = tenant_name
192 if "orgname" in config:
193 self.org_name = config['orgname']
194
195 if log_level:
196 self.logger.setLevel(getattr(logging, log_level))
197
198 try:
199 self.admin_user = config['admin_username']
200 self.admin_password = config['admin_password']
201 except KeyError:
202 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
203
204 try:
205 self.nsx_manager = config['nsx_manager']
206 self.nsx_user = config['nsx_user']
207 self.nsx_password = config['nsx_password']
208 except KeyError:
209 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
210
211 self.vcenter_ip = config.get("vcenter_ip", None)
212 self.vcenter_port = config.get("vcenter_port", None)
213 self.vcenter_user = config.get("vcenter_user", None)
214 self.vcenter_password = config.get("vcenter_password", None)
215
216 #Set availability zone for Affinity rules
217 self.availability_zone = self.set_availability_zones()
218
219 # ############# Stub code for SRIOV #################
220 # try:
221 # self.dvs_name = config['dv_switch_name']
222 # except KeyError:
223         #     raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in Config")
224 #
225 # self.vlanID_range = config.get("vlanID_range", None)
226
227 self.org_uuid = None
228 self.client = None
229
230 if not url:
231 raise vimconn.vimconnException('url param can not be NoneType')
232
233 if not self.url_admin: # try to use normal url
234 self.url_admin = self.url
235
236 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
237 self.tenant_id, self.tenant_name))
238 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
239         logging.debug("vcd admin username {} vcd admin password {}".format(self.admin_user, self.admin_password))
240
241 # initialize organization
242 if self.user is not None and self.passwd is not None and self.url:
243 self.init_organization()
244
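    # --- Illustrative example (added for documentation only; not called by the connector) ---
    # A minimal sketch of the 'config' dict expected by the constructor above. All values are
    # placeholders; only the key names come from __init__.
    @staticmethod
    def _example_config():
        return {'admin_username': 'pvdc-admin',             # PVDC administrative account
                'admin_password': 'pvdc-admin-password',
                'nsx_manager': 'https://nsx.example.com',   # NSX manager endpoint
                'nsx_user': 'nsx-admin',
                'nsx_password': 'nsx-password',
                'orgname': 'my-organization',               # used when tenant_name has no "org:vdc" prefix
                'vcenter_ip': '10.0.0.10',
                'vcenter_port': '443',
                'vcenter_user': 'administrator@vsphere.local',
                'vcenter_password': 'vcenter-password'}
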
245 def __getitem__(self, index):
246 if index == 'name':
247 return self.name
248 if index == 'tenant_id':
249 return self.tenant_id
250 if index == 'tenant_name':
251 return self.tenant_name
252 elif index == 'id':
253 return self.id
254 elif index == 'org_name':
255 return self.org_name
256 elif index == 'org_uuid':
257 return self.org_uuid
258 elif index == 'user':
259 return self.user
260 elif index == 'passwd':
261 return self.passwd
262 elif index == 'url':
263 return self.url
264 elif index == 'url_admin':
265 return self.url_admin
266 elif index == "config":
267 return self.config
268 else:
269 raise KeyError("Invalid key '{}'".format(index))
270
271 def __setitem__(self, index, value):
272 if index == 'name':
273 self.name = value
274 if index == 'tenant_id':
275 self.tenant_id = value
276 if index == 'tenant_name':
277 self.tenant_name = value
278 elif index == 'id':
279 self.id = value
280 elif index == 'org_name':
281 self.org_name = value
282 elif index == 'org_uuid':
283 self.org_uuid = value
284 elif index == 'user':
285 self.user = value
286 elif index == 'passwd':
287 self.passwd = value
288 elif index == 'url':
289 self.url = value
290 elif index == 'url_admin':
291 self.url_admin = value
292 else:
293 raise KeyError("Invalid key '{}'".format(index))
294
295 def connect_as_admin(self):
296         """ Method connects as the PVDC admin user to vCloud Director.
297         There are certain actions that can be done only by the provider VDC admin user,
298         e.g. organization creation or provider network creation.
299
300         Returns:
301             The client object that can later be used to connect to vCloud Director as admin for the provider VDC
302 """
303 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
304
305 try:
306 host = self.url
307 org = 'System'
308 client_as_admin = Client(host, verify_ssl_certs=False)
309 client_as_admin.set_highest_supported_version()
310 client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
311 except Exception as e:
312 raise vimconn.vimconnException(
313 "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
314
315 return client_as_admin
316
317 def connect(self):
318         """ Method connects as a normal user to vCloud Director.
319
320         Returns:
321             The client object that can later be used to connect to vCloud Director as admin for the VDC
322 """
323 try:
324             self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
325                                                                                    self.user,
326                                                                                    self.tenant_name))
327 host = self.url
328 client = Client(host, verify_ssl_certs=False)
329 client.set_highest_supported_version()
330 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
331 except:
332 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
333 "{} as user: {}".format(self.org_name, self.user))
334
335 return client
336
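    # --- Illustrative example (added for documentation only; not called by the connector) ---
    # A minimal sketch of the REST header pattern used throughout this class once a pyvcloud client
    # session exists: the Accept header pins the API version and the vCloud authorization token is
    # copied from the client session.
    def _example_authorized_headers(self):
        return {'Accept': 'application/*+xml;version=' + API_VERSION,
                'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
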
337 def init_organization(self):
338         """ Method initializes the organization UUID and VDC parameters.
339
340         At a bare minimum the client must provide an organization name that is present in vCloud Director, and a VDC.
341
342         The VDC UUID (tenant_id) will be initialized at run time if the client didn't pass it to the constructor.
343         The Org UUID will be initialized at run time if the data center is present in vCloud Director.
344
345         Returns:
346             Nothing; it sets self.org_uuid and self.tenant_id/self.tenant_name as side effects.
347 """
348 client = self.connect()
349 if not client:
350 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
351
352 self.client = client
353 try:
354 if self.org_uuid is None:
355 org_list = client.get_org_list()
356 for org in org_list.Org:
357 # we set org UUID at the init phase but we can do it only when we have valid credential.
358 if org.get('name') == self.org_name:
359 self.org_uuid = org.get('href').split('/')[-1]
360 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
361 break
362 else:
363 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
364
365                 # if all is well, request the org details
366 org_details_dict = self.get_org(org_uuid=self.org_uuid)
367
368             # there are two cases when initializing the VDC ID or VDC name at run time:
369             # case one: tenant_name provided but no tenant_id
370 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
371 vdcs_dict = org_details_dict['vdcs']
372 for vdc in vdcs_dict:
373 if vdcs_dict[vdc] == self.tenant_name:
374 self.tenant_id = vdc
375 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
376 self.org_name))
377 break
378 else:
379 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
380             # case two: we have tenant_id but no tenant_name, so we find and set it.
381 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
382 vdcs_dict = org_details_dict['vdcs']
383 for vdc in vdcs_dict:
384 if vdc == self.tenant_id:
385 self.tenant_name = vdcs_dict[vdc]
386 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
387 self.org_name))
388 break
389 else:
390 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
391 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
392 except:
393             self.logger.debug("Failed to initialize organization UUID for org {}".format(self.org_name))
394 self.logger.debug(traceback.format_exc())
395 self.org_uuid = None
396
397 def new_tenant(self, tenant_name=None, tenant_description=None):
398 """ Method adds a new tenant to VIM with this name.
399             This action requires permission to create a VDC in vCloud Director.
400
401 Args:
402 tenant_name is tenant_name to be created.
403 tenant_description not used for this call
404
405 Return:
406 returns the tenant identifier in UUID format.
407                 If the action fails, the method will throw vimconn.vimconnException
408 """
409 vdc_task = self.create_vdc(vdc_name=tenant_name)
410 if vdc_task is not None:
411 vdc_uuid, value = vdc_task.popitem()
412 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
413 return vdc_uuid
414 else:
415             raise vimconn.vimconnException("Failed to create tenant {}".format(tenant_name))
416
417 def delete_tenant(self, tenant_id=None):
418 """ Delete a tenant from VIM
419 Args:
420 tenant_id is tenant_id to be deleted.
421
422 Return:
423 returns the tenant identifier in UUID format.
424                 If the action fails, the method will throw an exception
425 """
426 vca = self.connect_as_admin()
427 if not vca:
428 raise vimconn.vimconnConnectionException("Failed to connect vCD")
429
430 if tenant_id is not None:
431 if vca._session:
432 #Get OrgVDC
433 url_list = [self.url, '/api/vdc/', tenant_id]
434 orgvdc_herf = ''.join(url_list)
435
436 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
437 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
438 response = self.perform_request(req_type='GET',
439 url=orgvdc_herf,
440 headers=headers)
441
442 if response.status_code != requests.codes.ok:
443 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
444 "Return status code {}".format(orgvdc_herf,
445 response.status_code))
446 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
447
448 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
449 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
450 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
451 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
452 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
453
454 response = self.perform_request(req_type='DELETE',
455 url=vdc_remove_href,
456 headers=headers)
457
458 if response.status_code == 202:
459 time.sleep(5)
460 return tenant_id
461 else:
462 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
463 "Return status code {}".format(vdc_remove_href,
464 response.status_code))
465 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
466 else:
467 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
468 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
469
470
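    # --- Illustrative example (added for documentation only; not called by the connector) ---
    # A minimal sketch of the tenant (org VDC) lifecycle exposed by the methods above and below.
    # It assumes the account in config['admin_username'] has VDC creation/deletion rights.
    def _example_tenant_lifecycle(self):
        tenant_id = self.new_tenant(tenant_name='example-vdc')   # creates an org VDC, returns its UUID
        tenants = self.get_tenant_list({'id': tenant_id})        # filter the tenant list by that UUID
        self.logger.debug("example tenant entry: {}".format(tenants))
        self.delete_tenant(tenant_id=tenant_id)                  # removes the VDC (recursive + force)
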
471 def get_tenant_list(self, filter_dict={}):
472 """Obtain tenants of VIM
473 filter_dict can contain the following keys:
474 name: filter by tenant name
475 id: filter by tenant uuid/id
476 <other VIM specific>
477 Returns the tenant list of dictionaries:
478 [{'name':'<name>, 'id':'<id>, ...}, ...]
479
480 """
481 org_dict = self.get_org(self.org_uuid)
482 vdcs_dict = org_dict['vdcs']
483
484 vdclist = []
485 try:
486 for k in vdcs_dict:
487 entry = {'name': vdcs_dict[k], 'id': k}
488 # if caller didn't specify dictionary we return all tenants.
489 if filter_dict is not None and filter_dict:
490 filtered_entry = entry.copy()
491 filtered_dict = set(entry.keys()) - set(filter_dict)
492 for unwanted_key in filtered_dict: del entry[unwanted_key]
493 if filter_dict == entry:
494 vdclist.append(filtered_entry)
495 else:
496 vdclist.append(entry)
497 except:
498 self.logger.debug("Error in get_tenant_list()")
499 self.logger.debug(traceback.format_exc())
500             raise vimconn.vimconnException("Incorrect state.")
501
502 return vdclist
503
504 def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
505 """Adds a tenant network to VIM
506 Params:
507 'net_name': name of the network
508 'net_type': one of:
509 'bridge': overlay isolated network
510 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
511 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
512 'ip_profile': is a dict containing the IP parameters of the network
513 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
514 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
515 'gateway_address': (Optional) ip_schema, that is X.X.X.X
516 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
517 'dhcp_enabled': True or False
518 'dhcp_start_address': ip_schema, first IP to grant
519 'dhcp_count': number of IPs to grant.
520 'shared': if this network can be seen/use by other tenants/organization
521             'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
522 Returns a tuple with the network identifier and created_items, or raises an exception on error
523 created_items can be None or a dictionary where this method can include key-values that will be passed to
524 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
525 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
526 as not present.
527 """
528
529 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}"
530 .format(net_name, net_type, ip_profile, shared, provider_network_profile))
531 vlan = None
532 if provider_network_profile:
533 vlan = provider_network_profile.get("segmentation-id")
534
535 created_items = {}
536 isshared = 'false'
537 if shared:
538 isshared = 'true'
539
540 # ############# Stub code for SRIOV #################
541 # if net_type == "data" or net_type == "ptp":
542 # if self.config.get('dv_switch_name') == None:
543 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
544 # network_uuid = self.create_dvPort_group(net_name)
545 parent_network_uuid = None
546
549
550 if provider_network_profile is not None:
551 for k, v in provider_network_profile.items():
552 if k == 'physical_network':
553 parent_network_uuid = self.get_physical_network_by_name(v)
554
555 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
556 ip_profile=ip_profile, isshared=isshared,
557 parent_network_uuid=parent_network_uuid)
558 if network_uuid is not None:
559 return network_uuid, created_items
560 else:
561             raise vimconn.vimconnUnexpectedResponse("Failed to create a new network {}".format(net_name))
562
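    # --- Illustrative example (added for documentation only; not called by the connector) ---
    # A minimal sketch of calling new_network() with an ip_profile built from the keys documented in
    # its docstring; names and addresses are placeholders.
    def _example_new_network(self):
        ip_profile = {'ip_version': 'IPv4',
                      'subnet_address': '192.168.100.0/24',
                      'gateway_address': '192.168.100.1',
                      'dns_address': '8.8.8.8',
                      'dhcp_enabled': True,
                      'dhcp_start_address': '192.168.100.10',
                      'dhcp_count': 50}
        net_id, created_items = self.new_network(net_name='example-net', net_type='bridge',
                                                 ip_profile=ip_profile, shared=False)
        return net_id, created_items
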
563 def get_vcd_network_list(self):
564         """ Method lists the networks available in the organization VDC for the logged-in tenant.
565
566             Returns:
567                 A list of dicts describing the networks available in the VDC
568 """
569
570 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
571
572 if not self.tenant_name:
573 raise vimconn.vimconnConnectionException("Tenant name is empty.")
574
575 org, vdc = self.get_vdc_details()
576 if vdc is None:
577 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
578
579 vdc_uuid = vdc.get('id').split(":")[3]
580 if self.client._session:
581 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
582 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
583 response = self.perform_request(req_type='GET',
584 url=vdc.get('href'),
585 headers=headers)
586 if response.status_code != 200:
587 self.logger.error("Failed to get vdc content")
588 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
589 else:
590 content = XmlElementTree.fromstring(response.content)
591
592 network_list = []
593 try:
594 for item in content:
595 if item.tag.split('}')[-1] == 'AvailableNetworks':
596 for net in item:
597 response = self.perform_request(req_type='GET',
598 url=net.get('href'),
599 headers=headers)
600
601 if response.status_code != 200:
602 self.logger.error("Failed to get network content")
603 raise vimconn.vimconnNotFoundException("Failed to get network content")
604 else:
605 net_details = XmlElementTree.fromstring(response.content)
606
607 filter_dict = {}
608 net_uuid = net_details.get('id').split(":")
609 if len(net_uuid) != 4:
610 continue
611 else:
612 net_uuid = net_uuid[3]
613 # create dict entry
614 self.logger.debug("get_vcd_network_list(): Adding network {} "
615 "to a list vcd id {} network {}".format(net_uuid,
616 vdc_uuid,
617 net_details.get('name')))
618 filter_dict["name"] = net_details.get('name')
619 filter_dict["id"] = net_uuid
620 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
621 shared = True
622 else:
623 shared = False
624 filter_dict["shared"] = shared
625 filter_dict["tenant_id"] = vdc_uuid
626 if int(net_details.get('status')) == 1:
627 filter_dict["admin_state_up"] = True
628 else:
629 filter_dict["admin_state_up"] = False
630 filter_dict["status"] = "ACTIVE"
631 filter_dict["type"] = "bridge"
632 network_list.append(filter_dict)
633 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
634 except:
635 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
636 pass
637
638 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
639 return network_list
640
641 def get_network_list(self, filter_dict={}):
642 """Obtain tenant networks of VIM
643 Filter_dict can be:
644 name: network name OR/AND
645 id: network uuid OR/AND
646 shared: boolean OR/AND
647 tenant_id: tenant OR/AND
648 admin_state_up: boolean
649 status: 'ACTIVE'
650
651 [{key : value , key : value}]
652
653 Returns the network list of dictionaries:
654 [{<the fields at Filter_dict plus some VIM specific>}, ...]
655 List can be empty
656 """
657
658 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
659
660 if not self.tenant_name:
661 raise vimconn.vimconnConnectionException("Tenant name is empty.")
662
663 org, vdc = self.get_vdc_details()
664 if vdc is None:
665 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
666
667 try:
668 vdcid = vdc.get('id').split(":")[3]
669
670 if self.client._session:
671 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
672 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
673 response = self.perform_request(req_type='GET',
674 url=vdc.get('href'),
675 headers=headers)
676 if response.status_code != 200:
677 self.logger.error("Failed to get vdc content")
678 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
679 else:
680 content = XmlElementTree.fromstring(response.content)
681
682 network_list = []
683 for item in content:
684 if item.tag.split('}')[-1] == 'AvailableNetworks':
685 for net in item:
686 response = self.perform_request(req_type='GET',
687 url=net.get('href'),
688 headers=headers)
689
690 if response.status_code != 200:
691 self.logger.error("Failed to get network content")
692 raise vimconn.vimconnNotFoundException("Failed to get network content")
693 else:
694 net_details = XmlElementTree.fromstring(response.content)
695
696 filter_entry = {}
697 net_uuid = net_details.get('id').split(":")
698 if len(net_uuid) != 4:
699 continue
700 else:
701 net_uuid = net_uuid[3]
702 # create dict entry
703 self.logger.debug("get_network_list(): Adding net {}"
704 " to a list vcd id {} network {}".format(net_uuid,
705 vdcid,
706 net_details.get('name')))
707 filter_entry["name"] = net_details.get('name')
708 filter_entry["id"] = net_uuid
709 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
710 shared = True
711 else:
712 shared = False
713 filter_entry["shared"] = shared
714 filter_entry["tenant_id"] = vdcid
715 if int(net_details.get('status')) == 1:
716 filter_entry["admin_state_up"] = True
717 else:
718 filter_entry["admin_state_up"] = False
719 filter_entry["status"] = "ACTIVE"
720 filter_entry["type"] = "bridge"
721 filtered_entry = filter_entry.copy()
722
723 if filter_dict is not None and filter_dict:
724 # we remove all the key : value we don't care and match only
725 # respected field
726 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
727 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
728 if filter_dict == filter_entry:
729 network_list.append(filtered_entry)
730 else:
731 network_list.append(filtered_entry)
732 except Exception as e:
733 self.logger.debug("Error in get_network_list",exc_info=True)
734 if isinstance(e, vimconn.vimconnException):
735 raise
736 else:
737 raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
738
739 self.logger.debug("Returning {}".format(network_list))
740 return network_list
741
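    # --- Illustrative example (added for documentation only; not called by the connector) ---
    # A minimal sketch of filtering with get_network_list(): an entry is returned only when every
    # field present in filter_dict matches the corresponding field of the network entry.
    def _example_filter_networks(self, net_name):
        return self.get_network_list(filter_dict={'name': net_name, 'shared': False})
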
742 def get_network(self, net_id):
743 """Method obtains network details of net_id VIM network
744         Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields"""
745
746 try:
747 org, vdc = self.get_vdc_details()
748 vdc_id = vdc.get('id').split(":")[3]
749 if self.client._session:
750 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
751 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
752 response = self.perform_request(req_type='GET',
753 url=vdc.get('href'),
754 headers=headers)
755 if response.status_code != 200:
756 self.logger.error("Failed to get vdc content")
757 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
758 else:
759 content = XmlElementTree.fromstring(response.content)
760
761 filter_dict = {}
762
763 for item in content:
764 if item.tag.split('}')[-1] == 'AvailableNetworks':
765 for net in item:
766 response = self.perform_request(req_type='GET',
767 url=net.get('href'),
768 headers=headers)
769
770 if response.status_code != 200:
771 self.logger.error("Failed to get network content")
772 raise vimconn.vimconnNotFoundException("Failed to get network content")
773 else:
774 net_details = XmlElementTree.fromstring(response.content)
775
776 vdc_network_id = net_details.get('id').split(":")
777 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
778 filter_dict["name"] = net_details.get('name')
779 filter_dict["id"] = vdc_network_id[3]
780 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
781 shared = True
782 else:
783 shared = False
784 filter_dict["shared"] = shared
785 filter_dict["tenant_id"] = vdc_id
786 if int(net_details.get('status')) == 1:
787 filter_dict["admin_state_up"] = True
788 else:
789 filter_dict["admin_state_up"] = False
790 filter_dict["status"] = "ACTIVE"
791 filter_dict["type"] = "bridge"
792 self.logger.debug("Returning {}".format(filter_dict))
793 return filter_dict
794 else:
795 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
796 except Exception as e:
797 self.logger.debug("Error in get_network")
798 self.logger.debug(traceback.format_exc())
799 if isinstance(e, vimconn.vimconnException):
800 raise
801 else:
802 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
803
804 return filter_dict
805
806 def delete_network(self, net_id, created_items=None):
807 """
808 Removes a tenant network from VIM and its associated elements
809 :param net_id: VIM identifier of the network, provided by method new_network
810 :param created_items: dictionary with extra items to be deleted. provided by method new_network
811 Returns the network identifier or raises an exception upon error or when network is not found
812 """
813
814 # ############# Stub code for SRIOV #################
815 # dvport_group = self.get_dvport_group(net_id)
816 # if dvport_group:
817 # #delete portgroup
818 # status = self.destroy_dvport_group(net_id)
819 # if status:
820 # # Remove vlanID from persistent info
821 # if net_id in self.persistent_info["used_vlanIDs"]:
822 # del self.persistent_info["used_vlanIDs"][net_id]
823 #
824 # return net_id
825
826 vcd_network = self.get_vcd_network(network_uuid=net_id)
827 if vcd_network is not None and vcd_network:
828 if self.delete_network_action(network_uuid=net_id):
829 return net_id
830 else:
831 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
832
833 def refresh_nets_status(self, net_list):
834 """Get the status of the networks
835 Params: the list of network identifiers
836 Returns a dictionary with:
837 net_id: #VIM id of this network
838 status: #Mandatory. Text with one of:
839 # DELETED (not found at vim)
840 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
841 # OTHER (Vim reported other status not understood)
842 # ERROR (VIM indicates an ERROR status)
843 # ACTIVE, INACTIVE, DOWN (admin down),
844 # BUILD (on building process)
845 #
846 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
847 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
848
849 """
850
851 dict_entry = {}
852 try:
853 for net in net_list:
854 errormsg = ''
855 vcd_network = self.get_vcd_network(network_uuid=net)
856 if vcd_network is not None and vcd_network:
857 if vcd_network['status'] == '1':
858 status = 'ACTIVE'
859 else:
860 status = 'DOWN'
861 else:
862 status = 'DELETED'
863 errormsg = 'Network not found.'
864
865 dict_entry[net] = {'status': status, 'error_msg': errormsg,
866 'vim_info': yaml.safe_dump(vcd_network)}
867 except:
868 self.logger.debug("Error in refresh_nets_status")
869 self.logger.debug(traceback.format_exc())
870
871 return dict_entry
872
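    # --- Illustrative example (added for documentation only; not called by the connector) ---
    # A minimal sketch of consuming refresh_nets_status(): the result is keyed by network id and each
    # value carries 'status', 'error_msg' and a YAML dump of the raw network data in 'vim_info'.
    def _example_report_net_status(self, net_ids):
        for net_id, info in self.refresh_nets_status(net_ids).items():
            self.logger.debug("network {} is {} ({})".format(net_id, info['status'], info['error_msg']))
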
873 def get_flavor(self, flavor_id):
874 """Obtain flavor details from the VIM
875 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
876 """
877 if flavor_id not in vimconnector.flavorlist:
878 raise vimconn.vimconnNotFoundException("Flavor not found.")
879 return vimconnector.flavorlist[flavor_id]
880
881 def new_flavor(self, flavor_data):
882 """Adds a tenant flavor to VIM
883 flavor_data contains a dictionary with information, keys:
884 name: flavor name
885 ram: memory (cloud type) in MBytes
886             vcpus: cpus (cloud type)
887 extended: EPA parameters
888 - numas: #items requested in same NUMA
889 memory: number of 1G huge pages memory
890 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
891 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
892 - name: interface name
893 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
894 bandwidth: X Gbps; requested guarantee bandwidth
895 vpci: requested virtual PCI address
896 disk: disk size
897 is_public:
898 #TODO to concrete
899 Returns the flavor identifier"""
900
901 # generate a new uuid put to internal dict and return it.
902 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
903 new_flavor=flavor_data
904 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
905 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
906 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
907
908 if not isinstance(ram, int):
909 raise vimconn.vimconnException("Non-integer value for ram")
910 elif not isinstance(cpu, int):
911 raise vimconn.vimconnException("Non-integer value for cpu")
912 elif not isinstance(disk, int):
913 raise vimconn.vimconnException("Non-integer value for disk")
914
915 extended_flv = flavor_data.get("extended")
916 if extended_flv:
917 numas=extended_flv.get("numas")
918 if numas:
919 for numa in numas:
920 #overwrite ram and vcpus
921 if 'memory' in numa:
922 ram = numa['memory']*1024
923 if 'paired-threads' in numa:
924 cpu = numa['paired-threads']*2
925 elif 'cores' in numa:
926 cpu = numa['cores']
927 elif 'threads' in numa:
928 cpu = numa['threads']
929
930 new_flavor[FLAVOR_RAM_KEY] = ram
931 new_flavor[FLAVOR_VCPUS_KEY] = cpu
932 new_flavor[FLAVOR_DISK_KEY] = disk
933 # generate a new uuid put to internal dict and return it.
934 flavor_id = uuid.uuid4()
935 vimconnector.flavorlist[str(flavor_id)] = new_flavor
936 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
937
938 return str(flavor_id)
939
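    # --- Illustrative example (added for documentation only; not called by the connector) ---
    # A minimal sketch of an EPA flavor: with the 'extended'/'numas' section below, new_flavor()
    # overrides ram with memory*1024 (MB) and vcpus with paired-threads*2, as implemented above.
    def _example_epa_flavor(self):
        flavor_data = {'name': 'example-epa-flavor',
                       'ram': 2048, 'vcpus': 2, 'disk': 10,
                       'extended': {'numas': [{'memory': 4,             # 4 GB huge pages -> ram = 4096 MB
                                               'paired-threads': 4}]}}  # -> vcpus = 8
        return self.new_flavor(flavor_data)
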
940 def delete_flavor(self, flavor_id):
941 """Deletes a tenant flavor from VIM identify by its id
942
943 Returns the used id or raise an exception
944 """
945 if flavor_id not in vimconnector.flavorlist:
946 raise vimconn.vimconnNotFoundException("Flavor not found.")
947
948 vimconnector.flavorlist.pop(flavor_id, None)
949 return flavor_id
950
951 def new_image(self, image_dict):
952 """
953 Adds a tenant image to VIM
954 Returns:
955             the image identifier (catalog UUID) if the image is created;
956             an exception is raised on error
957 """
958
959 return self.get_image_id_from_path(image_dict['location'])
960
961 def delete_image(self, image_id):
962 """
963 Deletes a tenant image from VIM
964 Args:
965 image_id is ID of Image to be deleted
966 Return:
967 returns the image identifier in UUID format or raises an exception on error
968 """
969 conn = self.connect_as_admin()
970 if not conn:
971 raise vimconn.vimconnConnectionException("Failed to connect vCD")
972 # Get Catalog details
973 url_list = [self.url, '/api/catalog/', image_id]
974 catalog_herf = ''.join(url_list)
975
976 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
977 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
978
979 response = self.perform_request(req_type='GET',
980 url=catalog_herf,
981 headers=headers)
982
983 if response.status_code != requests.codes.ok:
984 self.logger.debug("delete_image():GET REST API call {} failed. "\
985 "Return status code {}".format(catalog_herf,
986 response.status_code))
987 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
988
989 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
990 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
991 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
992
993 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
994 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
995 for catalogItem in catalogItems:
996 catalogItem_href = catalogItem.attrib['href']
997
998 response = self.perform_request(req_type='GET',
999 url=catalogItem_href,
1000 headers=headers)
1001
1002 if response.status_code != requests.codes.ok:
1003 self.logger.debug("delete_image():GET REST API call {} failed. "\
1004 "Return status code {}".format(catalog_herf,
1005 response.status_code))
1006 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
1007 catalogItem,
1008 image_id))
1009
1010 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1011 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
1012 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
1013 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
1014
1015 #Remove catalogItem
1016 response = self.perform_request(req_type='DELETE',
1017 url=catalogitem_remove_href,
1018 headers=headers)
1019 if response.status_code == requests.codes.no_content:
1020 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1021 else:
1022 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
1023
1024 #Remove catalog
1025 url_list = [self.url, '/api/admin/catalog/', image_id]
1026 catalog_remove_herf = ''.join(url_list)
1027 response = self.perform_request(req_type='DELETE',
1028 url=catalog_remove_herf,
1029 headers=headers)
1030
1031 if response.status_code == requests.codes.no_content:
1032 self.logger.debug("Deleted Catalog {}".format(image_id))
1033 return image_id
1034 else:
1035 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
1036
1037
1038 def catalog_exists(self, catalog_name, catalogs):
1039 """
1040
1041 :param catalog_name:
1042 :param catalogs:
1043 :return:
1044 """
1045 for catalog in catalogs:
1046 if catalog['name'] == catalog_name:
1047 return catalog['id']
1048
1049 def create_vimcatalog(self, vca=None, catalog_name=None):
1050 """ Create new catalog entry in vCloud director.
1051
1052 Args
1053 vca: vCloud director.
1054             catalog_name: catalog that the client wishes to create. Note that no validation is done on the name;
1055             the client must make sure to provide a valid string representation.
1056
1057 Returns catalog id if catalog created else None.
1058
1059 """
1060 try:
1061 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1062 if lxml_catalog_element:
1063 id_attr_value = lxml_catalog_element.get('id') # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
1064 return id_attr_value.split(':')[-1]
1065 catalogs = vca.list_catalogs()
1066 except Exception as ex:
1067 self.logger.error(
1068 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
1069 raise
1070 return self.catalog_exists(catalog_name, catalogs)
1071
1072 # noinspection PyIncorrectDocstring
1073 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
1074 description='', progress=False, chunk_bytes=128 * 1024):
1075 """
1076         Uploads an OVF file to a vCloud catalog
1077
1078 :param chunk_bytes:
1079 :param progress:
1080 :param description:
1081 :param image_name:
1082 :param vca:
1083 :param catalog_name: (str): The name of the catalog to upload the media.
1084 :param media_file_name: (str): The name of the local media file to upload.
1085 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1086 """
1087 os.path.isfile(media_file_name)
1088 statinfo = os.stat(media_file_name)
1089
1090 # find a catalog entry where we upload OVF.
1091         # create a vApp Template and check the status; if vCD is able to read the OVF it will respond with the
1092         # appropriate status change.
1093         # if vCD can parse the OVF we upload the VMDK file
1094 try:
1095 for catalog in vca.list_catalogs():
1096 if catalog_name != catalog['name']:
1097 continue
1098 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
1099 data = """
1100 <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
1101 """.format(catalog_name, description)
1102
1103 if self.client:
1104 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1105 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1106 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1107
1108 response = self.perform_request(req_type='POST',
1109 url=catalog_href,
1110 headers=headers,
1111 data=data)
1112
1113 if response.status_code == requests.codes.created:
1114 catalogItem = XmlElementTree.fromstring(response.content)
1115 entity = [child for child in catalogItem if
1116 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1117 href = entity.get('href')
1118 template = href
1119
1120 response = self.perform_request(req_type='GET',
1121 url=href,
1122 headers=headers)
1123
1124 if response.status_code == requests.codes.ok:
1125                             headers['Content-Type'] = 'text/xml'
1126                             result = re.search(r'rel="upload:default"\shref="(.*?\/descriptor.ovf)"', response.text)
1127 if result:
1128 transfer_href = result.group(1)
1129
1130 response = self.perform_request(req_type='PUT',
1131 url=transfer_href,
1132 headers=headers,
1133 data=open(media_file_name, 'rb'))
1134 if response.status_code != requests.codes.ok:
1135 self.logger.debug(
1136 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1137 media_file_name))
1138 return False
1139
1140                         # TODO fix this with an async block
1141 time.sleep(5)
1142
1143 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1144
1145 # uploading VMDK file
1146 # check status of OVF upload and upload remaining files.
1147 response = self.perform_request(req_type='GET',
1148 url=template,
1149 headers=headers)
1150
1151 if response.status_code == requests.codes.ok:
1152                         result = re.search(r'rel="upload:default"\s*href="(.*?vmdk)"', response.text)
1153 if result:
1154 link_href = result.group(1)
1155                                 # we skip the ovf since it is already uploaded.
1156 if 'ovf' in link_href:
1157 continue
1158                             # The OVF file and VMDK must be in the same directory
1159 head, tail = os.path.split(media_file_name)
1160 file_vmdk = head + '/' + link_href.split("/")[-1]
1161 if not os.path.isfile(file_vmdk):
1162 return False
1163 statinfo = os.stat(file_vmdk)
1164 if statinfo.st_size == 0:
1165 return False
1166 hrefvmdk = link_href
1167
1168 if progress:
1169 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1170 FileTransferSpeed()]
1171 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1172
1173 bytes_transferred = 0
1174 f = open(file_vmdk, 'rb')
1175 while bytes_transferred < statinfo.st_size:
1176 my_bytes = f.read(chunk_bytes)
1177 if len(my_bytes) <= chunk_bytes:
1178 headers['Content-Range'] = 'bytes {}-{}/{}'.format(
1179                                         bytes_transferred, bytes_transferred + len(my_bytes) - 1, statinfo.st_size)
1180 headers['Content-Length'] = str(len(my_bytes))
1181 response = requests.put(url=hrefvmdk,
1182 headers=headers,
1183 data=my_bytes,
1184 verify=False)
1185 if response.status_code == requests.codes.ok:
1186 bytes_transferred += len(my_bytes)
1187 if progress:
1188 progress_bar.update(bytes_transferred)
1189 else:
1190 self.logger.debug(
1191 'file upload failed with error: [{}] {}'.format(response.status_code,
1192 response.content))
1193
1194 f.close()
1195 return False
1196 f.close()
1197 if progress:
1198 progress_bar.finish()
1199 time.sleep(10)
1200 return True
1201 else:
1202 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1203 format(catalog_name, media_file_name))
1204 return False
1205 except Exception as exp:
1206 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1207 .format(catalog_name,media_file_name, exp))
1208 raise vimconn.vimconnException(
1209 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1210 .format(catalog_name,media_file_name, exp))
1211
1212 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1213 return False
1214
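    # --- Illustrative example (added for documentation only; not called by the connector) ---
    # A minimal, self-contained sketch of the chunked VMDK upload pattern used in upload_ovf() above:
    # each PUT carries a Content-Range header describing the byte window being transferred.
    def _example_chunked_put(self, upload_href, file_path, headers, chunk_bytes=128 * 1024):
        size = os.stat(file_path).st_size
        sent = 0
        with open(file_path, 'rb') as f:
            while sent < size:
                chunk = f.read(chunk_bytes)
                hdrs = dict(headers)
                hdrs['Content-Range'] = 'bytes {}-{}/{}'.format(sent, sent + len(chunk) - 1, size)
                hdrs['Content-Length'] = str(len(chunk))
                response = requests.put(url=upload_href, headers=hdrs, data=chunk, verify=False)
                if response.status_code != requests.codes.ok:
                    return False
                sent += len(chunk)
        return True
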
1215 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1216 """Upload media file"""
1217 # TODO add named parameters for readability
1218
1219 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1220 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1221
1222 def validate_uuid4(self, uuid_string=None):
1223         """ Method validates the correct format of a UUID.
1224
1225         Return: True if the string represents a valid UUID
1226 """
1227 try:
1228 val = uuid.UUID(uuid_string, version=4)
1229 except ValueError:
1230 return False
1231 return True
1232
1233 def get_catalogid(self, catalog_name=None, catalogs=None):
1234         """ Method checks the catalogs and returns the catalog ID in UUID format.
1235
1236             Args
1237                 catalog_name: catalog name as string
1238                 catalogs: list of catalogs.
1239
1240             Return: catalog uuid or None
1241 """
1242
1243 for catalog in catalogs:
1244 if catalog['name'] == catalog_name:
1245 catalog_id = catalog['id']
1246 return catalog_id
1247 return None
1248
1249 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1250         """ Method checks the catalogs and returns the catalog name; lookup is done by catalog UUID.
1251
1252             Args
1253                 catalog_uuid: catalog UUID as string
1254                 catalogs: list of catalogs.
1255
1256             Return: catalog name or None
1257 """
1258
1259 if not self.validate_uuid4(uuid_string=catalog_uuid):
1260 return None
1261
1262 for catalog in catalogs:
1263 catalog_id = catalog.get('id')
1264 if catalog_id == catalog_uuid:
1265 return catalog.get('name')
1266 return None
1267
1268 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1269         """ Method checks the catalogs and returns the catalog object; lookup is done by catalog UUID.
1270
1271             Args
1272                 catalog_uuid: catalog UUID as string
1273                 catalogs: list of catalogs.
1274
1275             Return: catalog object or None
1276 """
1277
1278 if not self.validate_uuid4(uuid_string=catalog_uuid):
1279 return None
1280
1281 for catalog in catalogs:
1282 catalog_id = catalog.get('id')
1283 if catalog_id == catalog_uuid:
1284 return catalog
1285 return None
1286
1287 def get_image_id_from_path(self, path=None, progress=False):
1288         """ Method uploads an OVF image to vCloud Director.
1289
1290             Each OVF image is represented as a single catalog entry in vCloud Director.
1291             The method checks for an existing catalog entry; the check is done by file name without the file extension.
1292
1293             If the given catalog name is already present, the method will respond with the existing catalog uuid,
1294             otherwise it will create a new catalog entry and upload the OVF file to the newly created catalog.
1295
1296             If the method can't create a catalog entry or upload the file, it will throw an exception.
1297
1298             The method accepts a boolean flag 'progress' that will output a progress bar. This is useful
1299             for the standalone upload use case, e.g. to test large file uploads.
1300
1301             Args
1302                 path: - valid path to the OVF file.
1303                 progress - boolean; if True, show a progress bar.
1304
1305             Return: if the image is uploaded correctly the method will provide the image catalog UUID.
1306 """
1307
1308 if not path:
1309 raise vimconn.vimconnException("Image path can't be None.")
1310
1311 if not os.path.isfile(path):
1312 raise vimconn.vimconnException("Can't read file. File not found.")
1313
1314 if not os.access(path, os.R_OK):
1315 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1316
1317 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1318
1319 dirpath, filename = os.path.split(path)
1320 flname, file_extension = os.path.splitext(path)
1321 if file_extension != '.ovf':
1322             self.logger.debug("Wrong file extension {} connector supports only OVF containers.".format(file_extension))
1323 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1324
1325 catalog_name = os.path.splitext(filename)[0]
1326         catalog_md5_name = hashlib.md5(path.encode('utf-8')).hexdigest()
1327 self.logger.debug("File name {} Catalog Name {} file path {} "
1328 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1329
1330 try:
1331 org,vdc = self.get_vdc_details()
1332 catalogs = org.list_catalogs()
1333 except Exception as exp:
1334 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1335 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1336
1337 if len(catalogs) == 0:
1338 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1339 if self.create_vimcatalog(org, catalog_md5_name) is None:
1340 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1341
1342 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1343 media_name=filename, medial_file_name=path, progress=progress)
1344 if not result:
1345 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1346 return self.get_catalogid(catalog_name, catalogs)
1347 else:
1348 for catalog in catalogs:
1349 # search for existing catalog if we find same name we return ID
1350 # TODO optimize this
1351 if catalog['name'] == catalog_md5_name:
1352 self.logger.debug("Found existing catalog entry for {} "
1353 "catalog id {}".format(catalog_name,
1354 self.get_catalogid(catalog_md5_name, catalogs)))
1355 return self.get_catalogid(catalog_md5_name, catalogs)
1356
1357 # if we didn't find existing catalog we create a new one and upload image.
1358 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1359 if self.create_vimcatalog(org, catalog_md5_name) is None:
1360 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1361
1362 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1363 media_name=filename, medial_file_name=path, progress=progress)
1364 if not result:
1365 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1366
1367 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1368
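    # --- Illustrative example (added for documentation only; not called by the connector) ---
    # A minimal sketch of the catalog naming convention used above: the catalog name is the MD5 hash
    # of the OVF path, so re-uploading the same path resolves to the same catalog entry.
    def _example_catalog_name_for(self, ovf_path):
        return hashlib.md5(ovf_path.encode('utf-8')).hexdigest()
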
1369 def get_image_list(self, filter_dict={}):
1370 '''Obtain tenant images from VIM
1371 Filter_dict can be:
1372 name: image name
1373 id: image uuid
1374 checksum: image checksum
1375 location: image path
1376 Returns the image list of dictionaries:
1377 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1378 List can be empty
1379 '''
1380
1381 try:
1382 org, vdc = self.get_vdc_details()
1383 image_list = []
1384 catalogs = org.list_catalogs()
1385 if len(catalogs) == 0:
1386 return image_list
1387 else:
1388 for catalog in catalogs:
1389 catalog_uuid = catalog.get('id')
1390 name = catalog.get('name')
1391 filtered_dict = {}
1392 if filter_dict.get("name") and filter_dict["name"] != name:
1393 continue
1394 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1395 continue
1396 filtered_dict ["name"] = name
1397 filtered_dict ["id"] = catalog_uuid
1398 image_list.append(filtered_dict)
1399
1400 self.logger.debug("List of already created catalog items: {}".format(image_list))
1401 return image_list
1402 except Exception as exp:
1403             raise vimconn.vimconnException("Exception occurred while retrieving catalog items {}".format(exp))
1404
1405 def get_vappid(self, vdc=None, vapp_name=None):
1406 """ Method takes vdc object and vApp name and returns vapp uuid or None
1407
1408 Args:
1409 vdc: The VDC object.
1410             vapp_name: the vApp name identifier
1411
1412 Returns:
1413             The vApp UUID, otherwise None
1414 """
1415 if vdc is None or vapp_name is None:
1416 return None
1417 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1418 try:
1419 refs = [ref for ref in vdc.ResourceEntities.ResourceEntity \
1420 if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1421 if len(refs) == 1:
1422 return refs[0].href.split("vapp")[1][1:]
1423 except Exception as e:
1424 self.logger.exception(e)
1425 return False
1426 return None
1427
1428 def check_vapp(self, vdc=None, vapp_uuid=None):
1429         """ Method returns True or False depending on whether the vApp is deployed in vCloud Director
1430
1431 Args:
1432             vdc: The VDC object.
1433             vapp_uuid: vApp identifier
1435
1436 Returns:
1437             True if the vApp is deployed, False otherwise
1438 :param vdc:
1439 :param vapp_uuid:
1440 """
1441 try:
1442 refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1443 if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1444 for ref in refs:
1445 vappid = ref.href.split("vapp")[1][1:]
1446 # find vapp with respected vapp uuid
1447 if vappid == vapp_uuid:
1448 return True
1449 except Exception as e:
1450 self.logger.exception(e)
1451 return False
1452 return False
1453
1454 def get_namebyvappid(self, vapp_uuid=None):
1455         """Method returns the vApp name from vCD; lookup is done by vapp_uuid.
1456
1457 Args:
1458 vapp_uuid: vappid is application identifier
1459
1460 Returns:
1461             The vApp name, otherwise None
1462 """
1463 try:
1464 if self.client and vapp_uuid:
1465 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1466 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1467 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1468
1469 response = self.perform_request(req_type='GET',
1470 url=vapp_call,
1471 headers=headers)
1472 #Retry login if session expired & retry sending request
1473 if response.status_code == 403:
1474 response = self.retry_rest('GET', vapp_call)
1475
1476 tree = XmlElementTree.fromstring(response.content)
1477 return tree.attrib['name']
1478 except Exception as e:
1479 self.logger.exception(e)
1480 return None
1481 return None
1482
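    # --- Illustrative example (added for documentation only; not called by the connector) ---
    # A minimal sketch of the arguments accepted by new_vminstance() below. The network id, image id
    # and flavor id are placeholders that would normally come from new_network(),
    # get_image_id_from_path() and new_flavor().
    def _example_vminstance_args(self):
        net_list = [{'name': 'eth0',
                     'net_id': '11111111-2222-3333-4444-555555555555',   # placeholder VIM network id
                     'type': 'virtual'}]
        cloud_config = {'key-pairs': ['ssh-rsa AAAA... user@host'],
                        'users': [{'name': 'osm', 'key-pairs': ['ssh-rsa AAAA... user@host']}]}
        return dict(name='example-vm', description='example', start=True,
                    image_id='<catalog-uuid>', flavor_id='<flavor-uuid>',
                    net_list=net_list, cloud_config=cloud_config, disk_list=None)
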
1483 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1484 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1485 """Adds a VM instance to VIM
1486 Params:
1487 'start': (boolean) indicates if VM must start or created in pause mode.
1488 'image_id','flavor_id': image and flavor VIM id to use for the VM
1489 'net_list': list of interfaces, each one is a dictionary with:
1490 'name': (optional) name for the interface.
1491 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1492 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1493 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1494 'mac_address': (optional) mac address to assign to this interface
1495 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1496 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1497 'type': (mandatory) can be one of:
1498 'virtual', in this case always connected to a network of type 'net_type=bridge'
1499                 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
1500                     can be created unconnected
1501 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1502 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1503 are allocated on the same physical NIC
1504 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1505 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1506 or True, it must apply the default VIM behaviour
1507 After execution the method will add the key:
1508 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1509 interface. 'net_list' is modified
1510 'cloud_config': (optional) dictionary with:
1511 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1512 'users': (optional) list of users to be inserted, each item is a dict with:
1513 'name': (mandatory) user name,
1514 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1515 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1516 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1517 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1518 'dest': (mandatory) string with the destination absolute path
1519 'encoding': (optional, by default text). Can be one of:
1520 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1521 'content' (mandatory): string with the content of the file
1522 'permissions': (optional) string with file permissions, typically octal notation '0644'
1523 'owner': (optional) file owner, string with the format 'owner:group'
1524 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1525 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1526 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1527 'size': (mandatory) string with the size of the disk in GB
1528 availability_zone_index: Index of availability_zone_list to use for this VM. None if no availability zone is required
1529 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1530 availability_zone_index is None
1531 Returns a tuple with the instance identifier and created_items or raises an exception on error
1532 created_items can be None or a dictionary where this method can include key-values that will be passed to
1533 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1534 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1535 as not present.
1536 """
1537 self.logger.info("Creating new instance for entry {}".format(name))
1538 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
1539 "availability_zone_index {} availability_zone_list {}"\
1540 .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
1541 availability_zone_index, availability_zone_list))
1542
1543 # new vm name = vmname + '-' + uuid
1544 new_vm_name = [name, '-', str(uuid.uuid4())]
1545 vmname_andid = ''.join(new_vm_name)
1546
1547 for net in net_list:
1548 if net['type'] == "PCI-PASSTHROUGH":
1549 raise vimconn.vimconnNotSupportedException(
1550 "Current vCD version does not support type : {}".format(net['type']))
1551
1552 if len(net_list) > 10:
1553 raise vimconn.vimconnNotSupportedException(
1554 "The VM hardware versions 7 and above support upto 10 NICs only")
1555
1556 # if vm already deployed we return existing uuid
1557 # we check for presence of VDC, Catalog entry and Flavor.
1558 org, vdc = self.get_vdc_details()
1559 if vdc is None:
1560 raise vimconn.vimconnNotFoundException(
1561 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1562 catalogs = org.list_catalogs()
1563 if catalogs is None:
1564 #Retry once, if failed by refreshing token
1565 self.get_token()
1566 org = Org(self.client, resource=self.client.get_org())
1567 catalogs = org.list_catalogs()
1568 if catalogs is None:
1569 raise vimconn.vimconnNotFoundException(
1570 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1571
1572 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1573 if catalog_hash_name:
1574 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1575 else:
1576 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to create vApp {}: "
1577 "(Failed to retrieve catalog information {})".format(name, image_id))
1578
1579 # Set vCPU and Memory based on flavor.
1580 vm_cpus = None
1581 vm_memory = None
1582 vm_disk = None
1583 numas = None
1584
1585 if flavor_id is not None:
1586 if flavor_id not in vimconnector.flavorlist:
1587 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to create vApp {}: "
1588 "Failed to retrieve flavor information "
1589 "for flavor id {}".format(name, flavor_id))
1590 else:
1591 try:
1592 flavor = vimconnector.flavorlist[flavor_id]
1593 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1594 vm_memory = flavor[FLAVOR_RAM_KEY]
1595 vm_disk = flavor[FLAVOR_DISK_KEY]
1596 extended = flavor.get("extended", None)
1597 if extended:
1598 numas=extended.get("numas", None)
1599
1600 except Exception as exp:
1601 raise vimconn.vimconnException("Corrupted flavor {}. Exception: {}".format(flavor_id, exp))
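# Illustrative shape of a vimconnector.flavorlist entry consumed above, using the
# FLAVOR_*_KEY names defined at module level; 'extended'/'numas' are optional and only
# 'paired-threads-id' is read from them later (values are placeholders):
#   {'vcpus': 2, 'ram': 4096, 'disk': 40, 'extended': {'numas': [{'paired-threads-id': ...}]}}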
1602
1603 # image upload creates the template name as the catalog name followed by a space and 'Template'.
1604 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1605 power_on = 'false'
1606 if start:
1607 power_on = 'true'
1608
1609 # client must provide at least one entry in net_list; if not, we report an error
1610 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1611 #If no mgmt net is present, then the first net in net_list is considered the primary net.
1612 primary_net = None
1613 primary_netname = None
1614 primary_net_href = None
1615 network_mode = 'bridged'
1616 if net_list is not None and len(net_list) > 0:
1617 for net in net_list:
1618 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1619 primary_net = net
1620 if primary_net is None:
1621 primary_net = net_list[0]
1622
1623 try:
1624 primary_net_id = primary_net['net_id']
1625 url_list = [self.url, '/api/network/', primary_net_id]
1626 primary_net_href = ''.join(url_list)
1627 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1628 if 'name' in network_dict:
1629 primary_netname = network_dict['name']
1630
1631 except KeyError:
1632 raise vimconn.vimconnException("Corrupted primary network data: {}".format(primary_net))
1633 else:
1634 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create vApp {}: net_list is empty".format(name))
1635
1636 # use: 'data', 'bridge', 'mgmt'
1637 # create vApp. Set vcpu and ram based on flavor id.
1638 try:
1639 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1640 if not vdc_obj:
1641 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
1642
1643 for retry in (1,2):
1644 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1645 catalog_items = [items.attrib]
1646
1647 if len(catalog_items) == 1:
1648 if self.client:
1649 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1650 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1651
1652 response = self.perform_request(req_type='GET',
1653 url=catalog_items[0].get('href'),
1654 headers=headers)
1655 catalogItem = XmlElementTree.fromstring(response.content)
1656 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1657 vapp_template_href = entity.get("href")
1658
1659 response = self.perform_request(req_type='GET',
1660 url=vapp_template_href,
1661 headers=headers)
1662 if response.status_code != requests.codes.ok:
1663 self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_template_href,
1664 response.status_code))
1665 else:
1666 result = (response.content).replace("\n"," ")
1667
1668 vapp_template_tree = XmlElementTree.fromstring(response.content)
1669 children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
1670 vm_element = [child for child in children_element if 'Vm' in child.tag][0]
1671 vm_name = vm_element.get('name')
1672 vm_id = vm_element.get('id')
1673 vm_href = vm_element.get('href')
1674
1675 cpus = re.search(r'<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1676 memory_mb = re.search(r'<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1677 cores = re.search(r'<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
1678
1679 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
1680 vdc_id = vdc.get('id').split(':')[-1]
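# The VDC 'id' attribute is a URN, typically of the form 'urn:vcloud:vdc:<uuid>', so
# split(':')[-1] keeps only the bare UUID used to build the instantiateVAppTemplate URL below.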
1681 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
1682 vdc_id)
1683 data = """<?xml version="1.0" encoding="UTF-8"?>
1684 <InstantiateVAppTemplateParams
1685 xmlns="http://www.vmware.com/vcloud/v1.5"
1686 name="{}"
1687 deploy="false"
1688 powerOn="false"
1689 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
1690 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1691 <Description>Vapp instantiation</Description>
1692 <InstantiationParams>
1693 <NetworkConfigSection>
1694 <ovf:Info>Configuration parameters for logical networks</ovf:Info>
1695 <NetworkConfig networkName="{}">
1696 <Configuration>
1697 <ParentNetwork href="{}" />
1698 <FenceMode>bridged</FenceMode>
1699 </Configuration>
1700 </NetworkConfig>
1701 </NetworkConfigSection>
1702 <LeaseSettingsSection
1703 type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
1704 <ovf:Info>Lease Settings</ovf:Info>
1705 <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
1706 <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
1707 </LeaseSettingsSection>
1708 </InstantiationParams>
1709 <Source href="{}"/>
1710 <SourcedItem>
1711 <Source href="{}" id="{}" name="{}"
1712 type="application/vnd.vmware.vcloud.vm+xml"/>
1713 <VmGeneralParams>
1714 <NeedsCustomization>false</NeedsCustomization>
1715 </VmGeneralParams>
1716 <InstantiationParams>
1717 <NetworkConnectionSection>
1718 <ovf:Info>Specifies the available VM network connections</ovf:Info>
1719 <NetworkConnection network="{}">
1720 <NetworkConnectionIndex>0</NetworkConnectionIndex>
1721 <IsConnected>true</IsConnected>
1722 <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
1723 </NetworkConnection>
1724 </NetworkConnectionSection><ovf:VirtualHardwareSection>
1725 <ovf:Info>Virtual hardware requirements</ovf:Info>
1726 <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
1727 xmlns:vmw="http://www.vmware.com/schema/ovf">
1728 <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
1729 <rasd:Description>Number of Virtual CPUs</rasd:Description>
1730 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
1731 <rasd:InstanceID>4</rasd:InstanceID>
1732 <rasd:Reservation>0</rasd:Reservation>
1733 <rasd:ResourceType>3</rasd:ResourceType>
1734 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
1735 <rasd:Weight>0</rasd:Weight>
1736 <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
1737 </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
1738 <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
1739 <rasd:Description>Memory Size</rasd:Description>
1740 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
1741 <rasd:InstanceID>5</rasd:InstanceID>
1742 <rasd:Reservation>0</rasd:Reservation>
1743 <rasd:ResourceType>4</rasd:ResourceType>
1744 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
1745 <rasd:Weight>0</rasd:Weight>
1746 </ovf:Item>
1747 </ovf:VirtualHardwareSection>
1748 </InstantiationParams>
1749 </SourcedItem>
1750 <AllEULAsAccepted>false</AllEULAsAccepted>
1751 </InstantiateVAppTemplateParams>""".format(vmname_andid,
1752 primary_netname,
1753 primary_net_href,
1754 vapp_template_href,
1755 vm_href,
1756 vm_id,
1757 vm_name,
1758 primary_netname,
1759 cpu=cpus,
1760 core=cores,
1761 memory=memory_mb)
1762
1763 response = self.perform_request(req_type='POST',
1764 url=instantiate_vapp_href,
1765 headers=headers,
1766 data=data)
1767
1768 if response.status_code != 201:
1769 self.logger.error("REST call {} failed, reason: {} "\
1770 "status code: {}".format(instantiate_vapp_href,
1771 response.content,
1772 response.status_code))
1773 raise vimconn.vimconnException("new_vminstance(): Failed to create "\
1774 "vApp {}".format(vmname_andid))
1775 else:
1776 vapptask = self.get_task_from_response(response.content)
1777
1778 if vapptask is None and retry==1:
1779 self.get_token() # Retry getting token
1780 continue
1781 else:
1782 break
1783
1784 if vapptask is None or vapptask is False:
1785 raise vimconn.vimconnUnexpectedResponse(
1786 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1787
1788 # wait for task to complete
1789 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
1790
1791 if result.get('status') == 'success':
1792 self.logger.debug("new_vminstance(): Successfully created vApp {}".format(vmname_andid))
1793 else:
1794 raise vimconn.vimconnUnexpectedResponse(
1795 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1796
1797 except Exception as exp:
1798 raise vimconn.vimconnUnexpectedResponse(
1799 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1800
1801 # we should have now vapp in undeployed state.
1802 try:
1803 vdc_obj = VDC(self.client, href=vdc.get('href'))
1804 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1805 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1806 vapp = VApp(self.client, resource=vapp_resource)
1807
1808 except Exception as exp:
1809 raise vimconn.vimconnUnexpectedResponse(
1810 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1811 .format(vmname_andid, exp))
1812
1813 if vapp_uuid is None:
1814 raise vimconn.vimconnUnexpectedResponse(
1815 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1816 vmname_andid))
1817
1818 #Add PCI passthrough/SRIOV configurations
1819 vm_obj = None
1820 pci_devices_info = []
1821 reserve_memory = False
1822
1823 for net in net_list:
1824 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1825 pci_devices_info.append(net)
1826 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id' in net:
1827 reserve_memory = True
1828
1829 #Add PCI
1830 if len(pci_devices_info) > 0:
1831 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1832 vmname_andid ))
1833 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1834 pci_devices_info,
1835 vmname_andid)
1836 if PCI_devices_status:
1837 self.logger.info("Added PCI devices {} to VM {}".format(
1838 pci_devices_info,
1839 vmname_andid)
1840 )
1841 reserve_memory = True
1842 else:
1843 self.logger.info("Failed to add PCI devices {} to VM {}".format(
1844 pci_devices_info,
1845 vmname_andid)
1846 )
1847
1848 # Modify vm disk
1849 if vm_disk:
1850 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1851 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1852 if result :
1853 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1854
1855 #Add new or existing disks to vApp
1856 if disk_list:
1857 added_existing_disk = False
1858 for disk in disk_list:
1859 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1860 image_id = disk['image_id']
1861 # Adding CD-ROM to VM
1862 # will revisit code once specification ready to support this feature
1863 self.insert_media_to_vm(vapp, image_id)
1864 elif "image_id" in disk and disk["image_id"] is not None:
1865 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1866 disk["image_id"] , vapp_uuid))
1867 self.add_existing_disk(catalogs=catalogs,
1868 image_id=disk["image_id"],
1869 size = disk["size"],
1870 template_name=templateName,
1871 vapp_uuid=vapp_uuid
1872 )
1873 added_existing_disk = True
1874 else:
1875 #Wait till added existing disk gets reflected into vCD database/API
1876 if added_existing_disk:
1877 time.sleep(5)
1878 added_existing_disk = False
1879 self.add_new_disk(vapp_uuid, disk['size'])
1880
1881 if numas:
1882 # Assigning numa affinity setting
1883 for numa in numas:
1884 if 'paired-threads-id' in numa:
1885 paired_threads_id = numa['paired-threads-id']
1886 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1887
1888 # add NICs & connect to networks in netlist
1889 try:
1890 vdc_obj = VDC(self.client, href=vdc.get('href'))
1891 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1892 vapp = VApp(self.client, resource=vapp_resource)
1893 vapp_id = vapp_resource.get('id').split(':')[-1]
1894
1895 self.logger.info("Removing primary NIC: ")
1896 # First remove all NICs so that NIC properties can be adjusted as needed
1897 self.remove_primary_network_adapter_from_all_vms(vapp)
1898
1899 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1900 primary_nic_index = 0
1901 nicIndex = 0
1902 for net in net_list:
1903 # openmano uses network id in UUID format.
1904 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
1905 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1906 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1907
1908 if 'net_id' not in net:
1909 continue
1910
1911 #Using net_id as vim_id, i.e. the VIM interface id, as we do not have a separate VIM interface id
1912 #Same will be returned in refresh_vms_status() as vim_interface_id
1913 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1914
1915 interface_net_id = net['net_id']
1916 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1917 interface_network_mode = net['use']
1918
1919 if interface_network_mode == 'mgmt':
1920 primary_nic_index = nicIndex
1921
1922 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1923 - DHCP (The IP address is obtained from a DHCP service.)
1924 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1925 - NONE (No IP addressing mode specified.)"""
1926
1927 if primary_netname is not None:
1928 self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
1929 nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
1930 if len(nets) == 1:
1931 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
1932
1933 if interface_net_name != primary_netname:
1934 # connect network to VM - with all DHCP by default
1935 self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
1936 self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
1937
1938 type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
1939 nic_type = 'VMXNET3'
1940 if 'type' in net and net['type'] not in type_list:
1941 # fetching nic type from vnf
1942 if 'model' in net:
1943 if net['model'] is not None:
1944 if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
1945 nic_type = 'VMXNET3'
1946 else:
1947 nic_type = net['model']
1948
1949 self.logger.info("new_vminstance(): adding network adapter "\
1950 "to a network {}".format(nets[0].get('name')))
1951 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1952 primary_nic_index,
1953 nicIndex,
1954 net,
1955 nic_type=nic_type)
1956 else:
1957 self.logger.info("new_vminstance(): adding network adapter "\
1958 "to a network {}".format(nets[0].get('name')))
1959 if net['type'] in ['SR-IOV', 'VF']:
1960 nic_type = net['type']
1961 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1962 primary_nic_index,
1963 nicIndex,
1964 net,
1965 nic_type=nic_type)
1966 nicIndex += 1
1967
1968 # cloud-init for ssh-key injection
1969 if cloud_config:
1970 # Create a catalog which will be carrying the config drive ISO
1971 # This catalog is deleted during vApp deletion. The catalog name carries
1972 # vApp UUID and that's how it gets identified during its deletion.
1973 config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
1974 self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
1975 config_drive_catalog_name))
1976 config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
1977 if config_drive_catalog_id is None:
1978 error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
1979 "ISO".format(config_drive_catalog_name)
1980 raise Exception(error_msg)
1981
1982 # Create config-drive ISO
1983 _, userdata = self._create_user_data(cloud_config)
1984 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
1985 iso_path = self.create_config_drive_iso(userdata)
1986 self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
1987
1988 self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
1989 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
1990 # Attach the config-drive ISO to the VM
1991 self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
1992 # The ISO remains in INVALID_STATE right after the PUT request (it's a blocking call, though)
1993 time.sleep(5)
1994 self.insert_media_to_vm(vapp, config_drive_catalog_id)
1995 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
1996
1997 # If VM has PCI devices or SRIOV reserve memory for VM
1998 if reserve_memory:
1999 self.reserve_memory_for_all_vms(vapp, memory_mb)
2000
2001 self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
2002
2003 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2004 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2005 if result.get('status') == 'success':
2006 self.logger.info("new_vminstance(): Successfully powered on "\
2007 "vApp {}".format(vmname_andid))
2008 else:
2009 self.logger.error("new_vminstance(): failed to power on vApp "\
2010 "{}".format(vmname_andid))
2011
2012 except Exception as exp:
2013 try:
2014 self.delete_vminstance(vapp_uuid)
2015 except Exception as exp2:
2016 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2017 # it might be the case that a specific mandatory entry in the dict is empty, or some other pyVcloud exception
2018 self.logger.error("new_vminstance(): Failed to create new vm instance {} with exception {}"
2019 .format(name, exp))
2020 raise vimconn.vimconnException("new_vminstance(): Failed to create new vm instance {} with exception {}"
2021 .format(name, exp))
2022
2023 # check if the vApp is deployed and, if that is the case, return the vApp UUID, otherwise -1
2024 wait_time = 0
2025 vapp_uuid = None
2026 while wait_time <= MAX_WAIT_TIME:
2027 try:
2028 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2029 vapp = VApp(self.client, resource=vapp_resource)
2030 except Exception as exp:
2031 raise vimconn.vimconnUnexpectedResponse(
2032 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
2033 .format(vmname_andid, exp))
2034
2035 #if vapp and vapp.me.deployed:
2036 if vapp and vapp_resource.get('deployed') == 'true':
2037 vapp_uuid = vapp_resource.get('id').split(':')[-1]
2038 break
2039 else:
2040 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
2041 time.sleep(INTERVAL_TIME)
2042
2043 wait_time +=INTERVAL_TIME
2044
2045 #SET Affinity Rule for VM
2046 #Pre-requisites: User has created Host Groups in vCenter with the respective hosts to be used
2047 #While creating the VIM account, the user has to pass the Host Group names in the availability_zone list
2048 #"availability_zone" is a part of VIM "config" parameters
2049 #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2050 #Host groups are referred to as availability zones
2051 #With the following procedure, the deployed VM will be added into a VM group.
2052 #Then a VM to Host Affinity rule will be created using the VM group & Host group.
2053 if availability_zone_list:
2054 self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
2055 #Admin access required for creating Affinity rules
2056 client = self.connect_as_admin()
2057 if not client:
2058 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
2059 else:
2060 self.client = client
2061 if self.client:
2062 headers = {'Accept':'application/*+xml;version=27.0',
2063 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2064 #Step1: Get provider vdc details from organization
2065 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2066 if pvdc_href is not None:
2067 #Step2: Found required pvdc, now get resource pool information
2068 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2069 if respool_href is None:
2070 #Raise error if respool_href not found
2071 msg = "new_vminstance(): Error in finding resource pool details in pvdc {}"\
2072 .format(pvdc_href)
2073 self.log_message(msg)
2074
2075 #Step3: Verify requested availability zone(hostGroup) is present in vCD
2076 # get availability Zone
2077 vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
2078 # check if provided av zone(hostGroup) is present in vCD VIM
2079 status = self.check_availibility_zone(vm_az, respool_href, headers)
2080 if status is False:
2081 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
2082 "resource pool {} status: {}".format(vm_az,respool_href,status)
2083 self.log_message(msg)
2084 else:
2085 self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
2086
2087 #Step4: Find VM group references to create vm group
2088 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2089 if vmgrp_href is None:
2090 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2091 self.log_message(msg)
2092
2093 #Step5: Create a VmGroup with name az_VmGroup
2094 vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
2095 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2096 if status is not True:
2097 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
2098 self.log_message(msg)
2099
2100 #VM Group url to add vms to vm group
2101 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
2102
2103 #Step6: Add VM to VM Group
2104 #Find VM uuid from vapp_uuid
2105 vm_details = self.get_vapp_details_rest(vapp_uuid)
2106 vm_uuid = vm_details['vmuuid']
2107
2108 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2109 if status is not True:
2110 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
2111 self.log_message(msg)
2112
2113 #Step7: Create VM to Host affinity rule
2114 addrule_href = self.get_add_rule_reference (respool_href, headers)
2115 if addrule_href is None:
2116 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
2117 .format(respool_href)
2118 self.log_message(msg)
2119
2120 status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
2121 if status is False:
2122 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
2123 .format(name, vm_az)
2124 self.log_message(msg)
2125 else:
2126 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
2127 .format(name, vm_az))
2128 #Reset token to a normal user to perform other operations
2129 self.get_token()
2130
2131 if vapp_uuid is not None:
2132 return vapp_uuid, None
2133 else:
2134 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
2135
2136 def create_config_drive_iso(self, user_data):
2137 tmpdir = tempfile.mkdtemp()
2138 iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
2139 latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
2140 os.makedirs(latest_dir)
2141 with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
2142 open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
2143 userdata_file_obj.write(user_data)
2144 meta_file_obj.write(json.dumps({"availability_zone": "nova",
2145 "launch_index": 0,
2146 "name": "ConfigDrive",
2147 "uuid": str(uuid.uuid4())}
2148 )
2149 )
2150 genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
2151 iso_path=iso_path, source_dir_path=tmpdir)
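# The directory passed to genisoimage mirrors the OpenStack config-drive layout built above:
#   <tmpdir>/openstack/latest/meta_data.json
#   <tmpdir>/openstack/latest/user_data
# so the expanded command looks like (illustrative temporary path):
#   genisoimage -J -r -V config-2 -o /tmp/tmpXXXX/ConfigDrive.iso /tmp/tmpXXXX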
2152 self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
2153 try:
2154 FNULL = open(os.devnull, 'w')
2155 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2156 except subprocess.CalledProcessError as e:
2157 shutil.rmtree(tmpdir, ignore_errors=True)
2158 error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
2159 self.logger.error(error_msg)
2160 raise Exception(error_msg)
2161 return iso_path
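# Usage sketch: this is how the cloud-init branch of new_vminstance above drives this helper
# (names as used there):
#   _, userdata = self._create_user_data(cloud_config)
#   iso_path = self.create_config_drive_iso(userdata)
#   self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
#   self.insert_media_to_vm(vapp, config_drive_catalog_id)
#   shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)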
2162
2163 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
2164 if not os.path.isfile(iso_file_path):
2165 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
2166 self.logger.error(error_msg)
2167 raise Exception(error_msg)
2168 iso_file_stat = os.stat(iso_file_path)
2169 xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
2170 <Media
2171 xmlns="http://www.vmware.com/vcloud/v1.5"
2172 name="{iso_name}"
2173 size="{iso_size}"
2174 imageType="iso">
2175 <Description>ISO image for config-drive</Description>
2176 </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
2177 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2178 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2179 headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
2180 catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
2181 response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
2182
2183 if response.status_code != 201:
2184 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
2185 self.logger.error(error_msg)
2186 raise Exception(error_msg)
2187
2188 catalogItem = XmlElementTree.fromstring(response.content)
2189 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
2190 entity_href = entity.get('href')
2191
2192 response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
2193 if response.status_code != 200:
2194 raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
2195
2196 match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
2197 if match:
2198 media_upload_href = match.group(1)
2199 else:
2200 raise Exception('Could not parse the upload URL for the media file from the last response')
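# Illustrative (not verbatim vCD output) shape of the fragment the regex above matches;
# the first href inside a self-closing element within <Files>...</Files> is captured:
#   <Files>
#     <File size="...">
#       <Link rel="upload:default" href="https://<vcd>/transfer/<id>/file.iso"/>
#     </File>
#   </Files>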
2201 upload_iso_task = self.get_task_from_response(response.content)
2202 headers['Content-Type'] = 'application/octet-stream'
2203 response = self.perform_request(req_type='PUT',
2204 url=media_upload_href,
2205 headers=headers,
2206 data=open(iso_file_path, 'rb'))
2207
2208 if response.status_code != 200:
2209 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2210 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2211 if result.get('status') != 'success':
2212 raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
2213
2214 def get_vcd_availibility_zones(self,respool_href, headers):
2215 """ Method to find presence of av zone is VIM resource pool
2216
2217 Args:
2218 respool_href - resource pool href
2219 headers - header information
2220
2221 Returns:
2222 vcd_az - list of availability zones present in vCD
2223 """
2224 vcd_az = []
2225 url=respool_href
2226 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2227
2228 if resp.status_code != requests.codes.ok:
2229 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2230 else:
2231 #Get the href to hostGroups and find provided hostGroup is present in it
2232 resp_xml = XmlElementTree.fromstring(resp.content)
2233 for child in resp_xml:
2234 if 'VMWProviderVdcResourcePool' in child.tag:
2235 for schild in child:
2236 if 'Link' in schild.tag:
2237 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2238 hostGroup = schild.attrib.get('href')
2239 hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
2240 if hg_resp.status_code != requests.codes.ok:
2241 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
2242 else:
2243 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2244 for hostGroup in hg_resp_xml:
2245 if 'HostGroup' in hostGroup.tag:
2246 #append host group name to the list
2247 vcd_az.append(hostGroup.attrib.get("name"))
2248 return vcd_az
2249
2250
2251 def set_availability_zones(self):
2252 """
2253 Set vim availability zone
2254 """
2255
2256 vim_availability_zones = None
2257 availability_zone = None
2258 if 'availability_zone' in self.config:
2259 vim_availability_zones = self.config.get('availability_zone')
2260 if isinstance(vim_availability_zones, str):
2261 availability_zone = [vim_availability_zones]
2262 elif isinstance(vim_availability_zones, list):
2263 availability_zone = vim_availability_zones
2264 else:
2265 return availability_zone
2266
2267 return availability_zone
2268
2269
2270 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2271 """
2272 Return the availability zone to be used by the created VM.
2273 returns: The VIM availability zone to be used or None
2274 """
2275 if availability_zone_index is None:
2276 if not self.config.get('availability_zone'):
2277 return None
2278 elif isinstance(self.config.get('availability_zone'), str):
2279 return self.config['availability_zone']
2280 else:
2281 return self.config['availability_zone'][0]
2282
2283 vim_availability_zones = self.availability_zone
2284
2285 # check if the VIM offers enough availability zones as described in the VNFD
2286 if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
2287 # check if all the names of NFV AV match VIM AV names
2288 match_by_index = False
2289 for av in availability_zone_list:
2290 if av not in vim_availability_zones:
2291 match_by_index = True
2292 break
2293 if match_by_index:
2294 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
2295 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
2296 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
2297 self.logger.debug("VIM Availability zones will be used by index")
2298 return vim_availability_zones[availability_zone_index]
2299 else:
2300 return availability_zone_list[availability_zone_index]
2301 else:
2302 raise vimconn.vimconnConflictException("Not enough availability zones at VIM for this deployment")
2303
2304
2305 def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
2306 """ Method to create VM to Host Affinity rule in vCD
2307
2308 Args:
2309 addrule_href - href to make a POST request
2310 vmgrpname - name of the VM group created
2311 hostgrpname - name of the host group created earlier
2312 polarity - Affinity or Anti-affinity (default: Affinity)
2313 headers - headers to make REST call
2314
2315 Returns:
2316 True- if rule is created
2317 False- Failed to create rule due to some error
2318
2319 """
2320 task_status = False
2321 rule_name = polarity + "_" + vmgrpname
2322 payload = """<?xml version="1.0" encoding="UTF-8"?>
2323 <vmext:VMWVmHostAffinityRule
2324 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2325 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2326 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2327 <vcloud:Name>{}</vcloud:Name>
2328 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2329 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2330 <vcloud:Polarity>{}</vcloud:Polarity>
2331 <vmext:HostGroupName>{}</vmext:HostGroupName>
2332 <vmext:VmGroupName>{}</vmext:VmGroupName>
2333 </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
2334
2335 resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
2336
2337 if resp.status_code != requests.codes.accepted:
2338 self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
2339 task_status = False
2340 return task_status
2341 else:
2342 affinity_task = self.get_task_from_response(resp.content)
2343 self.logger.debug ("affinity_task: {}".format(affinity_task))
2344 if affinity_task is None or affinity_task is False:
2345 raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
2346 # wait for task to complete
2347 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2348 if result.get('status') == 'success':
2349 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
2350 return True
2351 else:
2352 raise vimconn.vimconnUnexpectedResponse(
2353 "failed to create affinity rule {}".format(rule_name))
2354
2355
2356 def get_add_rule_reference (self, respool_href, headers):
2357 """ This method finds href to add vm to host affinity rule to vCD
2358
2359 Args:
2360 respool_href- href to resource pool
2361 headers- header information to make REST call
2362
2363 Returns:
2364 None - if no valid href to add rule found or
2365 addrule_href - href to add vm to host affinity rule of resource pool
2366 """
2367 addrule_href = None
2368 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2369
2370 if resp.status_code != requests.codes.ok:
2371 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2372 else:
2373
2374 resp_xml = XmlElementTree.fromstring(resp.content)
2375 for child in resp_xml:
2376 if 'VMWProviderVdcResourcePool' in child.tag:
2377 for schild in child:
2378 if 'Link' in schild.tag:
2379 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
2380 schild.attrib.get('rel') == "add":
2381 addrule_href = schild.attrib.get('href')
2382 break
2383
2384 return addrule_href
2385
2386
2387 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2388 """ Method to add deployed VM to newly created VM Group.
2389 This is required to create VM to Host affinity in vCD
2390
2391 Args:
2392 vm_uuid- newly created vm uuid
2393 vmGroupNameURL- URL to VM Group name
2394 vmGroup_name- Name of VM group created
2395 headers- Headers for REST request
2396
2397 Returns:
2398 True- if VM added to VM group successfully
2399 False- if any error is encountered
2400 """
2401
2402 addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
2403
2404 if addvm_resp.status_code != requests.codes.ok:
2405 self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
2406 .format(vmGroupNameURL, addvm_resp.status_code))
2407 return False
2408 else:
2409 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2410 for child in resp_xml:
2411 if child.tag.split('}')[1] == 'Link':
2412 if child.attrib.get("rel") == "addVms":
2413 addvmtogrpURL = child.attrib.get("href")
2414
2415 #Get vm details
2416 url_list = [self.url, '/api/vApp/vm-',vm_uuid]
2417 vmdetailsURL = ''.join(url_list)
2418
2419 resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
2420
2421 if resp.status_code != requests.codes.ok:
2422 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
2423 return False
2424
2425 #Parse VM details
2426 resp_xml = XmlElementTree.fromstring(resp.content)
2427 if resp_xml.tag.split('}')[1] == "Vm":
2428 vm_id = resp_xml.attrib.get("id")
2429 vm_name = resp_xml.attrib.get("name")
2430 vm_href = resp_xml.attrib.get("href")
2431 #print vm_id, vm_name, vm_href
2432 #Add VM into VMgroup
2433 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2434 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2435 xmlns="http://www.vmware.com/vcloud/versions" \
2436 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2437 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2438 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2439 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2440 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2441 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2442 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2443 <ns2:VmReference href="{}" id="{}" name="{}" \
2444 type="application/vnd.vmware.vcloud.vm+xml" />\
2445 </ns2:Vms>""".format(vm_href, vm_id, vm_name)
2446
2447 addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
2448
2449 if addvmtogrp_resp.status_code != requests.codes.accepted:
2450 self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
2451 return False
2452 else:
2453 self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
2454 return True
2455
2456
2457 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2458 """Method to create a VM group in vCD
2459
2460 Args:
2461 vmgroup_name : Name of VM group to be created
2462 vmgroup_href : href for vmgroup
2463 headers- Headers for REST request
2464 """
2465 #POST to add URL with required data
2466 vmgroup_status = False
2467 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2468 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2469 <vmCount>1</vmCount>\
2470 </VMWVmGroup>""".format(vmgroup_name)
2471 resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
2472
2473 if resp.status_code != requests.codes.accepted:
2474 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
2475 return vmgroup_status
2476 else:
2477 vmgroup_task = self.get_task_from_response(resp.content)
2478 if vmgroup_task is None or vmgroup_task is False:
2479 raise vimconn.vimconnUnexpectedResponse(
2480 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2481
2482 # wait for task to complete
2483 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2484
2485 if result.get('status') == 'success':
2486 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
2487 #time.sleep(10)
2488 vmgroup_status = True
2489 return vmgroup_status
2490 else:
2491 raise vimconn.vimconnUnexpectedResponse(\
2492 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2493
2494
2495 def find_vmgroup_reference(self, url, headers):
2496 """ Method to create a new VMGroup which is required to add created VM
2497 Args:
2498 url- resource pool href
2499 headers- header information
2500
2501 Returns:
2502 href used to create a VM group, or None if not found
2503 """
2504 #Perform GET on resource pool to find 'add' link to create VMGroup
2505 #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2506 vmgrp_href = None
2507 resp = self.perform_request(req_type='GET',url=url, headers=headers)
2508
2509 if resp.status_code != requests.codes.ok:
2510 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2511 else:
2512 #Get the href to add vmGroup to vCD
2513 resp_xml = XmlElementTree.fromstring(resp.content)
2514 for child in resp_xml:
2515 if 'VMWProviderVdcResourcePool' in child.tag:
2516 for schild in child:
2517 if 'Link' in schild.tag:
2518 #Find href with type VMGroup and rel with add
2519 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
2520 and schild.attrib.get('rel') == "add":
2521 vmgrp_href = schild.attrib.get('href')
2522 return vmgrp_href
2523
2524
2525 def check_availibility_zone(self, az, respool_href, headers):
2526 """ Method to verify requested av zone is present or not in provided
2527 resource pool
2528
2529 Args:
2530 az - name of hostgroup (availibility_zone)
2531 respool_href - Resource Pool href
2532 headers - Headers to make REST call
2533 Returns:
2534 az_found - True if availibility_zone is found else False
2535 """
2536 az_found = False
2537 headers['Accept']='application/*+xml;version=27.0'
2538 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2539
2540 if resp.status_code != requests.codes.ok:
2541 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2542 else:
2543 #Get the href to hostGroups and find provided hostGroup is present in it
2544 resp_xml = XmlElementTree.fromstring(resp.content)
2545
2546 for child in resp_xml:
2547 if 'VMWProviderVdcResourcePool' in child.tag:
2548 for schild in child:
2549 if 'Link' in schild.tag:
2550 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2551 hostGroup_href = schild.attrib.get('href')
2552 hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
2553 if hg_resp.status_code != requests.codes.ok:
2554 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
2555 else:
2556 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2557 for hostGroup in hg_resp_xml:
2558 if 'HostGroup' in hostGroup.tag:
2559 if hostGroup.attrib.get("name") == az:
2560 az_found = True
2561 break
2562 return az_found
2563
2564
2565 def get_pvdc_for_org(self, org_vdc, headers):
2566 """ This method gets provider vdc references from organisation
2567
2568 Args:
2569 org_vdc - name of the organisation VDC to find pvdc
2570 headers - headers to make REST call
2571
2572 Returns:
2573 None - if no pvdc href found else
2574 pvdc_href - href to pvdc
2575 """
2576
2577 #Get provider VDC references from vCD
2578 pvdc_href = None
2579 #url = '<vcd url>/api/admin/extension/providerVdcReferences'
2580 url_list = [self.url, '/api/admin/extension/providerVdcReferences']
2581 url = ''.join(url_list)
2582
2583 response = self.perform_request(req_type='GET',url=url, headers=headers)
2584 if response.status_code != requests.codes.ok:
2585 self.logger.debug ("REST API call {} failed. Return status code {}"\
2586 .format(url, response.status_code))
2587 else:
2588 xmlroot_response = XmlElementTree.fromstring(response.content)
2589 for child in xmlroot_response:
2590 if 'ProviderVdcReference' in child.tag:
2591 pvdc_href = child.attrib.get('href')
2592 #Get vdcReferences to find org
2593 pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2594 if pvdc_resp.status_code != requests.codes.ok:
2595 raise vimconn.vimconnException("REST API call {} failed. "\
2596 "Return status code {}"\
2597 .format(url, pvdc_resp.status_code))
2598
2599 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
2600 for child in pvdc_resp_xml:
2601 if 'Link' in child.tag:
2602 if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
2603 vdc_href = child.attrib.get('href')
2604
2605 #Check if provided org is present in vdc
2606 vdc_resp = self.perform_request(req_type='GET',
2607 url=vdc_href,
2608 headers=headers)
2609 if vdc_resp.status_code != requests.codes.ok:
2610 raise vimconn.vimconnException("REST API call {} failed. "\
2611 "Return status code {}"\
2612 .format(url, vdc_resp.status_code))
2613 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
2614 for child in vdc_resp_xml:
2615 if 'VdcReference' in child.tag:
2616 if child.attrib.get('name') == org_vdc:
2617 return pvdc_href
2618
2619
2620 def get_resource_pool_details(self, pvdc_href, headers):
2621 """ Method to get resource pool information.
2622 Host groups are a property of the resource pool.
2623 To get host groups, we need to GET details of resource pool.
2624
2625 Args:
2626 pvdc_href: href to pvdc details
2627 headers: headers
2628
2629 Returns:
2630 respool_href - Returns href link reference to resource pool
2631 """
2632 respool_href = None
2633 resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2634
2635 if resp.status_code != requests.codes.ok:
2636 self.logger.debug ("REST API call {} failed. Return status code {}"\
2637 .format(pvdc_href, resp.status_code))
2638 else:
2639 respool_resp_xml = XmlElementTree.fromstring(resp.content)
2640 for child in respool_resp_xml:
2641 if 'Link' in child.tag:
2642 if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
2643 respool_href = child.attrib.get("href")
2644 break
2645 return respool_href
2646
2647
2648 def log_message(self, msg):
2649 """
2650 Method to log error messages related to Affinity rule creation
2651 in new_vminstance & raise Exception
2652 Args :
2653 msg - Error message to be logged
2654
2655 """
2656 #get token to connect vCD as a normal user
2657 self.get_token()
2658 self.logger.debug(msg)
2659 raise vimconn.vimconnException(msg)
2660
2661
2662 ##
2663 ##
2664 ## based on current discussion
2665 ##
2666 ##
2667 ## server:
2668 # created: '2016-09-08T11:51:58'
2669 # description: simple-instance.linux1.1
2670 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
2671 # hostId: e836c036-74e7-11e6-b249-0800273e724c
2672 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
2673 # status: ACTIVE
2674 # error_msg:
2675 # interfaces: …
2676 #
2677 def get_vminstance(self, vim_vm_uuid=None):
2678 """Returns the VM instance information from VIM"""
2679
2680 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
2681
2682 org, vdc = self.get_vdc_details()
2683 if vdc is None:
2684 raise vimconn.vimconnConnectionException(
2685 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2686
2687 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
2688 if not vm_info_dict:
2689 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2690 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2691
2692 status_key = vm_info_dict['status']
2693 error = ''
2694 try:
2695 vm_dict = {'created': vm_info_dict['created'],
2696 'description': vm_info_dict['name'],
2697 'status': vcdStatusCode2manoFormat[int(status_key)],
2698 'hostId': vm_info_dict['vmuuid'],
2699 'error_msg': error,
2700 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2701
2702 if 'interfaces' in vm_info_dict:
2703 vm_dict['interfaces'] = vm_info_dict['interfaces']
2704 else:
2705 vm_dict['interfaces'] = []
2706 except KeyError:
2707 vm_dict = {'created': '',
2708 'description': '',
2709 'status': vcdStatusCode2manoFormat[int(-1)],
2710 'hostId': vm_info_dict['vmuuid'],
2711 'error_msg': "Inconsistency state",
2712 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2713
2714 return vm_dict
2715
2716 def delete_vminstance(self, vm__vim_uuid, created_items=None):
2717 """Method poweroff and remove VM instance from vcloud director network.
2718
2719 Args:
2720 vm__vim_uuid: VM UUID
2721
2722 Returns:
2723 Returns the instance identifier
2724 """
2725
2726 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
2727
2728 org, vdc = self.get_vdc_details()
2729 vdc_obj = VDC(self.client, href=vdc.get('href'))
2730 if vdc_obj is None:
2731 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
2732 self.tenant_name))
2733 raise vimconn.vimconnException(
2734 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2735
2736 try:
2737 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2738 if vapp_name is None:
2739 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2740 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2741 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2742 vapp_resource = vdc_obj.get_vapp(vapp_name)
2743 vapp = VApp(self.client, resource=vapp_resource)
2744
2745 # Power off, undeploy and delete the vApp, waiting for each task to complete.
2746
2747 if vapp:
2748 if vapp_resource.get('deployed') == 'true':
2749 self.logger.info("Powering off vApp {}".format(vapp_name))
2750 #Power off vApp
2751 powered_off = False
2752 wait_time = 0
2753 while wait_time <= MAX_WAIT_TIME:
2754 power_off_task = vapp.power_off()
2755 result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
2756
2757 if result.get('status') == 'success':
2758 powered_off = True
2759 break
2760 else:
2761 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
2762 time.sleep(INTERVAL_TIME)
2763
2764 wait_time +=INTERVAL_TIME
2765 if not powered_off:
2766 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
2767 else:
2768 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
2769
2770 #Undeploy vApp
2771 self.logger.info("Undeploy vApp {}".format(vapp_name))
2772 wait_time = 0
2773 undeployed = False
2774 while wait_time <= MAX_WAIT_TIME:
2775 vapp = VApp(self.client, resource=vapp_resource)
2776 if not vapp:
2777 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2778 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2779 undeploy_task = vapp.undeploy()
2780
2781 result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
2782 if result.get('status') == 'success':
2783 undeployed = True
2784 break
2785 else:
2786 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
2787 time.sleep(INTERVAL_TIME)
2788
2789 wait_time +=INTERVAL_TIME
2790
2791 if not undeployed:
2792 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
2793
2794 # delete vapp
2795 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
2796
2797 if vapp is not None:
2798 wait_time = 0
2799 result = False
2800
2801 while wait_time <= MAX_WAIT_TIME:
2802 vapp = VApp(self.client, resource=vapp_resource)
2803 if not vapp:
2804 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2805 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2806
2807 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
2808
2809 result = self.client.get_task_monitor().wait_for_success(task=delete_task)
2810 if result.get('status') == 'success':
2811 break
2812 else:
2813 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
2814 time.sleep(INTERVAL_TIME)
2815
2816 wait_time +=INTERVAL_TIME
2817
2818 if result is None:
2819 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
2820 else:
2821 self.logger.info("Deleted vm instance {} successfully".format(vm__vim_uuid))
2822 config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
2823 catalog_list = self.get_image_list()
2824 try:
2825 config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
2826 if catalog_['name'] == config_drive_catalog_name][0]
2827 except IndexError:
2828 pass
2829 if config_drive_catalog_id:
2830 self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
2831 'vapp_name "{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
2832 self.delete_image(config_drive_catalog_id)
2833 return vm__vim_uuid
2834 except Exception:
2835 self.logger.debug(traceback.format_exc())
2836 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
2837
2838
2839 def refresh_vms_status(self, vm_list):
2840 """Get the status of the virtual machines and their interfaces/ports
2841 Params: the list of VM identifiers
2842 Returns a dictionary with:
2843 vm_id: #VIM id of this Virtual Machine
2844 status: #Mandatory. Text with one of:
2845 # DELETED (not found at vim)
2846 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2847 # OTHER (Vim reported other status not understood)
2848 # ERROR (VIM indicates an ERROR status)
2849 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2850 # CREATING (on building process), ERROR
2851 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
2852 #
2853 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2854 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2855 interfaces:
2856 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2857 mac_address: #Text format XX:XX:XX:XX:XX:XX
2858 vim_net_id: #network id where this interface is connected
2859 vim_interface_id: #interface/port VIM id
2860 ip_address: #null, or text with IPv4, IPv6 address
2861 """
2862
2863 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
2864
2865 org,vdc = self.get_vdc_details()
2866 if vdc is None:
2867 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2868
2869 vms_dict = {}
2870 nsx_edge_list = []
2871 for vmuuid in vm_list:
2872 vapp_name = self.get_namebyvappid(vmuuid)
2873 if vapp_name is not None:
2874
2875 try:
2876 vm_pci_details = self.get_vm_pci_details(vmuuid)
2877 vdc_obj = VDC(self.client, href=vdc.get('href'))
2878 vapp_resource = vdc_obj.get_vapp(vapp_name)
2879 the_vapp = VApp(self.client, resource=vapp_resource)
2880
2881 vm_details = {}
2882 for vm in the_vapp.get_all_vms():
2883 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2884 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2885 response = self.perform_request(req_type='GET',
2886 url=vm.get('href'),
2887 headers=headers)
2888
2889 if response.status_code != 200:
2890 self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
2891 "status code : {}".format(vm.get('href'),
2892 response.content,
2893 response.status_code))
2894 raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
2895 "VM details")
2896 xmlroot = XmlElementTree.fromstring(response.content)
2897
2898
2899 result = response.content.replace("\n"," ")
2900 hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
2901 if hdd_match:
2902 hdd_mb = hdd_match.group(1)
2903 vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
2904 cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
2905 if cpus_match:
2906 cpus = cpus_match.group(1)
2907 vm_details['cpus'] = int(cpus) if cpus else None
2908 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
2909 vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
2910 vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
2911 vm_details['id'] = xmlroot.get('id')
2912 vm_details['name'] = xmlroot.get('name')
2913 vm_info = [vm_details]
2914 if vm_pci_details:
2915 vm_info[0].update(vm_pci_details)
2916
2917 vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2918 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2919 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2920
2921 # get networks
2922 vm_ip = None
2923 vm_mac = None
2924 networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
2925 for network in networks:
2926 mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
2927 vm_mac = mac_s.group(1) if mac_s else None
2928 ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
2929 vm_ip = ip_s.group(1) if ip_s else None
2930
2931 if vm_ip is None:
2932 if not nsx_edge_list:
2933 nsx_edge_list = self.get_edge_details()
2934 if nsx_edge_list is None:
2935 raise vimconn.vimconnException("refresh_vms_status:"\
2936 "Failed to get edge details from NSX Manager")
2937 if vm_mac is not None:
2938 vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
2939
2940 net_s = re.search('network="(.*?)"',network)
2941 network_name = net_s.group(1) if net_s else None
2942
2943 vm_net_id = self.get_network_id_by_name(network_name)
2944 interface = {"mac_address": vm_mac,
2945 "vim_net_id": vm_net_id,
2946 "vim_interface_id": vm_net_id,
2947 "ip_address": vm_ip}
2948
2949 vm_dict["interfaces"].append(interface)
2950
2951 # add a vm to vm dict
2952 vms_dict.setdefault(vmuuid, vm_dict)
2953 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
2954 except Exception as exp:
2955 self.logger.debug("Error in response {}".format(exp))
2956 self.logger.debug(traceback.format_exc())
2957
2958 return vms_dict
2959
2960
2961 def get_edge_details(self):
2962 """Get the NSX edge list from NSX Manager
2963 Returns list of NSX edges
2964 """
2965 edge_list = []
2966 rheaders = {'Content-Type': 'application/xml'}
2967 nsx_api_url = '/api/4.0/edges'
2968
2969 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2970
2971 try:
2972 resp = requests.get(self.nsx_manager + nsx_api_url,
2973 auth = (self.nsx_user, self.nsx_password),
2974 verify = False, headers = rheaders)
2975 if resp.status_code == requests.codes.ok:
2976 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2977 for edge_pages in paged_Edge_List:
2978 if edge_pages.tag == 'edgePage':
2979 for edge_summary in edge_pages:
2980 if edge_summary.tag == 'pagingInfo':
2981 for element in edge_summary:
2982 if element.tag == 'totalCount' and element.text == '0':
2983 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2984 .format(self.nsx_manager))
2985
2986 if edge_summary.tag == 'edgeSummary':
2987 for element in edge_summary:
2988 if element.tag == 'id':
2989 edge_list.append(element.text)
2990 else:
2991 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2992 .format(self.nsx_manager))
2993
2994 if not edge_list:
2995 raise vimconn.vimconnException("get_edge_details: "\
2996 "No NSX edge details found: {}"
2997 .format(self.nsx_manager))
2998 else:
2999 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
3000 return edge_list
3001 else:
3002 self.logger.debug("get_edge_details: "
3003 "Failed to get NSX edge details from NSX Manager: {}"
3004 .format(resp.content))
3005 return None
3006
3007 except Exception as exp:
3008 self.logger.debug("get_edge_details: "\
3009 "Failed to get NSX edge details from NSX Manager: {}"
3010 .format(exp))
3011 raise vimconn.vimconnException("get_edge_details: "\
3012 "Failed to get NSX edge details from NSX Manager: {}"
3013 .format(exp))
3014
3015
3016 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3017 """Get IP address details from NSX edges, using the MAC address
3018 PARAMS: nsx_edges : List of NSX edges
3019 mac_address : Find IP address corresponding to this MAC address
3020 Returns: IP address corresponding to the provided MAC address
3021 """
3022
3023 ip_addr = None
3024 rheaders = {'Content-Type': 'application/xml'}
3025
3026 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3027
3028 try:
3029 for edge in nsx_edges:
3030 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
3031
3032 resp = requests.get(self.nsx_manager + nsx_api_url,
3033 auth = (self.nsx_user, self.nsx_password),
3034 verify = False, headers = rheaders)
3035
3036 if resp.status_code == requests.codes.ok:
3037 dhcp_leases = XmlElementTree.fromstring(resp.text)
3038 for child in dhcp_leases:
3039 if child.tag == 'dhcpLeaseInfo':
3040 dhcpLeaseInfo = child
3041 for leaseInfo in dhcpLeaseInfo:
3042 for elem in leaseInfo:
3043 if (elem.tag)=='macAddress':
3044 edge_mac_addr = elem.text
3045 if (elem.tag)=='ipAddress':
3046 ip_addr = elem.text
3047 if edge_mac_addr is not None:
3048 if edge_mac_addr == mac_address:
3049 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
3050 .format(ip_addr, mac_address,edge))
3051 return ip_addr
3052 else:
3053 self.logger.debug("get_ipaddr_from_NSXedge: "\
3054 "Error occurred while getting DHCP lease info from NSX Manager: {}"
3055 .format(resp.content))
3056
3057 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
3058 return None
3059
3060 except XmlElementTree.ParseError as Err:
3061 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
3062
3063 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
3064 """Send and action over a VM instance from VIM
3065 Returns the vm_id if the action was successfully sent to the VIM"""
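# Illustrative calls, assuming an initialized connector instance `vim_conn` and an
# existing vApp UUID (both names are placeholders, not part of this module):
#     vim_conn.action_vminstance('<vm uuid>', {"shutdown": None})
#     vim_conn.action_vminstance('<vm uuid>', {"start": None})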
3066
3067 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
3068 if vm__vim_uuid is None or action_dict is None:
3069 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
3070
3071 org, vdc = self.get_vdc_details()
3072 if vdc is None:
3073 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
3074
3075 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3076 if vapp_name is None:
3077 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3078 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3079 else:
3080 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
3081
3082 try:
3083 vdc_obj = VDC(self.client, href=vdc.get('href'))
3084 vapp_resource = vdc_obj.get_vapp(vapp_name)
3085 vapp = VApp(self.client, resource=vapp_resource)
3086 if "start" in action_dict:
3087 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
3088 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3089 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3090 self.instance_actions_result("start", result, vapp_name)
3091 elif "rebuild" in action_dict:
3092 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
3093 rebuild_task = vapp.deploy(power_on=True)
3094 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
3095 self.instance_actions_result("rebuild", result, vapp_name)
3096 elif "pause" in action_dict:
3097 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
3098 pause_task = vapp.undeploy(action='suspend')
3099 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
3100 self.instance_actions_result("pause", result, vapp_name)
3101 elif "resume" in action_dict:
3102 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
3103 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3104 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3105 self.instance_actions_result("resume", result, vapp_name)
3106 elif "shutoff" in action_dict or "shutdown" in action_dict:
3107 action_name , value = list(action_dict.items())[0]
3108 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
3109 shutdown_task = vapp.shutdown()
3110 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
3111 if action_name == "shutdown":
3112 self.instance_actions_result("shutdown", result, vapp_name)
3113 else:
3114 self.instance_actions_result("shutoff", result, vapp_name)
3115 elif "forceOff" in action_dict:
3116 result = vapp.undeploy(action='powerOff')
3117 self.instance_actions_result("forceOff", result, vapp_name)
3118 elif "reboot" in action_dict:
3119 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
3120 reboot_task = vapp.reboot()
3121 self.client.get_task_monitor().wait_for_success(task=reboot_task)
3122 else:
3123 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
3124 return vm__vim_uuid
3125 except Exception as exp :
3126 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
3127 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
3128
3129 def instance_actions_result(self, action, result, vapp_name):
3130 if result.get('status') == 'success':
3131 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
3132 else:
3133 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
3134
3135 def get_vminstance_console(self, vm_id, console_type="novnc"):
3136 """
3137 Get a console for the virtual machine
3138 Params:
3139 vm_id: uuid of the VM
3140 console_type, can be:
3141 "novnc" (by default), "xvpvnc" for VNC types,
3142 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3143 Returns dict with the console parameters:
3144 protocol: ssh, ftp, http, https, ...
3145 server: usually ip address
3146 port: the http, ssh, ... port
3147 suffix: extra text, e.g. the http path and query string
3148 """
3149 console_dict = {}
3150
3151 if console_type is None or console_type == 'novnc':
3152
3153 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
3154
3155 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3156 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3157 response = self.perform_request(req_type='POST',
3158 url=url_rest_call,
3159 headers=headers)
3160
3161 if response.status_code == 403:
3162 response = self.retry_rest('GET', url_rest_call)
3163
3164 if response.status_code != 200:
3165 self.logger.error("REST call {} failed reason : {}"\
3166 "status code : {}".format(url_rest_call,
3167 response.content,
3168 response.status_code))
3169 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3170 "VM Mks ticket details")
3171 s = re.search("<Host>(.*?)</Host>",response.content)
3172 console_dict['server'] = s.group(1) if s else None
3173 s1 = re.search("<Port>(\d+)</Port>",response.content)
3174 console_dict['port'] = s1.group(1) if s1 else None
3175
3176
3177 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
3178
3179 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3180 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3181 response = self.perform_request(req_type='POST',
3182 url=url_rest_call,
3183 headers=headers)
3184
3185 if response.status_code == 403:
3186 response = self.retry_rest('GET', url_rest_call)
3187
3188 if response.status_code != 200:
3189 self.logger.error("REST call {} failed reason : {}"\
3190 "status code : {}".format(url_rest_call,
3191 response.content,
3192 response.status_code))
3193 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3194 "VM console details")
3195 s = re.search(">.*?/(vm-\d+.*)</",response.content)
3196 console_dict['suffix'] = s.group(1) if s else None
3197 console_dict['protocol'] = "https"
3198
3199 return console_dict
3200
3201 # NOT USED METHODS in current version
3202
3203 def host_vim2gui(self, host, server_dict):
3204 """Transform host dictionary from VIM format to GUI format,
3205 and append to the server_dict
3206 """
3207 raise vimconn.vimconnNotImplemented("Should have implemented this")
3208
3209 def get_hosts_info(self):
3210 """Get the information of deployed hosts
3211 Returns the hosts content"""
3212 raise vimconn.vimconnNotImplemented("Should have implemented this")
3213
3214 def get_hosts(self, vim_tenant):
3215 """Get the hosts and deployed instances
3216 Returns the hosts content"""
3217 raise vimconn.vimconnNotImplemented("Should have implemented this")
3218
3219 def get_processor_rankings(self):
3220 """Get the processor rankings in the VIM database"""
3221 raise vimconn.vimconnNotImplemented("Should have implemented this")
3222
3223 def new_host(self, host_data):
3224 """Adds a new host to VIM"""
3225 '''Returns status code of the VIM response'''
3226 raise vimconn.vimconnNotImplemented("Should have implemented this")
3227
3228 def new_external_port(self, port_data):
3229 """Adds a external port to VIM"""
3230 '''Returns the port identifier'''
3231 raise vimconn.vimconnNotImplemented("Should have implemented this")
3232
3233 def new_external_network(self, net_name, net_type):
3234 """Adds a external network to VIM (shared)"""
3235 '''Returns the network identifier'''
3236 raise vimconn.vimconnNotImplemented("Should have implemented this")
3237
3238 def connect_port_network(self, port_id, network_id, admin=False):
3239 """Connects a external port to a network"""
3240 '''Returns status code of the VIM response'''
3241 raise vimconn.vimconnNotImplemented("Should have implemented this")
3242
3243 def new_vminstancefromJSON(self, vm_data):
3244 """Adds a VM instance to VIM"""
3245 '''Returns the instance identifier'''
3246 raise vimconn.vimconnNotImplemented("Should have implemented this")
3247
3248 def get_network_name_by_id(self, network_uuid=None):
3249 """Method gets vcloud director network named based on supplied uuid.
3250
3251 Args:
3252 network_uuid: network_id
3253
3254 Returns:
3255 The network name, or None if not found.
3256 """
3257
3258 if not network_uuid:
3259 return None
3260
3261 try:
3262 org_dict = self.get_org(self.org_uuid)
3263 if 'networks' in org_dict:
3264 org_network_dict = org_dict['networks']
3265 for net_uuid in org_network_dict:
3266 if net_uuid == network_uuid:
3267 return org_network_dict[net_uuid]
3268 except:
3269 self.logger.debug("Exception in get_network_name_by_id")
3270 self.logger.debug(traceback.format_exc())
3271
3272 return None
3273
3274 def get_network_id_by_name(self, network_name=None):
3275 """Method gets vcloud director network uuid based on supplied name.
3276
3277 Args:
3278 network_name: network_name
3279 Returns:
3280 The network UUID (network_uuid), or None if not found.
3282 """
3283 if not network_name:
3284 self.logger.debug("get_network_id_by_name() : Network name is empty")
3285 return None
3286
3287 try:
3288 org_dict = self.get_org(self.org_uuid)
3289 if org_dict and 'networks' in org_dict:
3290 org_network_dict = org_dict['networks']
3291 for net_uuid, net_name in org_network_dict.items():
3292 if net_name == network_name:
3293 return net_uuid
3294
3295 except KeyError as exp:
3296 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
3297
3298 return None
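# Illustrative usage of the two lookup helpers above, assuming an initialized
# connector instance `vim_conn` (the network name is a placeholder):
#     net_id = vim_conn.get_network_id_by_name('mgmt-net')
#     net_name = vim_conn.get_network_name_by_id(net_id)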
3299
3300 def get_physical_network_by_name(self, physical_network_name):
3301 '''
3302 Method returns the UUID of the physical network whose name is passed
3303 Args:
3304 physical_network_name: physical network name
3305 Returns:
3306 UUID of physical_network_name
3307 '''
3308 try:
3309 client_as_admin = self.connect_as_admin()
3310 if not client_as_admin:
3311 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3312 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3313 vm_list_rest_call = ''.join(url_list)
3314
3315 if client_as_admin._session:
3316 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3317 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3318
3319 response = self.perform_request(req_type='GET',
3320 url=vm_list_rest_call,
3321 headers=headers)
3322
3323 provider_network = None
3324 available_network = None
3325 add_vdc_rest_url = None
3326
3327 if response.status_code != requests.codes.ok:
3328 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3329 response.status_code))
3330 return None
3331 else:
3332 try:
3333 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3334 for child in vm_list_xmlroot:
3335
3336 if child.tag.split("}")[1] == 'ProviderVdcReference':
3337 provider_network = child.attrib.get('href')
3338 # application/vnd.vmware.admin.providervdc+xml
3339 if child.tag.split("}")[1] == 'Link':
3340 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3341 and child.attrib.get('rel') == 'add':
3342 add_vdc_rest_url = child.attrib.get('href')
3343 except:
3344 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3345 self.logger.debug("Respond body {}".format(response.content))
3346 return None
3347
3348 # find pvdc provided available network
3349 response = self.perform_request(req_type='GET',
3350 url=provider_network,
3351 headers=headers)
3352
3353 if response.status_code != requests.codes.ok:
3354 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3355 response.status_code))
3356 return None
3357
3358 try:
3359 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3360 for child in vm_list_xmlroot.iter():
3361 if child.tag.split("}")[1] == 'AvailableNetworks':
3362 for networks in child.iter():
3363 if networks.attrib.get('href') is not None and networks.attrib.get('name') is not None:
3364 if networks.attrib.get('name') == physical_network_name:
3365 network_url = networks.attrib.get('href')
3366 available_network = network_url[network_url.rindex('/')+1:]
3367 break
3368 except Exception as e:
3369 return None
3370
3371 return available_network
3372 except Exception as e:
3373 self.logger.error("Error while getting physical network: {}".format(e))
3374
3375 def list_org_action(self):
3376 """
3377 Method leverages vCloud Director and queries the available organizations for a particular user
3378
3379 Args:
3380 vca - is active VCA connection.
3381 vdc_name - is a vdc name that will be used to query vms action
3382
3383 Returns:
3384 The returned XML response
3385 """
3386 url_list = [self.url, '/api/org']
3387 vm_list_rest_call = ''.join(url_list)
3388
3389 if self.client._session:
3390 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3391 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3392
3393 response = self.perform_request(req_type='GET',
3394 url=vm_list_rest_call,
3395 headers=headers)
3396
3397 if response.status_code == 403:
3398 response = self.retry_rest('GET', vm_list_rest_call)
3399
3400 if response.status_code == requests.codes.ok:
3401 return response.content
3402
3403 return None
3404
3405 def get_org_action(self, org_uuid=None):
3406 """
3407 Method leverages vCloud Director and retrieves the available objects for an organization.
3408
3409 Args:
3410 org_uuid - vCD organization uuid
3411 self.client - is active connection.
3412
3413 Returns:
3414 The returned XML response
3415 """
3416
3417 if org_uuid is None:
3418 return None
3419
3420 url_list = [self.url, '/api/org/', org_uuid]
3421 vm_list_rest_call = ''.join(url_list)
3422
3423 if self.client._session:
3424 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3425 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3426
3427 #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
3428 response = self.perform_request(req_type='GET',
3429 url=vm_list_rest_call,
3430 headers=headers)
3431 if response.status_code == 403:
3432 response = self.retry_rest('GET', vm_list_rest_call)
3433
3434 if response.status_code == requests.codes.ok:
3435 return response.content
3436 return None
3437
3438 def get_org(self, org_uuid=None):
3439 """
3440 Method retrieves available organization in vCloud Director
3441
3442 Args:
3443 org_uuid - is a organization uuid.
3444
3445 Returns:
3446 The return dictionary with following key
3447 "network" - for network list under the org
3448 "catalogs" - for network list under the org
3449 "vdcs" - for vdc list under org
3450 """
3451
3452 org_dict = {}
3453
3454 if org_uuid is None:
3455 return org_dict
3456
3457 content = self.get_org_action(org_uuid=org_uuid)
3458 try:
3459 vdc_list = {}
3460 network_list = {}
3461 catalog_list = {}
3462 vm_list_xmlroot = XmlElementTree.fromstring(content)
3463 for child in vm_list_xmlroot:
3464 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
3465 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3466 org_dict['vdcs'] = vdc_list
3467 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
3468 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3469 org_dict['networks'] = network_list
3470 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
3471 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3472 org_dict['catalogs'] = catalog_list
3473 except:
3474 pass
3475
3476 return org_dict
3477
3478 def get_org_list(self):
3479 """
3480 Method retrieves available organization in vCloud Director
3481
3482 Args:
3483 vca - is active VCA connection.
3484
3485 Returns:
3486 The return dictionary, keyed by organization UUID
3487 """
3488
3489 org_dict = {}
3490
3491 content = self.list_org_action()
3492 try:
3493 vm_list_xmlroot = XmlElementTree.fromstring(content)
3494 for vm_xml in vm_list_xmlroot:
3495 if vm_xml.tag.split("}")[1] == 'Org':
3496 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
3497 org_dict[org_uuid[0]] = vm_xml.attrib['name']
3498 except:
3499 pass
3500
3501 return org_dict
3502
3503 def vms_view_action(self, vdc_name=None):
3504 """ Method leverages vCloud director vms query call
3505
3506 Args:
3507 vca - is active VCA connection.
3508 vdc_name - is a vdc name that will be used to query vms action
3509
3510 Returns:
3511 The returned XML response
3512 """
3513 vca = self.connect()
3514 if vdc_name is None:
3515 return None
3516
3517 url_list = [vca.host, '/api/vms/query']
3518 vm_list_rest_call = ''.join(url_list)
3519
3520 if vca.vcloud_session and vca.vcloud_session.organization:
3521 refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and
3522 ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
3523 if len(refs) == 1:
3524 response = Http.get(url=vm_list_rest_call,
3525 headers=vca.vcloud_session.get_vcloud_headers(),
3526 verify=vca.verify,
3527 logger=vca.logger)
3528 if response.status_code == requests.codes.ok:
3529 return response.content
3530
3531 return None
3532
3533 def get_vapp_list(self, vdc_name=None):
3534 """
3535 Method retrieves the vApp list deployed in vCloud Director and returns a dictionary
3536 containing all vApps deployed for the queried VDC.
3537 The key of the dictionary is the vApp UUID
3538
3539
3540 Args:
3541 vca - is active VCA connection.
3542 vdc_name - is a vdc name that will be used to query vms action
3543
3544 Returns:
3545 The return dictionary, keyed by vApp UUID
3546 """
3547
3548 vapp_dict = {}
3549 if vdc_name is None:
3550 return vapp_dict
3551
3552 content = self.vms_view_action(vdc_name=vdc_name)
3553 try:
3554 vm_list_xmlroot = XmlElementTree.fromstring(content)
3555 for vm_xml in vm_list_xmlroot:
3556 if vm_xml.tag.split("}")[1] == 'VMRecord':
3557 if vm_xml.attrib['isVAppTemplate'] == 'true':
3558 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
3559 if 'vappTemplate-' in rawuuid[0]:
3560 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3561 # vm and use raw UUID as key
3562 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
3563 except:
3564 pass
3565
3566 return vapp_dict
3567
3568 def get_vm_list(self, vdc_name=None):
3569 """
3570 Method retrieves the list of VMs deployed in vCloud Director. It returns a dictionary
3571 containing all VMs deployed for the queried VDC.
3572 The key of the dictionary is the VM UUID
3573
3574
3575 Args:
3576 vca - is active VCA connection.
3577 vdc_name - is a vdc name that will be used to query vms action
3578
3579 Returns:
3580 The return dictionary, keyed by VM UUID
3581 """
3582 vm_dict = {}
3583
3584 if vdc_name is None:
3585 return vm_dict
3586
3587 content = self.vms_view_action(vdc_name=vdc_name)
3588 try:
3589 vm_list_xmlroot = XmlElementTree.fromstring(content)
3590 for vm_xml in vm_list_xmlroot:
3591 if vm_xml.tag.split("}")[1] == 'VMRecord':
3592 if vm_xml.attrib['isVAppTemplate'] == 'false':
3593 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3594 if 'vm-' in rawuuid[0]:
3595 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3596 # vm and use raw UUID as key
3597 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3598 except:
3599 pass
3600
3601 return vm_dict
3602
3603 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
3604 """
3605 Method retrieves a VM deployed in vCloud Director. It returns the VM attributes as a dictionary
3606 containing the matching VM deployed for the queried VDC.
3607 The key of the dictionary is the VM UUID
3608
3609
3610 Args:
3611 vca - is active VCA connection.
3612 vdc_name - is a vdc name that will be used to query vms action
3613
3614 Returns:
3615 The return dictionary, keyed by VM UUID
3616 """
3617 vm_dict = {}
3618 vca = self.connect()
3619 if not vca:
3620 raise vimconn.vimconnConnectionException("self.connect() is failed")
3621
3622 if vdc_name is None:
3623 return vm_dict
3624
3625 content = self.vms_view_action(vdc_name=vdc_name)
3626 try:
3627 vm_list_xmlroot = XmlElementTree.fromstring(content)
3628 for vm_xml in vm_list_xmlroot:
3629 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
3630 # lookup done by UUID
3631 if isuuid:
3632 if vapp_name in vm_xml.attrib['container']:
3633 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3634 if 'vm-' in rawuuid[0]:
3635 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3636 break
3637 # lookup done by Name
3638 else:
3639 if vapp_name in vm_xml.attrib['name']:
3640 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3641 if 'vm-' in rawuuid[0]:
3642 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3643 break
3644 except:
3645 pass
3646
3647 return vm_dict
3648
3649 def get_network_action(self, network_uuid=None):
3650 """
3651 Method leverages vCloud Director and queries a network based on the network UUID
3652
3653 Args:
3654 vca - is active VCA connection.
3655 network_uuid - is a network uuid
3656
3657 Returns:
3658 The returned XML response
3659 """
3660
3661 if network_uuid is None:
3662 return None
3663
3664 url_list = [self.url, '/api/network/', network_uuid]
3665 vm_list_rest_call = ''.join(url_list)
3666
3667 if self.client._session:
3668 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3669 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3670
3671 response = self.perform_request(req_type='GET',
3672 url=vm_list_rest_call,
3673 headers=headers)
3674 #Retry login if session expired & retry sending request
3675 if response.status_code == 403:
3676 response = self.retry_rest('GET', vm_list_rest_call)
3677
3678 if response.status_code == requests.codes.ok:
3679 return response.content
3680
3681 return None
3682
3683 def get_vcd_network(self, network_uuid=None):
3684 """
3685 Method retrieves available network from vCloud Director
3686
3687 Args:
3688 network_uuid - is VCD network UUID
3689
3690 Each element serialized as key : value pair
3691
3692 Following keys are available for access, e.g. network_configuration['Gateway']
3693 <Configuration>
3694 <IpScopes>
3695 <IpScope>
3696 <IsInherited>true</IsInherited>
3697 <Gateway>172.16.252.100</Gateway>
3698 <Netmask>255.255.255.0</Netmask>
3699 <Dns1>172.16.254.201</Dns1>
3700 <Dns2>172.16.254.202</Dns2>
3701 <DnsSuffix>vmwarelab.edu</DnsSuffix>
3702 <IsEnabled>true</IsEnabled>
3703 <IpRanges>
3704 <IpRange>
3705 <StartAddress>172.16.252.1</StartAddress>
3706 <EndAddress>172.16.252.99</EndAddress>
3707 </IpRange>
3708 </IpRanges>
3709 </IpScope>
3710 </IpScopes>
3711 <FenceMode>bridged</FenceMode>
3712
3713 Returns:
3714 The network configuration dictionary
3715 """
3716
3717 network_configuration = {}
3718 if network_uuid is None:
3719 return network_uuid
3720
3721 try:
3722 content = self.get_network_action(network_uuid=network_uuid)
3723 if content is not None:
3724 vm_list_xmlroot = XmlElementTree.fromstring(content)
3725
3726 network_configuration['status'] = vm_list_xmlroot.get("status")
3727 network_configuration['name'] = vm_list_xmlroot.get("name")
3728 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
3729
3730 for child in vm_list_xmlroot:
3731 if child.tag.split("}")[1] == 'IsShared':
3732 network_configuration['isShared'] = child.text.strip()
3733 if child.tag.split("}")[1] == 'Configuration':
3734 for configuration in child.iter():
3735 tagKey = configuration.tag.split("}")[1].strip()
3736 if tagKey != "":
3737 network_configuration[tagKey] = configuration.text.strip()
3738 except Exception as exp :
3739 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
3740 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
3741
3742 return network_configuration
3743
3744 def delete_network_action(self, network_uuid=None):
3745 """
3746 Method deletes the given network from vCloud Director
3747
3748 Args:
3749 network_uuid - is the network uuid that the client wishes to delete
3750
3751 Returns:
3752 True if the delete request was accepted (HTTP 202), otherwise False
3753 """
3754 client = self.connect_as_admin()
3755 if not client:
3756 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
3757 if network_uuid is None:
3758 return False
3759
3760 url_list = [self.url, '/api/admin/network/', network_uuid]
3761 vm_list_rest_call = ''.join(url_list)
3762
3763 if client._session:
3764 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3765 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
3766 response = self.perform_request(req_type='DELETE',
3767 url=vm_list_rest_call,
3768 headers=headers)
3769 if response.status_code == 202:
3770 return True
3771
3772 return False
3773
3774 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3775 ip_profile=None, isshared='true'):
3776 """
3777 Method creates a network in vCloud Director
3778
3779 Args:
3780 network_name - is network name to be created.
3781 net_type - can be 'bridge','data','ptp','mgmt'.
3782 ip_profile is a dict containing the IP parameters of the network
3783 isshared - is a boolean
3784 parent_network_uuid - is the parent provider vdc network that will be used for mapping.
3785 It is an optional attribute. By default, if no parent network is indicated, the first available one will be used.
3786
3787 Returns:
3788 The new network UUID, or None on failure
3789 """
3790
3791 new_network_name = [network_name, '-', str(uuid.uuid4())]
3792 content = self.create_network_rest(network_name=''.join(new_network_name),
3793 ip_profile=ip_profile,
3794 net_type=net_type,
3795 parent_network_uuid=parent_network_uuid,
3796 isshared=isshared)
3797 if content is None:
3798 self.logger.debug("Failed create network {}.".format(network_name))
3799 return None
3800
3801 try:
3802 vm_list_xmlroot = XmlElementTree.fromstring(content)
3803 vcd_uuid = vm_list_xmlroot.get('id').split(":")
3804 if len(vcd_uuid) == 4:
3805 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
3806 return vcd_uuid[3]
3807 except:
3808 self.logger.debug("Failed create network {}".format(network_name))
3809 return None
3810
3811 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3812 ip_profile=None, isshared='true'):
3813 """
3814 Method creates a network in vCloud Director
3815
3816 Args:
3817 network_name - is network name to be created.
3818 net_type - can be 'bridge','data','ptp','mgmt'.
3819 ip_profile is a dict containing the IP parameters of the network
3820 isshared - is a boolean
3821 parent_network_uuid - is the parent provider vdc network that will be used for mapping.
3822 It is an optional attribute. By default, if no parent network is indicated, the first available one will be used.
3823
3824 Returns:
3825 The XML response content of the created network, or None on failure
3826 """
3827 client_as_admin = self.connect_as_admin()
3828 if not client_as_admin:
3829 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3830 if network_name is None:
3831 return None
3832
3833 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3834 vm_list_rest_call = ''.join(url_list)
3835
3836 if client_as_admin._session:
3837 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3838 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3839
3840 response = self.perform_request(req_type='GET',
3841 url=vm_list_rest_call,
3842 headers=headers)
3843
3844 provider_network = None
3845 available_networks = None
3846 add_vdc_rest_url = None
3847
3848 if response.status_code != requests.codes.ok:
3849 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3850 response.status_code))
3851 return None
3852 else:
3853 try:
3854 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3855 for child in vm_list_xmlroot:
3856
3857 if child.tag.split("}")[1] == 'ProviderVdcReference':
3858 provider_network = child.attrib.get('href')
3859 # application/vnd.vmware.admin.providervdc+xml
3860 if child.tag.split("}")[1] == 'Link':
3861 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3862 and child.attrib.get('rel') == 'add':
3863 add_vdc_rest_url = child.attrib.get('href')
3864 except:
3865 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3866 self.logger.debug("Respond body {}".format(response.content))
3867 return None
3868
3869 # find pvdc provided available network
3870 response = self.perform_request(req_type='GET',
3871 url=provider_network,
3872 headers=headers)
3873
3874 if response.status_code != requests.codes.ok:
3875 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3876 response.status_code))
3877 return None
3878
3879 if parent_network_uuid is None:
3880 try:
3881 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3882 for child in vm_list_xmlroot.iter():
3883 if child.tag.split("}")[1] == 'AvailableNetworks':
3884 for networks in child.iter():
3885 # application/vnd.vmware.admin.network+xml
3886 if networks.attrib.get('href') is not None:
3887 available_networks = networks.attrib.get('href')
3888 break
3889 except:
3890 return None
3891
3892 try:
3893 #Configure IP profile of the network
3894 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
3895
3896 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
3897 subnet_rand = random.randint(0, 255)
3898 ip_base = "192.168.{}.".format(subnet_rand)
3899 ip_profile['subnet_address'] = ip_base + "0/24"
3900 else:
3901 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
3902
3903 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
3904 ip_profile['gateway_address']=ip_base + "1"
3905 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
3906 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
3907 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
3908 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
3909 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
3910 ip_profile['dhcp_start_address']=ip_base + "3"
3911 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
3912 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
3913 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
3914 ip_profile['dns_address']=ip_base + "2"
3915
3916 gateway_address=ip_profile['gateway_address']
3917 dhcp_count=int(ip_profile['dhcp_count'])
3918 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
3919
3920 if ip_profile['dhcp_enabled']==True:
3921 dhcp_enabled='true'
3922 else:
3923 dhcp_enabled='false'
3924 dhcp_start_address=ip_profile['dhcp_start_address']
3925
3926 #derive dhcp_end_address from dhcp_start_address & dhcp_count
3927 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
3928 end_ip_int += dhcp_count - 1
3929 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
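# Worked example of the derivation above: dhcp_start_address '192.168.100.3' with
# dhcp_count 50 gives dhcp_end_address '192.168.100.52' (start + count - 1).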
3930
3931 ip_version=ip_profile['ip_version']
3932 dns_address=ip_profile['dns_address']
3933 except KeyError as exp:
3934 self.logger.debug("Create Network REST: Key error {}".format(exp))
3935 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
3936
3937 # either use client provided UUID or search for a first available
3938 # if both are not defined we return none
3939 if parent_network_uuid is not None:
3940 provider_network = None
3941 available_networks = None
3942 add_vdc_rest_url = None
3943
3944 url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
3945 add_vdc_rest_url = ''.join(url_list)
3946
3947 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
3948 available_networks = ''.join(url_list)
3949
3950 #Creating all networks as Direct Org VDC type networks.
3951 #Unused in case of Underlay (data/ptp) network interface.
3952 fence_mode="isolated"
3953 is_inherited='false'
3954 dns_list = dns_address.split(";")
3955 dns1 = dns_list[0]
3956 dns2_text = ""
3957 if len(dns_list) >= 2:
3958 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
3959 if net_type == "isolated":
3960 fence_mode="isolated"
3961 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3962 <Description>Openmano created</Description>
3963 <Configuration>
3964 <IpScopes>
3965 <IpScope>
3966 <IsInherited>{1:s}</IsInherited>
3967 <Gateway>{2:s}</Gateway>
3968 <Netmask>{3:s}</Netmask>
3969 <Dns1>{4:s}</Dns1>{5:s}
3970 <IsEnabled>{6:s}</IsEnabled>
3971 <IpRanges>
3972 <IpRange>
3973 <StartAddress>{7:s}</StartAddress>
3974 <EndAddress>{8:s}</EndAddress>
3975 </IpRange>
3976 </IpRanges>
3977 </IpScope>
3978 </IpScopes>
3979 <FenceMode>{9:s}</FenceMode>
3980 </Configuration>
3981 <IsShared>{10:s}</IsShared>
3982 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3983 subnet_address, dns1, dns2_text, dhcp_enabled,
3984 dhcp_start_address, dhcp_end_address,
3985 fence_mode, isshared)
3986 else:
3987 fence_mode = "bridged"
3988 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3989 <Description>Openmano created</Description>
3990 <Configuration>
3991 <IpScopes>
3992 <IpScope>
3993 <IsInherited>{1:s}</IsInherited>
3994 <Gateway>{2:s}</Gateway>
3995 <Netmask>{3:s}</Netmask>
3996 <Dns1>{4:s}</Dns1>{5:s}
3997 <IsEnabled>{6:s}</IsEnabled>
3998 <IpRanges>
3999 <IpRange>
4000 <StartAddress>{7:s}</StartAddress>
4001 <EndAddress>{8:s}</EndAddress>
4002 </IpRange>
4003 </IpRanges>
4004 </IpScope>
4005 </IpScopes>
4006 <ParentNetwork href="{9:s}"/>
4007 <FenceMode>{10:s}</FenceMode>
4008 </Configuration>
4009 <IsShared>{11:s}</IsShared>
4010 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
4011 subnet_address, dns1, dns2_text, dhcp_enabled,
4012 dhcp_start_address, dhcp_end_address, available_networks,
4013 fence_mode, isshared)
4014
4015 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
4016 try:
4017 response = self.perform_request(req_type='POST',
4018 url=add_vdc_rest_url,
4019 headers=headers,
4020 data=data)
4021
4022 if response.status_code != 201:
4023 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
4024 .format(response.status_code,response.content))
4025 else:
4026 network_task = self.get_task_from_response(response.content)
4027 self.logger.debug("Create Network REST : Waiting for Network creation complete")
4028 time.sleep(5)
4029 result = self.client.get_task_monitor().wait_for_success(task=network_task)
4030 if result.get('status') == 'success':
4031 return response.content
4032 else:
4033 self.logger.debug("create_network_rest task failed. Network Create response : {}"
4034 .format(response.content))
4035 except Exception as exp:
4036 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
4037
4038 return None
4039
4040 def convert_cidr_to_netmask(self, cidr_ip=None):
4041 """
4042 Method converts a CIDR address to a dotted-decimal netmask
4043 Args:
4044 cidr_ip : CIDR IP address
4045 Returns:
4046 netmask : Converted netmask
4047 """
4048 if cidr_ip is not None:
4049 if '/' in cidr_ip:
4050 network, net_bits = cidr_ip.split('/')
4051 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
4052 else:
4053 netmask = cidr_ip
4054 return netmask
4055 return None
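# Illustrative usage (a minimal sketch, assuming an initialized connector `vim_conn`):
#     vim_conn.convert_cidr_to_netmask('192.168.100.0/24')   # -> '255.255.255.0'
#     vim_conn.convert_cidr_to_netmask('255.255.255.0')      # returned unchanged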
4056
4057 def get_provider_rest(self, vca=None):
4058 """
4059 Method gets provider vdc view from vcloud director
4060
4061 Args:
4062 vca - is the active client connection.
4065
4066 Returns:
4067 The return xml content of respond or None
4068 """
4069
4070 url_list = [self.url, '/api/admin']
4071 if vca:
4072 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4073 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4074 response = self.perform_request(req_type='GET',
4075 url=''.join(url_list),
4076 headers=headers)
4077
4078 if response.status_code == requests.codes.ok:
4079 return response.content
4080 return None
4081
4082 def create_vdc(self, vdc_name=None):
4083
4084 vdc_dict = {}
4085
4086 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
4087 if xml_content is not None:
4088 try:
4089 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
4090 for child in task_resp_xmlroot:
4091 if child.tag.split("}")[1] == 'Owner':
4092 vdc_id = child.attrib.get('href').split("/")[-1]
4093 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
4094 return vdc_dict
4095 except:
4096 self.logger.debug("Respond body {}".format(xml_content))
4097
4098 return None
4099
4100 def create_vdc_from_tmpl_rest(self, vdc_name=None):
4101 """
4102 Method creates a VDC in vCloud Director based on a VDC template.
4103 It uses a pre-defined template.
4104
4105 Args:
4106 vdc_name - name of a new vdc.
4107
4108 Returns:
4109 The return xml content of respond or None
4110 """
4111 # prerequisite: at least one VDC template should be available in vCD
4112 self.logger.info("Creating new vdc {}".format(vdc_name))
4113 vca = self.connect_as_admin()
4114 if not vca:
4115 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4116 if vdc_name is None:
4117 return None
4118
4119 url_list = [self.url, '/api/vdcTemplates']
4120 vm_list_rest_call = ''.join(url_list)
4121
4122 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4123 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4124 response = self.perform_request(req_type='GET',
4125 url=vm_list_rest_call,
4126 headers=headers)
4127
4128 # container url to a template
4129 vdc_template_ref = None
4130 try:
4131 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
4132 for child in vm_list_xmlroot:
4133 # application/vnd.vmware.admin.providervdc+xml
4134 # we need to find a template from which we instantiate the VDC
4135 if child.tag.split("}")[1] == 'VdcTemplate':
4136 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
4137 vdc_template_ref = child.attrib.get('href')
4138 except:
4139 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4140 self.logger.debug("Respond body {}".format(response.content))
4141 return None
4142
4143 # if we didn't find the required pre-defined template we return None
4144 if vdc_template_ref is None:
4145 return None
4146
4147 try:
4148 # instantiate vdc
4149 url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
4150 vm_list_rest_call = ''.join(url_list)
4151 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4152 <Source href="{1:s}"></Source>
4153 <Description>openmano</Description>
4154 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
4155
4156 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
4157
4158 response = self.perform_request(req_type='POST',
4159 url=vm_list_rest_call,
4160 headers=headers,
4161 data=data)
4162
4163 vdc_task = self.get_task_from_response(response.content)
4164 self.client.get_task_monitor().wait_for_success(task=vdc_task)
4165
4166 # if all is OK we respond with the content, otherwise by default None
4167 if response.status_code >= 200 and response.status_code < 300:
4168 return response.content
4169 return None
4170 except:
4171 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4172 self.logger.debug("Respond body {}".format(response.content))
4173
4174 return None
4175
4176 def create_vdc_rest(self, vdc_name=None):
4177 """
4178 Method creates a VDC in vCloud Director
4179
4180 Args:
4181 vdc_name - vdc name to be created
4182 Returns:
4183 The return response
4184 """
4185
4186 self.logger.info("Creating new vdc {}".format(vdc_name))
4187
4188 vca = self.connect_as_admin()
4189 if not vca:
4190 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4191 if vdc_name is None:
4192 return None
4193
4194 url_list = [self.url, '/api/admin/org/', self.org_uuid]
4195 vm_list_rest_call = ''.join(url_list)
4196
4197 if vca._session:
4198 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4199 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4200 response = self.perform_request(req_type='GET',
4201 url=vm_list_rest_call,
4202 headers=headers)
4203
4204 provider_vdc_ref = None
4205 add_vdc_rest_url = None
4206 available_networks = None
4207
4208 if response.status_code != requests.codes.ok:
4209 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
4210 response.status_code))
4211 return None
4212 else:
4213 try:
4214 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
4215 for child in vm_list_xmlroot:
4216 # application/vnd.vmware.admin.providervdc+xml
4217 if child.tag.split("}")[1] == 'Link':
4218 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
4219 and child.attrib.get('rel') == 'add':
4220 add_vdc_rest_url = child.attrib.get('href')
4221 except:
4222 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4223 self.logger.debug("Respond body {}".format(response.content))
4224 return None
4225
4226 response = self.get_provider_rest(vca=vca)
4227 try:
4228 vm_list_xmlroot = XmlElementTree.fromstring(response)
4229 for child in vm_list_xmlroot:
4230 if child.tag.split("}")[1] == 'ProviderVdcReferences':
4231 for sub_child in child:
4232 provider_vdc_ref = sub_child.attrib.get('href')
4233 except:
4234 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4235 self.logger.debug("Respond body {}".format(response))
4236 return None
4237
4238 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
4239 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
4240 <AllocationModel>ReservationPool</AllocationModel>
4241 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
4242 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
4243 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
4244 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
4245 <ProviderVdcReference
4246 name="Main Provider"
4247 href="{2:s}" />
4248 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
4249 escape(vdc_name),
4250 provider_vdc_ref)
4251
4252 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
4253
4254 response = self.perform_request(req_type='POST',
4255 url=add_vdc_rest_url,
4256 headers=headers,
4257 data=data)
4258
4259 # if all is OK we respond with the content, otherwise by default None
4260 if response.status_code == 201:
4261 return response.content
4262 return None
4263
4264 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
4265 """
4266 Method retrieves vApp details from vCloud Director
4267
4268 Args:
4269 vapp_uuid - is vapp identifier.
4270
4271 Returns:
4272 The parsed vApp details as a dictionary (empty on failure)
4273 """
4274
4275 parsed_respond = {}
4276 vca = None
4277
4278 if need_admin_access:
4279 vca = self.connect_as_admin()
4280 else:
4281 vca = self.client
4282
4283 if not vca:
4284 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4285 if vapp_uuid is None:
4286 return None
4287
4288 url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
4289 get_vapp_restcall = ''.join(url_list)
4290
4291 if vca._session:
4292 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4293 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4294 response = self.perform_request(req_type='GET',
4295 url=get_vapp_restcall,
4296 headers=headers)
4297
4298 if response.status_code == 403:
4299 if not need_admin_access:
4300 response = self.retry_rest('GET', get_vapp_restcall)
4301
4302 if response.status_code != requests.codes.ok:
4303 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
4304 response.status_code))
4305 return parsed_respond
4306
4307 try:
4308 xmlroot_respond = XmlElementTree.fromstring(response.content)
4309 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
4310
4311 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4312 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4313 'vmw': 'http://www.vmware.com/schema/ovf',
4314 'vm': 'http://www.vmware.com/vcloud/v1.5',
4315 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4316 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
4317 "xmlns":"http://www.vmware.com/vcloud/v1.5"
4318 }
4319
4320 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
4321 if created_section is not None:
4322 parsed_respond['created'] = created_section.text
4323
4324 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
4325 if network_section is not None and 'networkName' in network_section.attrib:
4326 parsed_respond['networkname'] = network_section.attrib['networkName']
4327
4328 ipscopes_section = \
4329 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
4330 namespaces)
4331 if ipscopes_section is not None:
4332 for ipscope in ipscopes_section:
4333 for scope in ipscope:
4334 tag_key = scope.tag.split("}")[1]
4335 if tag_key == 'IpRanges':
4336 ip_ranges = scope.getchildren()
4337 for ipblock in ip_ranges:
4338 for block in ipblock:
4339 parsed_respond[block.tag.split("}")[1]] = block.text
4340 else:
4341 parsed_respond[tag_key] = scope.text
4342
4343 # parse children section for other attrib
4344 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4345 if children_section is not None:
4346 parsed_respond['name'] = children_section.attrib['name']
4347 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
4348 if "nestedHypervisorEnabled" in children_section.attrib else None
4349 parsed_respond['deployed'] = children_section.attrib['deployed']
4350 parsed_respond['status'] = children_section.attrib['status']
4351 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
4352 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
4353 nic_list = []
4354 for adapters in network_adapter:
4355 adapter_key = adapters.tag.split("}")[1]
4356 if adapter_key == 'PrimaryNetworkConnectionIndex':
4357 parsed_respond['primarynetwork'] = adapters.text
4358 if adapter_key == 'NetworkConnection':
4359 vnic = {}
4360 if 'network' in adapters.attrib:
4361 vnic['network'] = adapters.attrib['network']
4362 for adapter in adapters:
4363 setting_key = adapter.tag.split("}")[1]
4364 vnic[setting_key] = adapter.text
4365 nic_list.append(vnic)
4366
4367 for link in children_section:
4368 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4369 if link.attrib['rel'] == 'screen:acquireTicket':
4370 parsed_respond['acquireTicket'] = link.attrib
4371 if link.attrib['rel'] == 'screen:acquireMksTicket':
4372 parsed_respond['acquireMksTicket'] = link.attrib
4373
4374 parsed_respond['interfaces'] = nic_list
4375 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4376 if vCloud_extension_section is not None:
4377 vm_vcenter_info = {}
4378 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4379 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4380 if vmext is not None:
4381 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4382 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
4383
4384 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
4385 vm_virtual_hardware_info = {}
4386 if virtual_hardware_section is not None:
4387 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
4388 if item.find("rasd:Description",namespaces).text == "Hard disk":
4389 disk_size = item.find("rasd:HostResource" ,namespaces
4390 ).attrib["{"+namespaces['vm']+"}capacity"]
4391
4392 vm_virtual_hardware_info["disk_size"]= disk_size
4393 break
4394
4395 for link in virtual_hardware_section:
4396 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4397 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
4398 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
4399 break
4400
4401 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
4402 except Exception as exp :
4403 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4404 return parsed_respond
4405
4406 def acquire_console(self, vm_uuid=None):
4407
4408 if vm_uuid is None:
4409 return None
4410 if self.client._session:
4411 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4412 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4413 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
4414 console_dict = vm_dict['acquireTicket']
4415 console_rest_call = console_dict['href']
4416
4417 response = self.perform_request(req_type='POST',
4418 url=console_rest_call,
4419 headers=headers)
4420
4421 if response.status_code == 403:
4422 response = self.retry_rest('POST', console_rest_call)
4423
4424 if response.status_code == requests.codes.ok:
4425 return response.content
4426
4427 return None
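    # Hedged usage sketch (not part of the original module): acquire_console() returns the raw
    # XML body of the vCD ScreenTicket response, so a caller would typically parse it before
    # use. The connector name "vim_conn" and the UUID below are illustrative placeholders.
    #
    #   ticket_xml = vim_conn.acquire_console(vm_uuid="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
    #   if ticket_xml is not None:
    #       ticket = XmlElementTree.fromstring(ticket_xml).text
    #       # 'ticket' would then hold the one-time console ticket string issued by vCD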
4428
4429 def modify_vm_disk(self, vapp_uuid, flavor_disk):
4430 """
4431         Method to modify the VM disk size so that it matches the flavor disk size
4432
4433 Args:
4434 vapp_uuid - is vapp identifier.
4435 flavor_disk - disk size as specified in VNFD (flavor)
4436
4437 Returns:
4438             True if the disk was resized or no resize was needed, None on error
4439 """
4440 status = None
4441 try:
4442 #Flavor disk is in GB convert it into MB
4443             # Flavor disk is in GB; convert it into MB
4444 vm_details = self.get_vapp_details_rest(vapp_uuid)
4445 if vm_details:
4446 vm_name = vm_details["name"]
4447 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
4448
4449 if vm_details and "vm_virtual_hardware" in vm_details:
4450 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
4451 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4452
4453 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
4454
4455 if flavor_disk > vm_disk:
4456 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
4457 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
4458 vm_disk, flavor_disk ))
4459 else:
4460 status = True
4461 self.logger.info("No need to modify disk of VM {}".format(vm_name))
4462
4463 return status
4464 except Exception as exp:
4465             self.logger.info("Error occurred while modifying disk size {}".format(exp))
4466
4467
4468 def modify_vm_disk_rest(self, disk_href , disk_size):
4469 """
4470         Method to modify the VM disk size via the vCD REST API
4471
4472 Args:
4473 disk_href - vCD API URL to GET and PUT disk data
4474 disk_size - disk size as specified in VNFD (flavor)
4475
4476 Returns:
4477             True on success, False if the resize task fails, None on error
4478 """
4479 if disk_href is None or disk_size is None:
4480 return None
4481
4482 if self.client._session:
4483 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4484 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4485 response = self.perform_request(req_type='GET',
4486 url=disk_href,
4487 headers=headers)
4488
4489 if response.status_code == 403:
4490 response = self.retry_rest('GET', disk_href)
4491
4492 if response.status_code != requests.codes.ok:
4493 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
4494 response.status_code))
4495 return None
4496 try:
4497 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4498 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
4499 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4500
4501 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4502 if item.find("rasd:Description",namespaces).text == "Hard disk":
4503 disk_item = item.find("rasd:HostResource" ,namespaces )
4504 if disk_item is not None:
4505 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
4506 break
4507
4508 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
4509 xml_declaration=True)
4510
4511 #Send PUT request to modify disk size
4512 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4513
4514 response = self.perform_request(req_type='PUT',
4515 url=disk_href,
4516 headers=headers,
4517 data=data)
4518 if response.status_code == 403:
4519 add_headers = {'Content-Type': headers['Content-Type']}
4520 response = self.retry_rest('PUT', disk_href, add_headers, data)
4521
4522 if response.status_code != 202:
4523 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
4524 response.status_code))
4525 else:
4526 modify_disk_task = self.get_task_from_response(response.content)
4527 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
4528 if result.get('status') == 'success':
4529 return True
4530 else:
4531 return False
4532 return None
4533
4534 except Exception as exp :
4535             self.logger.info("Error occurred calling rest api for modifying disk size {}".format(exp))
4536 return None
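    # Hedged usage sketch (assumption, not original code): modify_vm_disk() above converts the
    # flavor disk size from GB to MB and only grows the disk, so a typical call site would be:
    #
    #   # resize the root disk of a deployed vApp to the 40 GB requested by the flavor
    #   resized = self.modify_vm_disk(vapp_uuid, flavor_disk=40)
    #   if not resized:
    #       self.logger.warning("disk resize skipped or failed for vApp {}".format(vapp_uuid))
    #
    # The 40 GB value and the warning handling are illustrative only.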
4537
4538 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
4539 """
4540 Method to attach pci devices to VM
4541
4542 Args:
4543 vapp_uuid - uuid of vApp/VM
4544             pci_devices - pci devices information as specified in VNFD (flavor)
4545
4546 Returns:
4547 The status of add pci device task , vm object and
4548 vcenter_conect object
4549 """
4550 vm_obj = None
4551 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
4552 vcenter_conect, content = self.get_vcenter_content()
4553 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4554
4555 if vm_moref_id:
4556 try:
4557 no_of_pci_devices = len(pci_devices)
4558 if no_of_pci_devices > 0:
4559 #Get VM and its host
4560 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4561 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4562 if host_obj and vm_obj:
4563                     # get PCI devices from the host on which the vApp is currently installed
4564 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
4565
4566 if avilable_pci_devices is None:
4567 #find other hosts with active pci devices
4568 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
4569 content,
4570 no_of_pci_devices
4571 )
4572
4573 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4574                             #Migrate vm to the host where PCI devices are available
4575 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
4576 task = self.relocate_vm(new_host_obj, vm_obj)
4577 if task is not None:
4578 result = self.wait_for_vcenter_task(task, vcenter_conect)
4579 self.logger.info("Migrate VM status: {}".format(result))
4580 host_obj = new_host_obj
4581 else:
4582                                 self.logger.info("Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj))
4583 raise vimconn.vimconnNotFoundException(
4584 "Fail to migrate VM : {} to host {}".format(
4585 vmname_andid,
4586 new_host_obj)
4587 )
4588
4589 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4590 #Add PCI devices one by one
4591 for pci_device in avilable_pci_devices:
4592 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
4593 if task:
4594 status= self.wait_for_vcenter_task(task, vcenter_conect)
4595 if status:
4596 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4597 else:
4598                                 self.logger.error("Failed to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4599 return True, vm_obj, vcenter_conect
4600 else:
4601                         self.logger.error("Currently there is no host with"\
4602                                           " {} available PCI devices required for VM {}".format(
4603                                                                                         no_of_pci_devices,
4604                                                                                         vmname_andid)
4605                                          )
4606                         raise vimconn.vimconnNotFoundException(
4607                                     "Currently there is no host with {} "\
4608                                     "available PCI devices required for VM {}".format(
4609                                                                             no_of_pci_devices,
4610                                                                             vmname_andid))
4611 else:
4612                     self.logger.debug("No information about PCI devices {}".format(pci_devices))
4613
4614 except vmodl.MethodFault as error:
4615             self.logger.error("Error occurred while adding PCI devices {}".format(error))
4616 return None, vm_obj, vcenter_conect
4617
4618 def get_vm_obj(self, content, mob_id):
4619 """
4620         Method to get the vSphere VM object associated with a given moref ID
4621 
4622         Args:
4623             content - vCenter content object
4624             mob_id - moref ID of the VM
4625
4626 Returns:
4627 VM and host object
4628 """
4629 vm_obj = None
4630 host_obj = None
4631 try :
4632 container = content.viewManager.CreateContainerView(content.rootFolder,
4633 [vim.VirtualMachine], True
4634 )
4635 for vm in container.view:
4636 mobID = vm._GetMoId()
4637 if mobID == mob_id:
4638 vm_obj = vm
4639 host_obj = vm_obj.runtime.host
4640 break
4641 except Exception as exp:
4642 self.logger.error("Error occurred while finding VM object : {}".format(exp))
4643 return host_obj, vm_obj
4644
4645 def get_pci_devices(self, host, need_devices):
4646 """
4647 Method to get the details of pci devices on given host
4648 Args:
4649 host - vSphere host object
4650 need_devices - number of pci devices needed on host
4651
4652 Returns:
4653 array of pci devices
4654 """
4655 all_devices = []
4656 all_device_ids = []
4657 used_devices_ids = []
4658
4659 try:
4660 if host:
4661 pciPassthruInfo = host.config.pciPassthruInfo
4662 pciDevies = host.hardware.pciDevice
4663
4664 for pci_status in pciPassthruInfo:
4665 if pci_status.passthruActive:
4666 for device in pciDevies:
4667 if device.id == pci_status.id:
4668 all_device_ids.append(device.id)
4669 all_devices.append(device)
4670
4671 #check if devices are in use
4672 avalible_devices = all_devices
4673 for vm in host.vm:
4674 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
4675 vm_devices = vm.config.hardware.device
4676 for device in vm_devices:
4677 if type(device) is vim.vm.device.VirtualPCIPassthrough:
4678 if device.backing.id in all_device_ids:
4679 for use_device in avalible_devices:
4680 if use_device.id == device.backing.id:
4681 avalible_devices.remove(use_device)
4682 used_devices_ids.append(device.backing.id)
4683                                         self.logger.debug("Device {} from devices {} "\
4684 "is in use".format(device.backing.id,
4685 device)
4686 )
4687 if len(avalible_devices) < need_devices:
4688                     self.logger.debug("Host {} does not have {} active PCI devices".format(host,
4689                                                                                             need_devices))
4690                     self.logger.debug("found only {} devices: {}".format(len(avalible_devices),
4691                                                                         avalible_devices))
4692 return None
4693 else:
4694 required_devices = avalible_devices[:need_devices]
4695                     self.logger.info("Found {} PCI devices on host {} but required only {}".format(
4696                                                                             len(avalible_devices),
4697                                                                             host,
4698                                                                             need_devices))
4699                     self.logger.info("Returning {} devices as {}".format(need_devices,
4700                                                                         required_devices))
4701 return required_devices
4702
4703 except Exception as exp:
4704 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
4705
4706 return None
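    # Hedged usage sketch (assumption, not original code): get_pci_devices() returns the first
    # 'need_devices' passthrough-enabled devices of a host that are not already attached to a
    # powered-on VM, or None when the host cannot satisfy the request, e.g.:
    #
    #   devices = self.get_pci_devices(host_obj, need_devices=2)
    #   if devices is None:
    #       host_obj, devices = self.get_host_and_PCIdevices(content, 2)   # try other hosts
    #
    # 'host_obj' and 'content' are assumed to come from get_vm_obj()/get_vcenter_content().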
4707
4708 def get_host_and_PCIdevices(self, content, need_devices):
4709 """
4710         Method to find a host with the required number of available PCI passthrough devices
4711 
4712         Args:
4713             content - vCenter content object
4714 need_devices - number of pci devices needed on host
4715
4716 Returns:
4717 array of pci devices and host object
4718 """
4719 host_obj = None
4720 pci_device_objs = None
4721 try:
4722 if content:
4723 container = content.viewManager.CreateContainerView(content.rootFolder,
4724 [vim.HostSystem], True)
4725 for host in container.view:
4726 devices = self.get_pci_devices(host, need_devices)
4727 if devices:
4728 host_obj = host
4729 pci_device_objs = devices
4730 break
4731 except Exception as exp:
4732 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
4733
4734 return host_obj,pci_device_objs
4735
4736 def relocate_vm(self, dest_host, vm) :
4737 """
4738         Method to relocate a VM to a new host
4739
4740 Args:
4741 dest_host - vSphere host object
4742 vm - vSphere VM object
4743
4744 Returns:
4745 task object
4746 """
4747 task = None
4748 try:
4749 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
4750 task = vm.Relocate(relocate_spec)
4751 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
4752 except Exception as exp:
4753             self.logger.error("Error occurred while relocating VM {} to new host {}: {}".format(
4754                                                                         vm, dest_host, exp))
4755 return task
4756
4757 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
4758 """
4759 Waits and provides updates on a vSphere task
4760 """
4761 while task.info.state == vim.TaskInfo.State.running:
4762 time.sleep(2)
4763
4764 if task.info.state == vim.TaskInfo.State.success:
4765 if task.info.result is not None and not hideResult:
4766 self.logger.info('{} completed successfully, result: {}'.format(
4767 actionName,
4768 task.info.result))
4769 else:
4770 self.logger.info('Task {} completed successfully.'.format(actionName))
4771 else:
4772 self.logger.error('{} did not complete successfully: {} '.format(
4773 actionName,
4774 task.info.error)
4775 )
4776
4777 return task.info.result
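    # Hedged usage sketch (assumption, not original code): wait_for_vcenter_task() polls a
    # pyVmomi task every 2 seconds until it leaves the 'running' state and returns
    # task.info.result, so it can be combined with any vSphere task, e.g.:
    #
    #   task = self.relocate_vm(new_host_obj, vm_obj)
    #   if task is not None:
    #       result = self.wait_for_vcenter_task(task, actionName='relocate')
    #
    # The 'relocate' action name is only a label used in the log messages.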
4778
4779 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
4780 """
4781 Method to add pci device in given VM
4782
4783 Args:
4784 host_object - vSphere host object
4785 vm_object - vSphere VM object
4786 host_pci_dev - host_pci_dev must be one of the devices from the
4787 host_object.hardware.pciDevice list
4788 which is configured as a PCI passthrough device
4789
4790 Returns:
4791 task object
4792 """
4793 task = None
4794 if vm_object and host_object and host_pci_dev:
4795 try :
4796 #Add PCI device to VM
4797 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
4798 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
4799
4800 if host_pci_dev.id not in systemid_by_pciid:
4801 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
4802 return None
4803
4804                 deviceId = '%x' % (host_pci_dev.deviceId % 2**16)  # hex device id without the 0x prefix
4805 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
4806 id=host_pci_dev.id,
4807 systemId=systemid_by_pciid[host_pci_dev.id],
4808 vendorId=host_pci_dev.vendorId,
4809 deviceName=host_pci_dev.deviceName)
4810
4811 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
4812
4813 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
4814 new_device_config.operation = "add"
4815 vmConfigSpec = vim.vm.ConfigSpec()
4816 vmConfigSpec.deviceChange = [new_device_config]
4817
4818 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
4819 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
4820 host_pci_dev, vm_object, host_object)
4821 )
4822 except Exception as exp:
4823                 self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
4824 host_pci_dev,
4825 vm_object,
4826 exp))
4827 return task
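    # Hedged sketch of the passthrough flow (assumption, not original code): add_pci_devices()
    # above ties these helpers together; a direct use of add_pci_to_vm() for a single device
    # could look like this, with all object names taken from the methods above:
    #
    #   host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
    #   devices = self.get_pci_devices(host_obj, 1)
    #   if devices:
    #       task = self.add_pci_to_vm(host_obj, vm_obj, devices[0])
    #       self.wait_for_vcenter_task(task, vcenter_conect)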
4828
4829 def get_vm_vcenter_info(self):
4830 """
4831         Method to get vCenter connection details from the VIM config
4832 
4833         Args:
4834             None
4835 
4836         Returns:
4837             dict with vCenter IP, port, user and password
4838 """
4839 vm_vcenter_info = {}
4840
4841 if self.vcenter_ip is not None:
4842 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
4843 else:
4844 raise vimconn.vimconnException(message="vCenter IP is not provided."\
4845 " Please provide vCenter IP while attaching datacenter to tenant in --config")
4846 if self.vcenter_port is not None:
4847 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
4848 else:
4849 raise vimconn.vimconnException(message="vCenter port is not provided."\
4850 " Please provide vCenter port while attaching datacenter to tenant in --config")
4851 if self.vcenter_user is not None:
4852 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
4853 else:
4854 raise vimconn.vimconnException(message="vCenter user is not provided."\
4855 " Please provide vCenter user while attaching datacenter to tenant in --config")
4856
4857 if self.vcenter_password is not None:
4858 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
4859 else:
4860 raise vimconn.vimconnException(message="vCenter user password is not provided."\
4861 " Please provide vCenter user password while attaching datacenter to tenant in --config")
4862
4863 return vm_vcenter_info
4864
4865
4866 def get_vm_pci_details(self, vmuuid):
4867 """
4868 Method to get VM PCI device details from vCenter
4869
4870 Args:
4871             vmuuid - vApp/VM UUID
4872 
4873         Returns:
4874             dict of PCI devices attached to the VM
4875
4876 """
4877 vm_pci_devices_info = {}
4878 try:
4879 vcenter_conect, content = self.get_vcenter_content()
4880 vm_moref_id = self.get_vm_moref_id(vmuuid)
4881 if vm_moref_id:
4882 #Get VM and its host
4883 if content:
4884 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4885 if host_obj and vm_obj:
4886 vm_pci_devices_info["host_name"]= host_obj.name
4887 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
4888 for device in vm_obj.config.hardware.device:
4889 if type(device) == vim.vm.device.VirtualPCIPassthrough:
4890 device_details={'devide_id':device.backing.id,
4891 'pciSlotNumber':device.slotInfo.pciSlotNumber,
4892 }
4893 vm_pci_devices_info[device.deviceInfo.label] = device_details
4894 else:
4895 self.logger.error("Can not connect to vCenter while getting "\
4896                                       "PCI devices information")
4897 return vm_pci_devices_info
4898 except Exception as exp:
4899             self.logger.error("Error occurred while getting VM information"\
4900 " for VM : {}".format(exp))
4901 raise vimconn.vimconnException(message=exp)
4902
4903
4904 def reserve_memory_for_all_vms(self, vapp, memory_mb):
4905 """
4906 Method to reserve memory for all VMs
4907 Args :
4908 vapp - VApp
4909 memory_mb - Memory in MB
4910 Returns:
4911 None
4912 """
4913
4914 self.logger.info("Reserve memory for all VMs")
4915 for vms in vapp.get_all_vms():
4916 vm_id = vms.get('id').split(':')[-1]
4917
4918 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
4919
4920 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4921 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4922 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
4923 response = self.perform_request(req_type='GET',
4924 url=url_rest_call,
4925 headers=headers)
4926
4927 if response.status_code == 403:
4928 response = self.retry_rest('GET', url_rest_call)
4929
4930 if response.status_code != 200:
4931                 self.logger.error("REST call {} failed reason : {} "\
4932 "status code : {}".format(url_rest_call,
4933 response.content,
4934 response.status_code))
4935 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
4936 "memory")
4937
4938 bytexml = bytes(bytearray(response.content, encoding='utf-8'))
4939 contentelem = lxmlElementTree.XML(bytexml)
4940 namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.items() if prefix}
4941 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4942
4943 # Find the reservation element in the response
4944 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
4945 for memelem in memelem_list:
4946 memelem.text = str(memory_mb)
4947
4948 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
4949
4950 response = self.perform_request(req_type='PUT',
4951 url=url_rest_call,
4952 headers=headers,
4953 data=newdata)
4954
4955 if response.status_code == 403:
4956 add_headers = {'Content-Type': headers['Content-Type']}
4957 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4958
4959 if response.status_code != 202:
4960                     self.logger.error("REST call {} failed reason : {} "\
4961 "status code : {} ".format(url_rest_call,
4962 response.content,
4963 response.status_code))
4964 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
4965 "virtual hardware memory section")
4966 else:
4967 mem_task = self.get_task_from_response(response.content)
4968 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
4969 if result.get('status') == 'success':
4970 self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
4971 .format(vm_id))
4972 else:
4973 self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
4974 .format(vm_id))
4975
4976 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
4977 """
4978 Configure VApp network config with org vdc network
4979 Args :
4980 vapp - VApp
4981 Returns:
4982 None
4983 """
4984
4985 self.logger.info("Connecting vapp {} to org vdc network {}".
4986 format(vapp_id, net_name))
4987
4988 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
4989
4990 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4991 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4992 response = self.perform_request(req_type='GET',
4993 url=url_rest_call,
4994 headers=headers)
4995
4996 if response.status_code == 403:
4997 response = self.retry_rest('GET', url_rest_call)
4998
4999 if response.status_code != 200:
5000             self.logger.error("REST call {} failed reason : {} "\
5001 "status code : {}".format(url_rest_call,
5002 response.content,
5003 response.status_code))
5004 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
5005 "network config section")
5006
5007 data = response.content
5008 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
5009 net_id = self.get_network_id_by_name(net_name)
5010 if not net_id:
5011 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
5012 "existing network")
5013
5014 bytexml = bytes(bytearray(data, encoding='utf-8'))
5015 newelem = lxmlElementTree.XML(bytexml)
5016 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
5017 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5018 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
5019
5020 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
5021 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
5022 if parentnetworklist:
5023 for pn in parentnetworklist:
5024 if "href" not in pn.keys():
5025 id_val = pn.get("id")
5026 href_val = "{}/api/network/{}".format(self.url, id_val)
5027 pn.set("href", href_val)
5028
5029 newstr = """<NetworkConfig networkName="{}">
5030 <Configuration>
5031 <ParentNetwork href="{}/api/network/{}"/>
5032 <FenceMode>bridged</FenceMode>
5033 </Configuration>
5034 </NetworkConfig>
5035 """.format(net_name, self.url, net_id)
5036 newcfgelem = lxmlElementTree.fromstring(newstr)
5037 if nwcfglist:
5038 nwcfglist[0].addnext(newcfgelem)
5039
5040 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
5041
5042 response = self.perform_request(req_type='PUT',
5043 url=url_rest_call,
5044 headers=headers,
5045 data=newdata)
5046
5047 if response.status_code == 403:
5048 add_headers = {'Content-Type': headers['Content-Type']}
5049 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5050
5051 if response.status_code != 202:
5052             self.logger.error("REST call {} failed reason : {} "\
5053 "status code : {} ".format(url_rest_call,
5054 response.content,
5055 response.status_code))
5056 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
5057 "network config section")
5058 else:
5059 vapp_task = self.get_task_from_response(response.content)
5060 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
5061 if result.get('status') == 'success':
5062 self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
5063 "network {}".format(vapp_id, net_name))
5064 else:
5065 self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
5066 "connect to network {}".format(vapp_id, net_name))
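    # Hedged example of the NetworkConfig fragment built above (illustrative values only):
    # for net_name "mgmt-net" and net_id "1234", the element appended next to the existing
    # NetworkConfig entries before the PUT would render roughly as:
    #
    #   <NetworkConfig networkName="mgmt-net">
    #       <Configuration>
    #           <ParentNetwork href="https://vcd.example.com/api/network/1234"/>
    #           <FenceMode>bridged</FenceMode>
    #       </Configuration>
    #   </NetworkConfig>
    #
    # "https://vcd.example.com" stands in for self.url and is not a real endpoint.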
5067
5068 def remove_primary_network_adapter_from_all_vms(self, vapp):
5069 """
5070         Method to remove the primary network adapter from all VMs of the vApp
5071 Args :
5072 vapp - VApp
5073 Returns:
5074 None
5075 """
5076
5077 self.logger.info("Removing network adapter from all VMs")
5078 for vms in vapp.get_all_vms():
5079 vm_id = vms.get('id').split(':')[-1]
5080
5081 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5082
5083 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5084 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5085 response = self.perform_request(req_type='GET',
5086 url=url_rest_call,
5087 headers=headers)
5088
5089 if response.status_code == 403:
5090 response = self.retry_rest('GET', url_rest_call)
5091
5092 if response.status_code != 200:
5093                 self.logger.error("REST call {} failed reason : {} "\
5094 "status code : {}".format(url_rest_call,
5095 response.content,
5096 response.status_code))
5097 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
5098 "network connection section")
5099
5100 data = response.content
5101 data = data.split('<Link rel="edit"')[0]
5102
5103 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5104
5105 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5106 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
5107 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5108 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
5109 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
5110 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
5111 xmlns:vmw="http://www.vmware.com/schema/ovf"
5112 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
5113 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
5114 xmlns:ns9="http://www.vmware.com/vcloud/versions"
5115 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
5116 <ovf:Info>Specifies the available VM network connections</ovf:Info>
5117 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
5118 <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
5119 </NetworkConnectionSection>""".format(url=url_rest_call)
5120 response = self.perform_request(req_type='PUT',
5121 url=url_rest_call,
5122 headers=headers,
5123 data=newdata)
5124
5125 if response.status_code == 403:
5126 add_headers = {'Content-Type': headers['Content-Type']}
5127 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5128
5129 if response.status_code != 202:
5130                 self.logger.error("REST call {} failed reason : {} "\
5131 "status code : {} ".format(url_rest_call,
5132 response.content,
5133 response.status_code))
5134 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
5135 "network connection section")
5136 else:
5137 nic_task = self.get_task_from_response(response.content)
5138 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5139 if result.get('status') == 'success':
5140                     self.logger.info("remove_primary_network_adapter(): removed primary "\
5141                                      "network adapter from VM {}".format(vm_id))
5142                 else:
5143                     self.logger.error("remove_primary_network_adapter(): failed to remove "\
5144                                       "primary network adapter from VM {}".format(vm_id))
5145
5146 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
5147 """
5148         Method to add a network adapter to each VM of the vApp
5149         Args :
5150             network_name - name of the network
5151             primary_nic_index - int value for primary nic index
5152             nicIndex - int value for nic index
5153             nic_type - network adapter model to attach to the VM (optional)
5154 Returns:
5155 None
5156 """
5157
5158 self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
5159 format(network_name, nicIndex, nic_type))
5160 try:
5161 ip_address = None
5162 floating_ip = False
5163 mac_address = None
5164 if 'floating_ip' in net: floating_ip = net['floating_ip']
5165
5166 # Stub for ip_address feature
5167 if 'ip_address' in net: ip_address = net['ip_address']
5168
5169 if 'mac_address' in net: mac_address = net['mac_address']
5170
5171 if floating_ip:
5172 allocation_mode = "POOL"
5173 elif ip_address:
5174 allocation_mode = "MANUAL"
5175 else:
5176 allocation_mode = "DHCP"
5177
5178 if not nic_type:
5179 for vms in vapp.get_all_vms():
5180 vm_id = vms.get('id').split(':')[-1]
5181
5182 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5183
5184 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5185 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5186 response = self.perform_request(req_type='GET',
5187 url=url_rest_call,
5188 headers=headers)
5189
5190 if response.status_code == 403:
5191 response = self.retry_rest('GET', url_rest_call)
5192
5193 if response.status_code != 200:
5194                         self.logger.error("REST call {} failed reason : {} "\
5195 "status code : {}".format(url_rest_call,
5196 response.content,
5197 response.status_code))
5198 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5199 "network connection section")
5200
5201 data = response.content
5202 data = data.split('<Link rel="edit"')[0]
5203 if '<PrimaryNetworkConnectionIndex>' not in data:
5204 self.logger.debug("add_network_adapter PrimaryNIC not in data")
5205 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5206 <NetworkConnection network="{}">
5207 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5208 <IsConnected>true</IsConnected>
5209 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5210 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5211 allocation_mode)
5212 # Stub for ip_address feature
5213 if ip_address:
5214 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5215 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5216
5217 if mac_address:
5218 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5219 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5220
5221 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5222 else:
5223 self.logger.debug("add_network_adapter PrimaryNIC in data")
5224 new_item = """<NetworkConnection network="{}">
5225 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5226 <IsConnected>true</IsConnected>
5227 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5228 </NetworkConnection>""".format(network_name, nicIndex,
5229 allocation_mode)
5230 # Stub for ip_address feature
5231 if ip_address:
5232 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5233 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5234
5235 if mac_address:
5236 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5237 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5238
5239 data = data + new_item + '</NetworkConnectionSection>'
5240
5241 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5242
5243 response = self.perform_request(req_type='PUT',
5244 url=url_rest_call,
5245 headers=headers,
5246 data=data)
5247
5248 if response.status_code == 403:
5249 add_headers = {'Content-Type': headers['Content-Type']}
5250 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5251
5252 if response.status_code != 202:
5253                         self.logger.error("REST call {} failed reason : {} "\
5254 "status code : {} ".format(url_rest_call,
5255 response.content,
5256 response.status_code))
5257 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5258 "network connection section")
5259 else:
5260 nic_task = self.get_task_from_response(response.content)
5261 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5262 if result.get('status') == 'success':
5263                             self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
5264 "default NIC type".format(vm_id))
5265 else:
5266 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
5267 "connect NIC type".format(vm_id))
5268 else:
5269 for vms in vapp.get_all_vms():
5270 vm_id = vms.get('id').split(':')[-1]
5271
5272 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5273
5274 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5275 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5276 response = self.perform_request(req_type='GET',
5277 url=url_rest_call,
5278 headers=headers)
5279
5280 if response.status_code == 403:
5281 response = self.retry_rest('GET', url_rest_call)
5282
5283 if response.status_code != 200:
5284                         self.logger.error("REST call {} failed reason : {} "\
5285 "status code : {}".format(url_rest_call,
5286 response.content,
5287 response.status_code))
5288 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5289 "network connection section")
5290 data = response.content
5291 data = data.split('<Link rel="edit"')[0]
5292 vcd_netadapter_type = nic_type
5293 if nic_type in ['SR-IOV', 'VF']:
5294 vcd_netadapter_type = "SRIOVETHERNETCARD"
5295
5296 if '<PrimaryNetworkConnectionIndex>' not in data:
5297 self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
5298 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5299 <NetworkConnection network="{}">
5300 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5301 <IsConnected>true</IsConnected>
5302 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5303 <NetworkAdapterType>{}</NetworkAdapterType>
5304 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5305 allocation_mode, vcd_netadapter_type)
5306 # Stub for ip_address feature
5307 if ip_address:
5308 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5309 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5310
5311 if mac_address:
5312 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5313 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5314
5315 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5316 else:
5317 self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
5318 new_item = """<NetworkConnection network="{}">
5319 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5320 <IsConnected>true</IsConnected>
5321 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5322 <NetworkAdapterType>{}</NetworkAdapterType>
5323 </NetworkConnection>""".format(network_name, nicIndex,
5324 allocation_mode, vcd_netadapter_type)
5325 # Stub for ip_address feature
5326 if ip_address:
5327 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5328 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5329
5330 if mac_address:
5331 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5332 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5333
5334 data = data + new_item + '</NetworkConnectionSection>'
5335
5336 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5337
5338 response = self.perform_request(req_type='PUT',
5339 url=url_rest_call,
5340 headers=headers,
5341 data=data)
5342
5343 if response.status_code == 403:
5344 add_headers = {'Content-Type': headers['Content-Type']}
5345 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5346
5347 if response.status_code != 202:
5348                         self.logger.error("REST call {} failed reason : {} "\
5349 "status code : {}".format(url_rest_call,
5350 response.content,
5351 response.status_code))
5352 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5353 "network connection section")
5354 else:
5355 nic_task = self.get_task_from_response(response.content)
5356 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5357 if result.get('status') == 'success':
5358 self.logger.info("add_network_adapter_to_vms(): VM {} "\
5359                                          "connected to NIC type {}".format(vm_id, nic_type))
5360 else:
5361 self.logger.error("add_network_adapter_to_vms(): VM {} "\
5362 "failed to connect NIC type {}".format(vm_id, nic_type))
5363 except Exception as exp:
5364 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
5365 "while adding Network adapter")
5366 raise vimconn.vimconnException(message=exp)
5367
5368
5369 def set_numa_affinity(self, vmuuid, paired_threads_id):
5370 """
5371         Method to assign numa affinity in vm configuration parameters
5372 Args :
5373 vmuuid - vm uuid
5374 paired_threads_id - one or more virtual processor
5375 numbers
5376 Returns:
5377             None; logs an error if the numa affinity could not be assigned
5378 """
5379 try:
5380 vcenter_conect, content = self.get_vcenter_content()
5381 vm_moref_id = self.get_vm_moref_id(vmuuid)
5382
5383 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
5384 if vm_obj:
5385 config_spec = vim.vm.ConfigSpec()
5386 config_spec.extraConfig = []
5387 opt = vim.option.OptionValue()
5388 opt.key = 'numa.nodeAffinity'
5389 opt.value = str(paired_threads_id)
5390 config_spec.extraConfig.append(opt)
5391 task = vm_obj.ReconfigVM_Task(config_spec)
5392 if task:
5393 result = self.wait_for_vcenter_task(task, vcenter_conect)
5394 extra_config = vm_obj.config.extraConfig
5395 flag = False
5396 for opts in extra_config:
5397 if 'numa.nodeAffinity' in opts.key:
5398 flag = True
5399                         self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
5400 "value {} for vm {}".format(opt.value, vm_obj))
5401 if flag:
5402 return
5403 else:
5404 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
5405 except Exception as exp:
5406 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
5407 "for VM {} : {}".format(vm_obj, vm_moref_id))
5408 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
5409 "affinity".format(exp))
5410
5411
5412 def cloud_init(self, vapp, cloud_config):
5413 """
5414 Method to inject ssh-key
5415 vapp - vapp object
5416 cloud_config a dictionary with:
5417 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
5418 'users': (optional) list of users to be inserted, each item is a dict with:
5419 'name': (mandatory) user name,
5420 'key-pairs': (optional) list of strings with the public key to be inserted to the user
5421 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
5422 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
5423 'config-files': (optional). List of files to be transferred. Each item is a dict with:
5424 'dest': (mandatory) string with the destination absolute path
5425 'encoding': (optional, by default text). Can be one of:
5426 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
5427 'content' (mandatory): string with the content of the file
5428 'permissions': (optional) string with file permissions, typically octal notation '0644'
5429 'owner': (optional) file owner, string with the format 'owner:group'
5430             'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
5431 """
5432 try:
5433 if not isinstance(cloud_config, dict):
5434 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
5435 else:
5436 key_pairs = []
5437 userdata = []
5438 if "key-pairs" in cloud_config:
5439 key_pairs = cloud_config["key-pairs"]
5440
5441 if "users" in cloud_config:
5442 userdata = cloud_config["users"]
5443
5444 self.logger.debug("cloud_init : Guest os customization started..")
5445 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
5446 customize_script = customize_script.replace("&","&amp;")
5447 self.guest_customization(vapp, customize_script)
5448
5449 except Exception as exp:
5450 self.logger.error("cloud_init : exception occurred while injecting "\
5451 "ssh-key")
5452 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
5453 "ssh-key".format(exp))
5454
5455 def format_script(self, key_pairs=[], users_list=[]):
5456 bash_script = """#!/bin/sh
5457 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5458 if [ "$1" = "precustomization" ];then
5459 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5460 """
5461
5462 keys = "\n".join(key_pairs)
5463 if keys:
5464 keys_data = """
5465 if [ ! -d /root/.ssh ];then
5466 mkdir /root/.ssh
5467 chown root:root /root/.ssh
5468 chmod 700 /root/.ssh
5469 touch /root/.ssh/authorized_keys
5470 chown root:root /root/.ssh/authorized_keys
5471 chmod 600 /root/.ssh/authorized_keys
5472 # make centos with selinux happy
5473 which restorecon && restorecon -Rv /root/.ssh
5474 else
5475 touch /root/.ssh/authorized_keys
5476 chown root:root /root/.ssh/authorized_keys
5477 chmod 600 /root/.ssh/authorized_keys
5478 fi
5479 echo '{key}' >> /root/.ssh/authorized_keys
5480 """.format(key=keys)
5481
5482 bash_script+= keys_data
5483
5484 for user in users_list:
5485 if 'name' in user: user_name = user['name']
5486 if 'key-pairs' in user:
5487 user_keys = "\n".join(user['key-pairs'])
5488 else:
5489 user_keys = None
5490
5491 add_user_name = """
5492 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
5493 """.format(user_name=user_name)
5494
5495 bash_script+= add_user_name
5496
5497 if user_keys:
5498 user_keys_data = """
5499 mkdir /home/{user_name}/.ssh
5500 chown {user_name}:{user_name} /home/{user_name}/.ssh
5501 chmod 700 /home/{user_name}/.ssh
5502 touch /home/{user_name}/.ssh/authorized_keys
5503 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
5504 chmod 600 /home/{user_name}/.ssh/authorized_keys
5505 # make centos with selinux happy
5506 which restorecon && restorecon -Rv /home/{user_name}/.ssh
5507 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
5508 """.format(user_name=user_name,user_key=user_keys)
5509
5510 bash_script+= user_keys_data
5511
5512 return bash_script+"\n\tfi"
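    # Hedged usage sketch (assumption, not original code): format_script() produces the
    # pre-customization shell script that guest_customization() later uploads, e.g.:
    #
    #   script = self.format_script(key_pairs=["ssh-rsa AAAA... operator@example"],
    #                               users_list=[{"name": "osm", "key-pairs": ["ssh-rsa BBBB..."]}])
    #   script = script.replace("&", "&amp;")   # escape for the XML payload, as in cloud_init()
    #   self.guest_customization(vapp, script)
    #
    # The key material and the "osm" user are placeholders.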
5513
5514 def guest_customization(self, vapp, customize_script):
5515 """
5516 Method to customize guest os
5517 vapp - Vapp object
5518 customize_script - Customize script to be run at first boot of VM.
5519 """
5520 for vm in vapp.get_all_vms():
5521 vm_id = vm.get('id').split(':')[-1]
5522 vm_name = vm.get('name')
5523 vm_name = vm_name.replace('_','-')
5524
5525 vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
5526 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5527 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5528
5529 headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
5530
5531 data = """<GuestCustomizationSection
5532 xmlns="http://www.vmware.com/vcloud/v1.5"
5533 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5534 ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
5535 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
5536 <Enabled>true</Enabled>
5537 <ChangeSid>false</ChangeSid>
5538 <VirtualMachineId>{}</VirtualMachineId>
5539 <JoinDomainEnabled>false</JoinDomainEnabled>
5540 <UseOrgSettings>false</UseOrgSettings>
5541 <AdminPasswordEnabled>false</AdminPasswordEnabled>
5542 <AdminPasswordAuto>true</AdminPasswordAuto>
5543 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
5544 <AdminAutoLogonCount>0</AdminAutoLogonCount>
5545 <ResetPasswordRequired>false</ResetPasswordRequired>
5546 <CustomizationScript>{}</CustomizationScript>
5547 <ComputerName>{}</ComputerName>
5548 <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
5549 </GuestCustomizationSection>
5550 """.format(vm_customization_url,
5551 vm_id,
5552 customize_script,
5553 vm_name,
5554 vm_customization_url)
5555
5556 response = self.perform_request(req_type='PUT',
5557 url=vm_customization_url,
5558 headers=headers,
5559 data=data)
5560 if response.status_code == 202:
5561 guest_task = self.get_task_from_response(response.content)
5562 self.client.get_task_monitor().wait_for_success(task=guest_task)
5563 self.logger.info("guest_customization : customized guest os task "\
5564 "completed for VM {}".format(vm_name))
5565 else:
5566                 self.logger.error("guest_customization : task for customized guest os "\
5567 "failed for VM {}".format(vm_name))
5568                 raise vimconn.vimconnException("guest_customization : failed to perform "\
5569 "guest os customization on VM {}".format(vm_name))
5570
5571 def add_new_disk(self, vapp_uuid, disk_size):
5572 """
5573 Method to create an empty vm disk
5574
5575 Args:
5576 vapp_uuid - is vapp identifier.
5577 disk_size - size of disk to be created in GB
5578
5579 Returns:
5580 None
5581 """
5582 status = False
5583 vm_details = None
5584 try:
5585 #Disk size in GB, convert it into MB
5586 if disk_size is not None:
5587 disk_size_mb = int(disk_size) * 1024
5588 vm_details = self.get_vapp_details_rest(vapp_uuid)
5589
5590 if vm_details and "vm_virtual_hardware" in vm_details:
5591 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5592 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5593 status = self.add_new_disk_rest(disk_href, disk_size_mb)
5594
5595 except Exception as exp:
5596 msg = "Error occurred while creating new disk {}.".format(exp)
5597 self.rollback_newvm(vapp_uuid, msg)
5598
5599 if status:
5600 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5601 else:
5602 #If failed to add disk, delete VM
5603 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
5604 self.rollback_newvm(vapp_uuid, msg)
5605
5606
5607 def add_new_disk_rest(self, disk_href, disk_size_mb):
5608 """
5609         Retrieves the vApp disks section & adds a new empty disk
5610
5611 Args:
5612             disk_href: Disk section href to add disk
5613 disk_size_mb: Disk size in MB
5614
5615 Returns: Status of add new disk task
5616 """
5617 status = False
5618 if self.client._session:
5619 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5620 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5621 response = self.perform_request(req_type='GET',
5622 url=disk_href,
5623 headers=headers)
5624
5625 if response.status_code == 403:
5626 response = self.retry_rest('GET', disk_href)
5627
5628 if response.status_code != requests.codes.ok:
5629 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
5630 .format(disk_href, response.status_code))
5631 return status
5632 try:
5633                 # Find bus type & max of instance IDs assigned to disks
5634 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5635 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
5636 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
5637 instance_id = 0
5638 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
5639 if item.find("rasd:Description",namespaces).text == "Hard disk":
5640 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
5641 if inst_id > instance_id:
5642 instance_id = inst_id
5643 disk_item = item.find("rasd:HostResource" ,namespaces)
5644 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
5645 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
5646
5647 instance_id = instance_id + 1
5648 new_item = """<Item>
5649 <rasd:Description>Hard disk</rasd:Description>
5650 <rasd:ElementName>New disk</rasd:ElementName>
5651 <rasd:HostResource
5652 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
5653 vcloud:capacity="{}"
5654 vcloud:busSubType="{}"
5655 vcloud:busType="{}"></rasd:HostResource>
5656 <rasd:InstanceID>{}</rasd:InstanceID>
5657 <rasd:ResourceType>17</rasd:ResourceType>
5658 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
5659
5660 new_data = response.content
5661 #Add new item at the bottom
5662 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
5663
5664 # Send PUT request to modify virtual hardware section with new disk
5665 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
5666
5667 response = self.perform_request(req_type='PUT',
5668 url=disk_href,
5669 data=new_data,
5670 headers=headers)
5671
5672 if response.status_code == 403:
5673 add_headers = {'Content-Type': headers['Content-Type']}
5674 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
5675
5676 if response.status_code != 202:
5677 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
5678 .format(disk_href, response.status_code, response.content))
5679 else:
5680 add_disk_task = self.get_task_from_response(response.content)
5681 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
5682 if result.get('status') == 'success':
5683 status = True
5684 else:
5685 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
5686
5687 except Exception as exp:
5688 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
5689
5690 return status
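    # Hedged usage sketch (assumption, not original code): add_new_disk() above resolves the
    # RASD items href ("disk_edit_href") from get_vapp_details_rest() and then calls this
    # method with the size converted from GB to MB, e.g.:
    #
    #   vm_details = self.get_vapp_details_rest(vapp_uuid)
    #   disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
    #   ok = self.add_new_disk_rest(disk_href, 10 * 1024)   # add a 10 GB volume
    #
    # The 10 GB size is illustrative only.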
5691
5692
5693 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
5694 """
5695 Method to add existing disk to vm
5696 Args :
5697 catalogs - List of VDC catalogs
5698 image_id - Catalog ID
5699 template_name - Name of template in catalog
5700 vapp_uuid - UUID of vApp
5701 Returns:
5702 None
5703 """
5704 disk_info = None
5705 vcenter_conect, content = self.get_vcenter_content()
5706 #find moref-id of vm in image
5707 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
5708 image_id=image_id,
5709 )
5710
5711 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
5712 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
5713 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
5714 if catalog_vm_moref_id:
5715 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
5716 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
5717 if catalog_vm_obj:
5718 #find existing disk
5719 disk_info = self.find_disk(catalog_vm_obj)
5720 else:
5721 exp_msg = "No VM with image id {} found".format(image_id)
5722 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5723 else:
5724 exp_msg = "No Image found with image ID {} ".format(image_id)
5725 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5726
5727 if disk_info:
5728 self.logger.info("Existing disk_info : {}".format(disk_info))
5729 #get VM
5730 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5731 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
5732 if vm_obj:
5733 status = self.add_disk(vcenter_conect=vcenter_conect,
5734 vm=vm_obj,
5735 disk_info=disk_info,
5736 size=size,
5737 vapp_uuid=vapp_uuid
5738 )
5739 if status:
5740 self.logger.info("Disk from image id {} added to {}".format(image_id,
5741 vm_obj.config.name)
5742 )
5743 else:
5744 msg = "No disk found with image id {} to add in VM {}".format(
5745 image_id,
5746 vm_obj.config.name)
5747 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
5748
5749
5750 def find_disk(self, vm_obj):
5751 """
5752 Method to find details of existing disk in VM
5753 Args :
5754 vm_obj - vCenter object of VM
5755 
5756 Returns:
5757 disk_info : dict of disk details
5758 """
5759 disk_info = {}
5760 if vm_obj:
5761 try:
5762 devices = vm_obj.config.hardware.device
5763 for device in devices:
5764 if type(device) is vim.vm.device.VirtualDisk:
5765 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
5766 disk_info["full_path"] = device.backing.fileName
5767 disk_info["datastore"] = device.backing.datastore
5768 disk_info["capacityKB"] = device.capacityInKB
5769 break
5770 except Exception as exp:
5771 self.logger.error("find_disk() : exception occurred while "\
5772 "getting existing disk details :{}".format(exp))
5773 return disk_info
5774
5775
5776 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
5777 """
5778 Method to add existing disk in VM
5779 Args :
5780 vcenter_conect - vCenter content object
5781 vm - vCenter vm object
5782 disk_info : dict of disk details
5783 Returns:
5784 status : status of add disk task
5785 """
5786 datastore = disk_info["datastore"] if "datastore" in disk_info else None
5787 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
5788 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
5789 if size is not None:
5790 #Convert size from GB to KB
5791 sizeKB = int(size) * 1024 * 1024
5792             # compare size of existing disk and user-given size; assign whichever is greater
5793 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
5794 sizeKB, capacityKB))
5795 if sizeKB > capacityKB:
5796 capacityKB = sizeKB
5797
5798 if datastore and fullpath and capacityKB:
5799 try:
5800 spec = vim.vm.ConfigSpec()
5801 # get all disks on a VM, set unit_number to the next available
5802 unit_number = 0
5803 for dev in vm.config.hardware.device:
5804 if hasattr(dev.backing, 'fileName'):
5805 unit_number = int(dev.unitNumber) + 1
5806 # unit_number 7 reserved for scsi controller
5807 if unit_number == 7:
5808 unit_number += 1
5809 if isinstance(dev, vim.vm.device.VirtualDisk):
5810 #vim.vm.device.VirtualSCSIController
5811 controller_key = dev.controllerKey
5812
5813 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
5814 unit_number, controller_key))
5815 # add disk here
5816 dev_changes = []
5817 disk_spec = vim.vm.device.VirtualDeviceSpec()
5818 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5819 disk_spec.device = vim.vm.device.VirtualDisk()
5820 disk_spec.device.backing = \
5821 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
5822 disk_spec.device.backing.thinProvisioned = True
5823 disk_spec.device.backing.diskMode = 'persistent'
5824 disk_spec.device.backing.datastore = datastore
5825 disk_spec.device.backing.fileName = fullpath
5826
5827 disk_spec.device.unitNumber = unit_number
5828 disk_spec.device.capacityInKB = capacityKB
5829 disk_spec.device.controllerKey = controller_key
5830 dev_changes.append(disk_spec)
5831 spec.deviceChange = dev_changes
5832 task = vm.ReconfigVM_Task(spec=spec)
5833 status = self.wait_for_vcenter_task(task, vcenter_conect)
5834 return status
5835 except Exception as exp:
5836 exp_msg = "add_disk() : exception {} occurred while adding disk "\
5837 "{} to vm {}".format(exp,
5838 fullpath,
5839 vm.config.name)
5840 self.rollback_newvm(vapp_uuid, exp_msg)
5841 else:
5842 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
5843 self.rollback_newvm(vapp_uuid, msg)
5844
5845
5846 def get_vcenter_content(self):
5847 """
5848 Get the vsphere content object
5849 """
5850 try:
5851 vm_vcenter_info = self.get_vm_vcenter_info()
5852 except Exception as exp:
5853             self.logger.error("Error occurred while getting vCenter information"\
5854 " for VM : {}".format(exp))
5855 raise vimconn.vimconnException(message=exp)
5856
5857 context = None
5858 if hasattr(ssl, '_create_unverified_context'):
5859 context = ssl._create_unverified_context()
5860
5861 vcenter_conect = SmartConnect(
5862 host=vm_vcenter_info["vm_vcenter_ip"],
5863 user=vm_vcenter_info["vm_vcenter_user"],
5864 pwd=vm_vcenter_info["vm_vcenter_password"],
5865 port=int(vm_vcenter_info["vm_vcenter_port"]),
5866 sslContext=context
5867 )
5868 atexit.register(Disconnect, vcenter_conect)
5869 content = vcenter_conect.RetrieveContent()
5870 return vcenter_conect, content
5871
5872
5873 def get_vm_moref_id(self, vapp_uuid):
5874 """
5875 Get the moref_id of given VM
5876 """
5877 try:
5878 if vapp_uuid:
5879 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
5880 if vm_details and "vm_vcenter_info" in vm_details:
5881 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
5882 return vm_moref_id
5883
5884 except Exception as exp:
5885 self.logger.error("Error occurred while getting VM moref ID"\
5886 " for VM: {}".format(exp))
5887 return None
5888
5889
5890 def get_vapp_template_details(self, catalogs=None, image_id=None, template_name=None):
5891 """
5892 Method to get vApp template details
5893 Args :
5894 catalogs - list of VDC catalogs
5895 image_id - Catalog ID to find
5896 template_name : template name in catalog
5897 Returns:
5898 parsed_response : dict of vApp template details
5899 """
5900 parsed_response = {}
5901
5902 vca = self.connect_as_admin()
5903 if not vca:
5904 raise vimconn.vimconnConnectionException("Failed to connect to vCD")
5905
5906 try:
5907 org, vdc = self.get_vdc_details()
5908 catalog = self.get_catalog_obj(image_id, catalogs)
5909 if catalog:
5910 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
5911 catalog_items = [items.attrib]
5912
5913 if len(catalog_items) == 1:
5914 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5915 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5916
5917 response = self.perform_request(req_type='GET',
5918 url=catalog_items[0].get('href'),
5919 headers=headers)
5920 catalogItem = XmlElementTree.fromstring(response.content)
5921 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
5922 vapp_template_href = entity.get("href")
5923 #get vapp details and parse moref id
5924
5925 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
5926 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
5927 'vmw': 'http://www.vmware.com/schema/ovf',
5928 'vm': 'http://www.vmware.com/vcloud/v1.5',
5929 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5930 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
5931 'xmlns':"http://www.vmware.com/vcloud/v1.5"
5932 }
5933
5934 if vca._session:
5935 response = self.perform_request(req_type='GET',
5936 url=vapp_template_href,
5937 headers=headers)
5938
5939 if response.status_code != requests.codes.ok:
5940 self.logger.debug("REST API call {} failed. Return status code {}".format(
5941 vapp_template_href, response.status_code))
5942
5943 else:
5944 xmlroot_respond = XmlElementTree.fromstring(response.content)
5945 children_section = xmlroot_respond.find('vm:Children/', namespaces)
5946 if children_section is not None:
5947 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
5948 if vCloud_extension_section is not None:
5949 vm_vcenter_info = {}
5950 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
5951 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
5952 if vmext is not None:
5953 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
5954 parsed_response["vm_vcenter_info"]= vm_vcenter_info
5955
5956 except Exception as exp :
5957 self.logger.info("Error occurred calling REST API to get vApp details: {}".format(exp))
5958
5959 return parsed_response
5960
5961
5962 def rollback_newvm(self, vapp_uuid, msg, exp_type="Generic"):
5963 """
5964 Method to delete vApp
5965 Args :
5966 vapp_uuid - vApp UUID
5967 msg - Error message to be logged
5968 exp_type : Exception type
5969 Returns:
5970 None
5971 """
5972 if vapp_uuid:
5973 status = self.delete_vminstance(vapp_uuid)
5974 else:
5975 msg = "No vApp ID"
5976 self.logger.error(msg)
5977 if exp_type == "Generic":
5978 raise vimconn.vimconnException(msg)
5979 elif exp_type == "NotFound":
5980 raise vimconn.vimconnNotFoundException(message=msg)
5981
5982 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
5983 """
5984 Method to attach SRIOV adapters to VM
5985
5986 Args:
5987 vapp_uuid - uuid of vApp/VM
5988 sriov_nets - SRIOV device information as specified in the VNFD (flavor)
5989 vmname_andid - vmname
5990
5991 Returns:
5992 The status of the add SRIOV adapter task, the vm object and
5993 the vcenter_conect object
5994 """
5995 vm_obj = None
5996 vcenter_conect, content = self.get_vcenter_content()
5997 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5998
5999 if vm_moref_id:
6000 try:
6001 no_of_sriov_devices = len(sriov_nets)
6002 if no_of_sriov_devices > 0:
6003 #Get VM and its host
6004 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
6005 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
6006 if host_obj and vm_obj:
6007 #get SRIOV devices from the host on which the vApp is currently installed
6008 available_sriov_devices = self.get_sriov_devices(host_obj,
6009 no_of_sriov_devices,
6010 )
6011
6012 if len(available_sriov_devices) == 0:
6013 #find other hosts with active PCI devices
6014 new_host_obj, available_sriov_devices = self.get_host_and_sriov_devices(
6015 content,
6016 no_of_sriov_devices,
6017 )
6018
6019 if new_host_obj is not None and len(available_sriov_devices) > 0:
6020 #Migrate vm to the host where SRIOV devices are available
6021 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
6022 new_host_obj))
6023 task = self.relocate_vm(new_host_obj, vm_obj)
6024 if task is not None:
6025 result = self.wait_for_vcenter_task(task, vcenter_conect)
6026 self.logger.info("Migrate VM status: {}".format(result))
6027 host_obj = new_host_obj
6028 else:
6029 self.logger.error("Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj))
6030 raise vimconn.vimconnNotFoundException(
6031 "Fail to migrate VM : {} to host {}".format(
6032 vmname_andid,
6033 new_host_obj)
6034 )
6035
6036 if host_obj is not None and available_sriov_devices is not None and len(available_sriov_devices) > 0:
6037 #Add SRIOV devices one by one
6038 for sriov_net in sriov_nets:
6039 network_name = sriov_net.get('net_id')
6040 dvs_portgr_name = self.create_dvPort_group(network_name)
6041 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
6042 #add vlan ID, modify portgroup for vlan ID
6043 self.configure_vlanID(content, vcenter_conect, network_name)
6044
6045 task = self.add_sriov_to_vm(content,
6046 vm_obj,
6047 host_obj,
6048 network_name,
6049 available_sriov_devices[0]
6050 )
6051 if task:
6052 status = self.wait_for_vcenter_task(task, vcenter_conect)
6053 if status:
6054 self.logger.info("Added SRIOV {} to VM {}".format(
6055 no_of_sriov_devices,
6056 str(vm_obj)))
6057 else:
6058 self.logger.error("Fail to add SRIOV {} to VM {}".format(
6059 no_of_sriov_devices,
6060 str(vm_obj)))
6061 raise vimconn.vimconnUnexpectedResponse(
6062 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
6063 )
6064 return True, vm_obj, vcenter_conect
6065 else:
6066 self.logger.error("Currently there is no host with"\
6067 " {} number of avaialble SRIOV "\
6068 "VFs required for VM {}".format(
6069 no_of_sriov_devices,
6070 vmname_andid)
6071 )
6072 raise vimconn.vimconnNotFoundException(
6073 "Currently there is no host with {} "\
6074 "number of avaialble SRIOV devices required for VM {}".format(
6075 no_of_sriov_devices,
6076 vmname_andid))
6077 else:
6078 self.logger.debug("No information about SRIOV devices: {}".format(sriov_nets))
6079
6080 except vmodl.MethodFault as error:
6081 self.logger.error("Error occurred while adding SRIOV: {}".format(error))
6082 return None, vm_obj, vcenter_conect
6083
6084
6085 def get_sriov_devices(self, host, no_of_vfs):
6086 """
6087 Method to get the details of SRIOV devices on given host
6088 Args:
6089 host - vSphere host object
6090 no_of_vfs - number of VFs needed on host
6091
6092 Returns:
6093 array of SRIOV devices
6094 """
6095 sriovInfo=[]
6096 if host:
6097 for device in host.config.pciPassthruInfo:
6098 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
6099 if device.numVirtualFunction >= no_of_vfs:
6100 sriovInfo.append(device)
6101 break
6102 return sriovInfo
6103
6104
6105 def get_host_and_sriov_devices(self, content, no_of_vfs):
6106 """
6107 Method to get the details of SRIOV devices on all hosts
6108
6109 Args:
6110 content - vSphere host object
6111 no_of_vfs - number of pci VFs needed on host
6112
6113 Returns:
6114 array of SRIOV devices and host object
6115 """
6116 host_obj = None
6117 sriov_device_objs = None
6118 try:
6119 if content:
6120 container = content.viewManager.CreateContainerView(content.rootFolder,
6121 [vim.HostSystem], True)
6122 for host in container.view:
6123 devices = self.get_sriov_devices(host, no_of_vfs)
6124 if devices:
6125 host_obj = host
6126 sriov_device_objs = devices
6127 break
6128 except Exception as exp:
6129 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
6130
6131 return host_obj,sriov_device_objs
6132
6133
6134 def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
6135 """
6136 Method to add SRIOV adapter to vm
6137
6138 Args:
6139 host_obj - vSphere host object
6140 vm_obj - vSphere vm object
6141 content - vCenter content object
6142 network_name - name of distributed virtual portgroup
6143 sriov_device - SRIOV device info
6144
6145 Returns:
6146 task object
6147 """
6148 devices = []
6149 vnic_label = "sriov nic"
6150 try:
6151 dvs_portgr = self.get_dvport_group(network_name)
6152 network_name = dvs_portgr.name
6153 nic = vim.vm.device.VirtualDeviceSpec()
6154 # VM device
6155 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
6156 nic.device = vim.vm.device.VirtualSriovEthernetCard()
6157 nic.device.addressType = 'assigned'
6158 #nic.device.key = 13016
6159 nic.device.deviceInfo = vim.Description()
6160 nic.device.deviceInfo.label = vnic_label
6161 nic.device.deviceInfo.summary = network_name
6162 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
6163
6164 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
6165 nic.device.backing.deviceName = network_name
6166 nic.device.backing.useAutoDetect = False
6167 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
6168 nic.device.connectable.startConnected = True
6169 nic.device.connectable.allowGuestControl = True
6170
6171 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
6172 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
6173 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
6174
6175 devices.append(nic)
6176 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
6177 task = vm_obj.ReconfigVM_Task(vmconf)
6178 return task
6179 except Exception as exp:
6180 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
6181 return None
6182
6183
6184 def create_dvPort_group(self, network_name):
6185 """
6186 Method to create distributed virtual portgroup
6187
6188 Args:
6189 network_name - name of network/portgroup
6190
6191 Returns:
6192 portgroup key
6193 """
6194 try:
6195 new_network_name = [network_name, '-', str(uuid.uuid4())]
6196 network_name=''.join(new_network_name)
6197 vcenter_conect, content = self.get_vcenter_content()
6198
6199 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
6200 if dv_switch:
6201 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6202 dv_pg_spec.name = network_name
6203
6204 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
6205 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6206 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
6207 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
6208 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
6209 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
6210
6211 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
6212 self.wait_for_vcenter_task(task, vcenter_conect)
6213
6214 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
6215 if dvPort_group:
6216 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
6217 return dvPort_group.key
6218 else:
6219 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
6220
6221 except Exception as exp:
6222 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
6223 " : {}".format(network_name, exp))
6224 return None
6225
6226 def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
6227 """
6228 Method to reconfigure distributed virtual portgroup
6229 
6230 Args:
6231 dvPort_group_name - name of distributed virtual portgroup
6232 content - vCenter content object
6233 config_info - distributed virtual portgroup configuration
6234
6235 Returns:
6236 task object
6237 """
6238 try:
6239 dvPort_group = self.get_dvport_group(dvPort_group_name)
6240 if dvPort_group:
6241 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6242 dv_pg_spec.configVersion = dvPort_group.config.configVersion
6243 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6244 if "vlanID" in config_info:
6245 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
6246 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
6247
6248 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
6249 return task
6250 else:
6251 return None
6252 except Exception as exp:
6253 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
6254 " : {}".format(dvPort_group_name, exp))
6255 return None
6256
6257
6258 def destroy_dvport_group(self , dvPort_group_name):
6259 """
6260 Method to destroy distributed virtual portgroup
6261
6262 Args:
6263 dvPort_group_name - name of distributed virtual portgroup
6264
6265 Returns:
6266 True if the portgroup was deleted successfully, else False
6267 """
6268 vcenter_conect, content = self.get_vcenter_content()
6269 try:
6270 status = None
6271 dvPort_group = self.get_dvport_group(dvPort_group_name)
6272 if dvPort_group:
6273 task = dvPort_group.Destroy_Task()
6274 status = self.wait_for_vcenter_task(task, vcenter_conect)
6275 return status
6276 except vmodl.MethodFault as exp:
6277 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
6278 exp, dvPort_group_name))
6279 return None
6280
6281
6282 def get_dvport_group(self, dvPort_group_name):
6283 """
6284 Method to get distributed virtual portgroup
6285
6286 Args:
6287 dvPort_group_name - name of distributed virtual portgroup
6288
6289 Returns:
6290 portgroup object
6291 """
6292 vcenter_conect, content = self.get_vcenter_content()
6293 dvPort_group = None
6294 try:
6295 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
6296 for item in container.view:
6297 if item.key == dvPort_group_name:
6298 dvPort_group = item
6299 break
6300 return dvPort_group
6301 except vmodl.MethodFault as exp:
6302 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6303 exp, dvPort_group_name))
6304 return None
6305
6306 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
6307 """
6308 Method to get distributed virtual portgroup vlan ID
6309
6310 Args:
6311 dvPort_group_name - name of distributed virtual portgroup
6312
6313 Returns:
6314 vlan ID
6315 """
6316 vlanId = None
6317 try:
6318 dvPort_group = self.get_dvport_group(dvPort_group_name)
6319 if dvPort_group:
6320 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
6321 except vmodl.MethodFault as exp:
6322 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6323 exp, dvPort_group_name))
6324 return vlanId
6325
6326
6327 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
6328 """
6329 Method to configure the vlan ID of a distributed virtual portgroup
6330
6331 Args:
6332 dvPort_group_name - name of distributed virtual portgroup
6333
6334 Returns:
6335 None
6336 """
6337 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
6338 if vlanID == 0:
6339 #configure vlanID
6340 vlanID = self.genrate_vlanID(dvPort_group_name)
6341 config = {"vlanID":vlanID}
6342 task = self.reconfig_portgroup(content, dvPort_group_name,
6343 config_info=config)
6344 if task:
6345 status= self.wait_for_vcenter_task(task, vcenter_conect)
6346 if status:
6347 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
6348 dvPort_group_name,vlanID))
6349 else:
6350 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
6351 dvPort_group_name, vlanID))
6352
6353
6354 def genrate_vlanID(self, network_name):
6355 """
6356 Method to get unused vlanID
6357 Args:
6358 network_name - name of network/portgroup
6359 Returns:
6360 vlanID
6361 """
6362 vlan_id = None
6363 used_ids = []
6364 if self.config.get('vlanID_range') is None:
6365 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
6366 "in the config before creating an SRIOV network with a vlan tag")
6367 if "used_vlanIDs" not in self.persistent_info:
6368 self.persistent_info["used_vlanIDs"] = {}
6369 else:
6370 used_ids = list(self.persistent_info["used_vlanIDs"].values())
6371
6372 for vlanID_range in self.config.get('vlanID_range'):
6373 start_vlanid, end_vlanid = vlanID_range.split("-")
6374 if int(start_vlanid) > int(end_vlanid):
6375 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
6376 vlanID_range))
6377
6378 for id in range(int(start_vlanid), int(end_vlanid) + 1):
6379 if id not in used_ids:
6380 vlan_id = id
6381 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
6382 return vlan_id
6383 if vlan_id is None:
6384 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
6385
6386
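# Illustrative sketch (added note, not in the original source): genrate_vlanID()
# expects the VIM config to carry 'vlanID_range' as a list of "start-end" strings;
# the values below are hypothetical.
#
#   config = {'vlanID_range': ["3000-3099", "3200-3299"]}
#
# Each range is split on "-" and scanned for the first ID not already recorded in
# self.persistent_info["used_vlanIDs"].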
6387 def get_obj(self, content, vimtype, name):
6388 """
6389 Get the vSphere object associated with a given text name
6390 """
6391 obj = None
6392 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
6393 for item in container.view:
6394 if item.name == name:
6395 obj = item
6396 break
6397 return obj
6398
6399
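# Illustrative usage sketch (the host name below is hypothetical); add_sriov_to_vm()
# uses the same helper to resolve the backing network:
#
#   host = self.get_obj(content, [vim.HostSystem], "esxi-host-01")
#   network = self.get_obj(content, [vim.Network], network_name)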
6400 def insert_media_to_vm(self, vapp, image_id):
6401 """
6402 Method to insert media CD-ROM (ISO image) from catalog to vm.
6403 vapp - vapp object to get vm id
6404 image_id - image id of the CD-ROM to be inserted into the vm
6405 """
6406 # create connection object
6407 vca = self.connect()
6408 try:
6409 # fetching catalog details
6410 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
6411 if vca._session:
6412 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6413 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6414 response = self.perform_request(req_type='GET',
6415 url=rest_url,
6416 headers=headers)
6417
6418 if response.status_code != 200:
6419 self.logger.error("REST call {} failed reason : {}"\
6420 "status code : {}".format(url_rest_call,
6421 response.content,
6422 response.status_code))
6423 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
6424 "catalog details")
6425 # searching iso name and id
6426 iso_name,media_id = self.get_media_details(vca, response.content)
6427
6428 if iso_name and media_id:
6429 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6430 <ns6:MediaInsertOrEjectParams
6431 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
6432 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6433 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
6434 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6435 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
6436 xmlns:ns7="http://www.vmware.com/schema/ovf"
6437 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
6438 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
6439 <ns6:Media
6440 type="application/vnd.vmware.vcloud.media+xml"
6441 name="{}"
6442 id="urn:vcloud:media:{}"
6443 href="https://{}/api/media/{}"/>
6444 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
6445 self.url,media_id)
6446
6447 for vms in vapp.get_all_vms():
6448 vm_id = vms.get('id').split(':')[-1]
6449
6450 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
6451 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
6452
6453 response = self.perform_request(req_type='POST',
6454 url=rest_url,
6455 data=data,
6456 headers=headers)
6457
6458 if response.status_code != 202:
6459 error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
6460 "Status code {}".format(response.text, response.status_code)
6461 self.logger.error(error_msg)
6462 raise vimconn.vimconnException(error_msg)
6463 else:
6464 task = self.get_task_from_response(response.content)
6465 result = self.client.get_task_monitor().wait_for_success(task=task)
6466 if result.get('status') == 'success':
6467 self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
6468 " image to vm {}".format(vm_id))
6469
6470 except Exception as exp:
6471 self.logger.error("insert_media_to_vm() : exception occurred "\
6472 "while inserting media CD-ROM")
6473 raise vimconn.vimconnException(message=exp)
6474
6475
6476 def get_media_details(self, vca, content):
6477 """
6478 Method to get catalog item details
6479 vca - connection object
6480 content - Catalog details
6481 Return - Media name, media id
6482 """
6483 cataloghref_list = []
6484 try:
6485 if content:
6486 vm_list_xmlroot = XmlElementTree.fromstring(content)
6487 for child in vm_list_xmlroot.iter():
6488 if 'CatalogItem' in child.tag:
6489 cataloghref_list.append(child.attrib.get('href'))
6490 if cataloghref_list is not None:
6491 for href in cataloghref_list:
6492 if href:
6493 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6494 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6495 response = self.perform_request(req_type='GET',
6496 url=href,
6497 headers=headers)
6498 if response.status_code != 200:
6499 self.logger.error("REST call {} failed reason : {}"\
6500 "status code : {}".format(href,
6501 response.content,
6502 response.status_code))
6503 raise vimconn.vimconnException("get_media_details : Failed to get "\
6504 "catalogitem details")
6505 list_xmlroot = XmlElementTree.fromstring(response.content)
6506 for child in list_xmlroot.iter():
6507 if 'Entity' in child.tag:
6508 if 'media' in child.attrib.get('href'):
6509 name = child.attrib.get('name')
6510 media_id = child.attrib.get('href').split('/').pop()
6511 return name,media_id
6512 else:
6513 self.logger.debug("Media name and id not found")
6514 return False,False
6515 except Exception as exp:
6516 self.logger.error("get_media_details : exception occurred "\
6517 "getting media details")
6518 raise vimconn.vimconnException(message=exp)
6519
6520
6521 def retry_rest(self, method, url, add_headers=None, data=None):
6522 """ Method to get Token & retry respective REST request
6523 Args:
6524 method - HTTP method; one of 'GET', 'PUT', 'POST' or 'DELETE'
6525 url - request url to be used
6526 add_headers - Additional headers (optional)
6527 data - Request payload data to be passed in request
6528 Returns:
6529 response - Response of request
6530 """
6531 response = None
6532
6533 #Get token
6534 self.get_token()
6535
6536 if self.client._session:
6537 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6538 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6539
6540 if add_headers:
6541 headers.update(add_headers)
6542
6543 if method == 'GET':
6544 response = self.perform_request(req_type='GET',
6545 url=url,
6546 headers=headers)
6547 elif method == 'PUT':
6548 response = self.perform_request(req_type='PUT',
6549 url=url,
6550 headers=headers,
6551 data=data)
6552 elif method == 'POST':
6553 response = self.perform_request(req_type='POST',
6554 url=url,
6555 headers=headers,
6556 data=data)
6557 elif method == 'DELETE':
6558 response = self.perform_request(req_type='DELETE',
6559 url=url,
6560 headers=headers)
6561 return response
6562
6563
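# Illustrative usage sketch (URL and payload are hypothetical): regenerate the
# session token and replay a request with the refreshed x-vcloud-authorization header.
#
#   response = self.retry_rest('GET', "{}/api/vApp/vapp-{}".format(self.url, vapp_id))
#   response = self.retry_rest('PUT', url, add_headers={'Content-Type': content_type}, data=payload)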
6564 def get_token(self):
6565 """ Generate a new token if expired
6566
6567 Returns:
6568 None; self.client is refreshed with a client object that can later be used to connect to vCloud Director as admin for the VDC
6569 """
6570 try:
6571 self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
6572 self.user,
6573 self.org_name))
6574 host = self.url
6575 client = Client(host, verify_ssl_certs=False)
6576 client.set_highest_supported_version()
6577 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
6578 # connection object
6579 self.client = client
6580
6581 except Exception:
6582 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
6583 "{} as user: {}".format(self.org_name, self.user))
6584
6585 if not client:
6586 raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
6587
6588
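# Note (added): get_token() returns nothing; it replaces self.client, whose session
# header 'x-vcloud-authorization' is then reused by retry_rest() and by the headers
# built throughout this class.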
6589 def get_vdc_details(self):
6590 """ Get VDC details using pyVcloud Lib
6591
6592 Returns org and vdc object
6593 """
6594 vdc = None
6595 try:
6596 org = Org(self.client, resource=self.client.get_org())
6597 vdc = org.get_vdc(self.tenant_name)
6598 except Exception as e:
6599 # pyvcloud does not raise a specific exception; refresh the token nevertheless
6600 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
6601
6602 #Retry once, if failed by refreshing token
6603 if vdc is None:
6604 self.get_token()
6605 org = Org(self.client, resource=self.client.get_org())
6606 vdc = org.get_vdc(self.tenant_name)
6607
6608 return org, vdc
6609
6610
6611 def perform_request(self, req_type, url, headers=None, data=None):
6612 """Perform the POST/PUT/GET/DELETE request."""
6613
6614 #Log REST request details
6615 self.log_request(req_type, url=url, headers=headers, data=data)
6616 # perform request and return its result
6617 if req_type == 'GET':
6618 response = requests.get(url=url,
6619 headers=headers,
6620 verify=False)
6621 elif req_type == 'PUT':
6622 response = requests.put(url=url,
6623 headers=headers,
6624 data=data,
6625 verify=False)
6626 elif req_type == 'POST':
6627 response = requests.post(url=url,
6628 headers=headers,
6629 data=data,
6630 verify=False)
6631 elif req_type == 'DELETE':
6632 response = requests.delete(url=url,
6633 headers=headers,
6634 verify=False)
6635 #Log the REST response
6636 self.log_response(response)
6637
6638 return response
6639
6640
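# Illustrative usage sketch, mirroring the callers above (rest_url is assumed to be
# an already-built vCD endpoint):
#
#   headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
#              'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
#   response = self.perform_request(req_type='GET', url=rest_url, headers=headers)
#   if response.status_code != requests.codes.ok:
#       ...  # log and raise, as the callers in this class do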
6641 def log_request(self, req_type, url=None, headers=None, data=None):
6642 """Logs REST request details"""
6643
6644 if req_type is not None:
6645 self.logger.debug("Request type: {}".format(req_type))
6646
6647 if url is not None:
6648 self.logger.debug("Request url: {}".format(url))
6649
6650 if headers is not None:
6651 for header in headers:
6652 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
6653
6654 if data is not None:
6655 self.logger.debug("Request data: {}".format(data))
6656
6657
6658 def log_response(self, response):
6659 """Logs REST response details"""
6660
6661 self.logger.debug("Response status code: {} ".format(response.status_code))
6662
6663
6664 def get_task_from_response(self, content):
6665 """
6666 content - API response content(response.content)
6667 return task object
6668 """
6669 xmlroot = XmlElementTree.fromstring(content)
6670 if xmlroot.tag.split('}')[1] == "Task":
6671 return xmlroot
6672 else:
6673 for ele in xmlroot:
6674 if ele.tag.split("}")[1] == "Tasks":
6675 task = ele[0]
6676 break
6677 return task
6678
6679
6680 def power_on_vapp(self,vapp_id, vapp_name):
6681 """
6682 vapp_id - vApp uuid
6683 vapp_name - vApp name
6684 return - Task object
6685 """
6686 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6687 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6688
6689 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
6690 vapp_id)
6691 response = self.perform_request(req_type='POST',
6692 url=poweron_href,
6693 headers=headers)
6694
6695 if response.status_code != 202:
6696 self.logger.error("REST call {} failed reason : {}"\
6697 "status code : {} ".format(poweron_href,
6698 response.content,
6699 response.status_code))
6700 raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
6701 "vApp {}".format(vapp_name))
6702 else:
6703 poweron_task = self.get_task_from_response(response.content)
6704 return poweron_task
6705
6706
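# Illustrative usage sketch (vapp_id and vapp_name are hypothetical): power on a vApp
# and block until the returned task completes, as done elsewhere in this class.
#
#   poweron_task = self.power_on_vapp(vapp_id, vapp_name)
#   result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
#   if result.get('status') == 'success':
#       self.logger.info("vApp {} powered on".format(vapp_name))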