VCD feature 7193-provider_network
[osm/RO.git] / osm_ro / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implements an abstract class (vimconn) in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 import vimconn
31 import os
32 import shutil
33 import subprocess
34 import tempfile
35 import traceback
36 import itertools
37 import requests
38 import ssl
39 import atexit
40
41 from pyVmomi import vim, vmodl
42 from pyVim.connect import SmartConnect, Disconnect
43
44 from xml.etree import ElementTree as XmlElementTree
45 from lxml import etree as lxmlElementTree
46
47 import yaml
48 from pyvcloud.vcd.client import BasicLoginCredentials,Client,VcdTaskException
49 from pyvcloud.vcd.vdc import VDC
50 from pyvcloud.vcd.org import Org
51 import re
52 from pyvcloud.vcd.vapp import VApp
53 from xml.sax.saxutils import escape
54 import logging
55 import json
56 import time
57 import uuid
58 import httplib
59 #For python3
60 #import http.client
61 import hashlib
62 import socket
63 import struct
64 import netaddr
65 import random
66
67 # global variable for vcd connector type
68 STANDALONE = 'standalone'
69
70 # key for flavor dicts
71 FLAVOR_RAM_KEY = 'ram'
72 FLAVOR_VCPUS_KEY = 'vcpus'
73 FLAVOR_DISK_KEY = 'disk'
74 DEFAULT_IP_PROFILE = {'dhcp_count':50,
75 'dhcp_enabled':True,
76 'ip_version':"IPv4"
77 }
78 # global variable for wait time
79 INTERVAL_TIME = 5
80 MAX_WAIT_TIME = 1800
81
82 API_VERSION = '27.0'
83
84 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
85 __date__ = "$09-Mar-2018 11:09:29$"
86 __version__ = '0.2'
87
88 # -1: "Could not be created",
89 # 0: "Unresolved",
90 # 1: "Resolved",
91 # 2: "Deployed",
92 # 3: "Suspended",
93 # 4: "Powered on",
94 # 5: "Waiting for user input",
95 # 6: "Unknown state",
96 # 7: "Unrecognized state",
97 # 8: "Powered off",
98 # 9: "Inconsistent state",
99 # 10: "Children do not all have the same status",
100 # 11: "Upload initiated, OVF descriptor pending",
101 # 12: "Upload initiated, copying contents",
102 # 13: "Upload initiated , disk contents pending",
103 # 14: "Upload has been quarantined",
104 # 15: "Upload quarantine period has expired"
105
106 # mapping vCD status to MANO
107 vcdStatusCode2manoFormat = {4: 'ACTIVE',
108 7: 'PAUSED',
109 3: 'SUSPENDED',
110 8: 'INACTIVE',
111 12: 'BUILD',
112 -1: 'ERROR',
113 14: 'DELETED'}
114
115 #
116 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
117 'ERROR': 'ERROR', 'DELETED': 'DELETED'
118 }
119
120 class vimconnector(vimconn.vimconnector):
121 # dict used to store flavor in memory
122 flavorlist = {}
123
124 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
125 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
126 """
127 Constructor create vmware connector to vCloud director.
128
129 By default the constructor doesn't validate the connection state, so the client can create the object with None arguments.
130 If the client specifies username, password, host and VDC name, the connector initializes the remaining attributes:
131 
132 a) It initializes the organization UUID
133 b) It initializes the tenant_id/VDC ID (this information is derived from the tenant name)
134
135 Args:
136 uuid - is organization uuid.
137 name - is the organization name that must be present in vCloud director.
138 tenant_id - is the VDC uuid; it must be present in vCloud director
139 tenant_name - is the VDC name.
140 url - is the hostname or ip address of vCloud director
141 url_admin - same as above.
142 user - is a user that is an administrator for the organization. Caller must make sure that
143 the username has the right privileges.
144 
145 password - is the password for the user.
146 
147 The VMware connector also requires PVDC administrative privileges and a separate account.
148 These variables must be passed via the config argument, a dict containing the keys
149
150 dict['admin_username']
151 dict['admin_password']
152 config - Provide NSX and vCenter information
153
154 Returns:
155 Nothing.
156 """
157
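# Example (illustrative only; all values below are placeholders, not taken from any real
# deployment) of the config keys this constructor consumes:
#
#   config = {'admin_username': 'system_admin', 'admin_password': 'admin_pass',   # PVDC admin account (mandatory)
#             'nsx_manager': 'https://10.0.0.10', 'nsx_user': 'nsx_admin',        # NSX access (mandatory)
#             'nsx_password': 'nsx_pass',
#             'orgname': 'MyOrg',                                                 # optional; org can also be given as "org:vdc" in tenant_name
#             'vcenter_ip': '10.0.0.20', 'vcenter_port': 443,                     # optional vCenter access
#             'vcenter_user': 'administrator@vsphere.local', 'vcenter_password': 'vc_pass'}
#   vim = vimconnector(name='MyOrg', tenant_name='MyOrg:MyVDC', url='https://vcd.example.com',
#                      user='org_admin', passwd='org_pass', config=config)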
158 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
159 url_admin, user, passwd, log_level, config)
160
161 self.logger = logging.getLogger('openmano.vim.vmware')
162 self.logger.setLevel(10)
163 self.persistent_info = persistent_info
164
165 self.name = name
166 self.id = uuid
167 self.url = url
168 self.url_admin = url_admin
169 self.tenant_id = tenant_id
170 self.tenant_name = tenant_name
171 self.user = user
172 self.passwd = passwd
173 self.config = config
174 self.admin_password = None
175 self.admin_user = None
176 self.org_name = ""
177 self.nsx_manager = None
178 self.nsx_user = None
179 self.nsx_password = None
180 self.availability_zone = None
181
182 # Disable warnings from self-signed certificates.
183 requests.packages.urllib3.disable_warnings()
184
185 if tenant_name is not None:
186 orgnameandtenant = tenant_name.split(":")
187 if len(orgnameandtenant) == 2:
188 self.tenant_name = orgnameandtenant[1]
189 self.org_name = orgnameandtenant[0]
190 else:
191 self.tenant_name = tenant_name
192 if "orgname" in config:
193 self.org_name = config['orgname']
194
195 if log_level:
196 self.logger.setLevel(getattr(logging, log_level))
197
198 try:
199 self.admin_user = config['admin_username']
200 self.admin_password = config['admin_password']
201 except KeyError:
202 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
203
204 try:
205 self.nsx_manager = config['nsx_manager']
206 self.nsx_user = config['nsx_user']
207 self.nsx_password = config['nsx_password']
208 except KeyError:
209 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
210
211 self.vcenter_ip = config.get("vcenter_ip", None)
212 self.vcenter_port = config.get("vcenter_port", None)
213 self.vcenter_user = config.get("vcenter_user", None)
214 self.vcenter_password = config.get("vcenter_password", None)
215
216 #Set availability zone for Affinity rules
217 self.availability_zone = self.set_availability_zones()
218
219 # ############# Stub code for SRIOV #################
220 # try:
221 # self.dvs_name = config['dv_switch_name']
222 # except KeyError:
223 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
224 #
225 # self.vlanID_range = config.get("vlanID_range", None)
226
227 self.org_uuid = None
228 self.client = None
229
230 if not url:
231 raise vimconn.vimconnException('url param can not be NoneType')
232
233 if not self.url_admin: # try to use normal url
234 self.url_admin = self.url
235
236 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
237 self.tenant_id, self.tenant_name))
238 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
239 logging.debug("vcd admin username {} vcd admin password {}".format(self.admin_user, self.admin_password))
240
241 # initialize organization
242 if self.user is not None and self.passwd is not None and self.url:
243 self.init_organization()
244
245 def __getitem__(self, index):
246 if index == 'name':
247 return self.name
248 if index == 'tenant_id':
249 return self.tenant_id
250 if index == 'tenant_name':
251 return self.tenant_name
252 elif index == 'id':
253 return self.id
254 elif index == 'org_name':
255 return self.org_name
256 elif index == 'org_uuid':
257 return self.org_uuid
258 elif index == 'user':
259 return self.user
260 elif index == 'passwd':
261 return self.passwd
262 elif index == 'url':
263 return self.url
264 elif index == 'url_admin':
265 return self.url_admin
266 elif index == "config":
267 return self.config
268 else:
269 raise KeyError("Invalid key '%s'" % str(index))
270
271 def __setitem__(self, index, value):
272 if index == 'name':
273 self.name = value
274 elif index == 'tenant_id':
275 self.tenant_id = value
276 elif index == 'tenant_name':
277 self.tenant_name = value
278 elif index == 'id':
279 self.id = value
280 elif index == 'org_name':
281 self.org_name = value
282 elif index == 'org_uuid':
283 self.org_uuid = value
284 elif index == 'user':
285 self.user = value
286 elif index == 'passwd':
287 self.passwd = value
288 elif index == 'url':
289 self.url = value
290 elif index == 'url_admin':
291 self.url_admin = value
292 else:
293 raise KeyError("Invalid key '%s'" % str(index))
294
295 def connect_as_admin(self):
296 """ Method connects as the pvdc admin user to vCloud director.
297 There are certain actions that can be done only by the provider vdc admin user,
298 e.g. organization creation, provider network creation.
299 
300 Returns:
301 The client object that can later be used to connect to vCloud director as admin for the provider vdc
302 """
303 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
304
305 try:
306 host = self.url
307 org = 'System'
308 client_as_admin = Client(host, verify_ssl_certs=False)
309 client_as_admin.set_highest_supported_version()
310 client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
311 except Exception as e:
312 raise vimconn.vimconnException(
313 "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
314
315 return client_as_admin
316
317 def connect(self):
318 """ Method connects as a normal user to vCloud director.
319 
320 Returns:
321 The client object that can later be used to connect to vCloud director as admin for the VDC
322 """
323 try:
324 self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
325 self.user,
326 self.org_name))
327 host = self.url
328 client = Client(host, verify_ssl_certs=False)
329 client.set_highest_supported_version()
330 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
331 except:
332 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
333 "{} as user: {}".format(self.org_name, self.user))
334
335 return client
336
337 def init_organization(self):
338 """ Method initializes the organization UUID and VDC parameters.
339 
340 At bare minimum the client must provide an organization name that is present in vCloud director and a VDC.
341 
342 The VDC UUID (tenant_id) will be initialized at run time if the client didn't pass it to the constructor.
343 The Org UUID will be initialized at run time if the data center is present in vCloud director.
344 
345 Returns:
346 Nothing. Sets self.org_uuid and the tenant_id/tenant_name attributes.
347 """
348 client = self.connect()
349 if not client:
350 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
351
352 self.client = client
353 try:
354 if self.org_uuid is None:
355 org_list = client.get_org_list()
356 for org in org_list.Org:
357 # we set org UUID at the init phase but we can do it only when we have valid credential.
358 if org.get('name') == self.org_name:
359 self.org_uuid = org.get('href').split('/')[-1]
360 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
361 break
362 else:
363 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
364
365 # if all is well, request the org details
366 org_details_dict = self.get_org(org_uuid=self.org_uuid)
367
368 # there are two cases: initialize VDC ID or VDC name at run time
369 # case one: tenant_name provided but no tenant id
370 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
371 vdcs_dict = org_details_dict['vdcs']
372 for vdc in vdcs_dict:
373 if vdcs_dict[vdc] == self.tenant_name:
374 self.tenant_id = vdc
375 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
376 self.org_name))
377 break
378 else:
379 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
380 # case two we have tenant_id but we don't have tenant name so we find and set it.
381 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
382 vdcs_dict = org_details_dict['vdcs']
383 for vdc in vdcs_dict:
384 if vdc == self.tenant_id:
385 self.tenant_name = vdcs_dict[vdc]
386 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
387 self.org_name))
388 break
389 else:
390 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
391 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
392 except:
393 self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
394 self.logger.debug(traceback.format_exc())
395 self.org_uuid = None
396
397 def new_tenant(self, tenant_name=None, tenant_description=None):
398 """ Method adds a new tenant to VIM with this name.
399 This action requires access to create VDC action in vCloud director.
400
401 Args:
402 tenant_name is tenant_name to be created.
403 tenant_description not used for this call
404
405 Return:
406 returns the tenant identifier in UUID format.
407 If the action fails the method raises vimconn.vimconnException
408 """
409 vdc_task = self.create_vdc(vdc_name=tenant_name)
410 if vdc_task is not None:
411 vdc_uuid, value = vdc_task.popitem()
412 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
413 return vdc_uuid
414 else:
415 raise vimconn.vimconnException("Failed to create tenant {}".format(tenant_name))
416
417 def delete_tenant(self, tenant_id=None):
418 """ Delete a tenant from VIM
419 Args:
420 tenant_id is tenant_id to be deleted.
421
422 Return:
423 returns the tenant identifier in UUID format.
424 If the action fails the method raises an exception
425 """
426 vca = self.connect_as_admin()
427 if not vca:
428 raise vimconn.vimconnConnectionException("Failed to connect vCD")
429
430 if tenant_id is not None:
431 if vca._session:
432 #Get OrgVDC
433 url_list = [self.url, '/api/vdc/', tenant_id]
434 orgvdc_herf = ''.join(url_list)
435
436 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
437 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
438 response = self.perform_request(req_type='GET',
439 url=orgvdc_herf,
440 headers=headers)
441
442 if response.status_code != requests.codes.ok:
443 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
444 "Return status code {}".format(orgvdc_herf,
445 response.status_code))
446 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
447
448 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
449 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
450 #For python3
451 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
452 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
453 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
454 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
455
456 response = self.perform_request(req_type='DELETE',
457 url=vdc_remove_href,
458 headers=headers)
459
460 if response.status_code == 202:
461 time.sleep(5)
462 return tenant_id
463 else:
464 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
465 "Return status code {}".format(vdc_remove_href,
466 response.status_code))
467 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
468 else:
469 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
470 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
471
472
473 def get_tenant_list(self, filter_dict={}):
474 """Obtain tenants of VIM
475 filter_dict can contain the following keys:
476 name: filter by tenant name
477 id: filter by tenant uuid/id
478 <other VIM specific>
479 Returns the tenant list of dictionaries:
480 [{'name':'<name>, 'id':'<id>, ...}, ...]
481
482 """
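# Illustrative usage (the connector instance name 'conn' is hypothetical): an empty filter
# returns every VDC of the organization as {'name': <vdc name>, 'id': <vdc uuid>}, while a
# filter such as the one below returns only the matching entries.
#
#   vdcs = conn.get_tenant_list(filter_dict={'name': 'my-vdc'})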
483 org_dict = self.get_org(self.org_uuid)
484 vdcs_dict = org_dict['vdcs']
485
486 vdclist = []
487 try:
488 for k in vdcs_dict:
489 entry = {'name': vdcs_dict[k], 'id': k}
490 # if caller didn't specify dictionary we return all tenants.
491 if filter_dict is not None and filter_dict:
492 filtered_entry = entry.copy()
493 filtered_dict = set(entry.keys()) - set(filter_dict)
494 for unwanted_key in filtered_dict: del entry[unwanted_key]
495 if filter_dict == entry:
496 vdclist.append(filtered_entry)
497 else:
498 vdclist.append(entry)
499 except:
500 self.logger.debug("Error in get_tenant_list()")
501 self.logger.debug(traceback.format_exc())
502 raise vimconn.vimconnException("Incorrect state.")
503
504 return vdclist
505
506 def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
507 """Adds a tenant network to VIM
508 Params:
509 'net_name': name of the network
510 'net_type': one of:
511 'bridge': overlay isolated network
512 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
513 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
514 'ip_profile': is a dict containing the IP parameters of the network
515 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
516 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
517 'gateway_address': (Optional) ip_schema, that is X.X.X.X
518 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
519 'dhcp_enabled': True or False
520 'dhcp_start_address': ip_schema, first IP to grant
521 'dhcp_count': number of IPs to grant.
522 'shared': if this network can be seen/use by other tenants/organization
523 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
524 Returns a tuple with the network identifier and created_items, or raises an exception on error
525 created_items can be None or a dictionary where this method can include key-values that will be passed to
526 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
527 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
528 as not present.
529 """
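# Illustrative example of the parameters consumed by this method (all names, addresses and
# VLAN ids below are invented for documentation purposes; 'conn' is a hypothetical instance):
#
#   ip_profile = {'ip_version': 'IPv4', 'subnet_address': '10.10.0.0/24',
#                 'gateway_address': '10.10.0.1', 'dns_address': '8.8.8.8',
#                 'dhcp_enabled': True, 'dhcp_start_address': '10.10.0.20', 'dhcp_count': 50}
#   provider_network_profile = {'segmentation-id': 210, 'physical_network': 'provider-net-1'}
#   net_id, created_items = conn.new_network('mgmt-net', 'bridge', ip_profile=ip_profile,
#                                            provider_network_profile=provider_network_profile)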
530
531 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}"
532 .format(net_name, net_type, ip_profile, shared, provider_network_profile))
533 vlan = None
534 if provider_network_profile:
535 vlan = provider_network_profile.get("segmentation-id")
536
537 created_items = {}
538 isshared = 'false'
539 if shared:
540 isshared = 'true'
541
542 # ############# Stub code for SRIOV #################
543 # if net_type == "data" or net_type == "ptp":
544 # if self.config.get('dv_switch_name') == None:
545 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
546 # network_uuid = self.create_dvPort_group(net_name)
547 parent_network_uuid = None
548
552 if provider_network_profile is not None:
553 for k, v in provider_network_profile.items():
554 if k == 'physical_network':
555 parent_network_uuid = self.get_physical_network_by_name(v)
556
557 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
558 ip_profile=ip_profile, isshared=isshared,
559 parent_network_uuid=parent_network_uuid)
560 if network_uuid is not None:
561 return network_uuid, created_items
562 else:
563 raise vimconn.vimconnUnexpectedResponse("Failed to create a new network {}".format(net_name))
564
565 def get_vcd_network_list(self):
566 """ Method lists the networks available in the organization VDC for the logged-in tenant
567 
568 Returns:
569 The list of network entries available in the tenant VDC
570 """
571
572 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
573
574 if not self.tenant_name:
575 raise vimconn.vimconnConnectionException("Tenant name is empty.")
576
577 org, vdc = self.get_vdc_details()
578 if vdc is None:
579 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
580
581 vdc_uuid = vdc.get('id').split(":")[3]
582 if self.client._session:
583 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
584 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
585 response = self.perform_request(req_type='GET',
586 url=vdc.get('href'),
587 headers=headers)
588 if response.status_code != 200:
589 self.logger.error("Failed to get vdc content")
590 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
591 else:
592 content = XmlElementTree.fromstring(response.content)
593
594 network_list = []
595 try:
596 for item in content:
597 if item.tag.split('}')[-1] == 'AvailableNetworks':
598 for net in item:
599 response = self.perform_request(req_type='GET',
600 url=net.get('href'),
601 headers=headers)
602
603 if response.status_code != 200:
604 self.logger.error("Failed to get network content")
605 raise vimconn.vimconnNotFoundException("Failed to get network content")
606 else:
607 net_details = XmlElementTree.fromstring(response.content)
608
609 filter_dict = {}
610 net_uuid = net_details.get('id').split(":")
611 if len(net_uuid) != 4:
612 continue
613 else:
614 net_uuid = net_uuid[3]
615 # create dict entry
616 self.logger.debug("get_vcd_network_list(): Adding network {} "
617 "to a list vcd id {} network {}".format(net_uuid,
618 vdc_uuid,
619 net_details.get('name')))
620 filter_dict["name"] = net_details.get('name')
621 filter_dict["id"] = net_uuid
622 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
623 shared = True
624 else:
625 shared = False
626 filter_dict["shared"] = shared
627 filter_dict["tenant_id"] = vdc_uuid
628 if int(net_details.get('status')) == 1:
629 filter_dict["admin_state_up"] = True
630 else:
631 filter_dict["admin_state_up"] = False
632 filter_dict["status"] = "ACTIVE"
633 filter_dict["type"] = "bridge"
634 network_list.append(filter_dict)
635 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
636 except:
637 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
638 pass
639
640 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
641 return network_list
642
643 def get_network_list(self, filter_dict={}):
644 """Obtain tenant networks of VIM
645 Filter_dict can be:
646 name: network name OR/AND
647 id: network uuid OR/AND
648 shared: boolean OR/AND
649 tenant_id: tenant OR/AND
650 admin_state_up: boolean
651 status: 'ACTIVE'
652
653 [{key : value , key : value}]
654
655 Returns the network list of dictionaries:
656 [{<the fields at Filter_dict plus some VIM specific>}, ...]
657 List can be empty
658 """
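# Illustrative usage (the 'conn' instance is hypothetical): only the keys present in
# filter_dict are compared against each network entry, e.g.
#
#   nets = conn.get_network_list(filter_dict={'name': 'mgmt-net', 'shared': False})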
659
660 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
661
662 if not self.tenant_name:
663 raise vimconn.vimconnConnectionException("Tenant name is empty.")
664
665 org, vdc = self.get_vdc_details()
666 if vdc is None:
667 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
668
669 try:
670 vdcid = vdc.get('id').split(":")[3]
671
672 if self.client._session:
673 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
674 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
675 response = self.perform_request(req_type='GET',
676 url=vdc.get('href'),
677 headers=headers)
678 if response.status_code != 200:
679 self.logger.error("Failed to get vdc content")
680 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
681 else:
682 content = XmlElementTree.fromstring(response.content)
683
684 network_list = []
685 for item in content:
686 if item.tag.split('}')[-1] == 'AvailableNetworks':
687 for net in item:
688 response = self.perform_request(req_type='GET',
689 url=net.get('href'),
690 headers=headers)
691
692 if response.status_code != 200:
693 self.logger.error("Failed to get network content")
694 raise vimconn.vimconnNotFoundException("Failed to get network content")
695 else:
696 net_details = XmlElementTree.fromstring(response.content)
697
698 filter_entry = {}
699 net_uuid = net_details.get('id').split(":")
700 if len(net_uuid) != 4:
701 continue
702 else:
703 net_uuid = net_uuid[3]
704 # create dict entry
705 self.logger.debug("get_network_list(): Adding net {}"
706 " to a list vcd id {} network {}".format(net_uuid,
707 vdcid,
708 net_details.get('name')))
709 filter_entry["name"] = net_details.get('name')
710 filter_entry["id"] = net_uuid
711 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
712 shared = True
713 else:
714 shared = False
715 filter_entry["shared"] = shared
716 filter_entry["tenant_id"] = vdcid
717 if int(net_details.get('status')) == 1:
718 filter_entry["admin_state_up"] = True
719 else:
720 filter_entry["admin_state_up"] = False
721 filter_entry["status"] = "ACTIVE"
722 filter_entry["type"] = "bridge"
723 filtered_entry = filter_entry.copy()
724
725 if filter_dict is not None and filter_dict:
726 # we remove all the key : value we don't care and match only
727 # respected field
728 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
729 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
730 if filter_dict == filter_entry:
731 network_list.append(filtered_entry)
732 else:
733 network_list.append(filtered_entry)
734 except Exception as e:
735 self.logger.debug("Error in get_network_list",exc_info=True)
736 if isinstance(e, vimconn.vimconnException):
737 raise
738 else:
739 raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
740
741 self.logger.debug("Returning {}".format(network_list))
742 return network_list
743
744 def get_network(self, net_id):
745 """Method obtains network details of net_id VIM network
746 Returns a dict with the fields described in get_network_list plus some VIM specific fields"""
747
748 try:
749 org, vdc = self.get_vdc_details()
750 vdc_id = vdc.get('id').split(":")[3]
751 if self.client._session:
752 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
753 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
754 response = self.perform_request(req_type='GET',
755 url=vdc.get('href'),
756 headers=headers)
757 if response.status_code != 200:
758 self.logger.error("Failed to get vdc content")
759 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
760 else:
761 content = XmlElementTree.fromstring(response.content)
762
763 filter_dict = {}
764
765 for item in content:
766 if item.tag.split('}')[-1] == 'AvailableNetworks':
767 for net in item:
768 response = self.perform_request(req_type='GET',
769 url=net.get('href'),
770 headers=headers)
771
772 if response.status_code != 200:
773 self.logger.error("Failed to get network content")
774 raise vimconn.vimconnNotFoundException("Failed to get network content")
775 else:
776 net_details = XmlElementTree.fromstring(response.content)
777
778 vdc_network_id = net_details.get('id').split(":")
779 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
780 filter_dict["name"] = net_details.get('name')
781 filter_dict["id"] = vdc_network_id[3]
782 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
783 shared = True
784 else:
785 shared = False
786 filter_dict["shared"] = shared
787 filter_dict["tenant_id"] = vdc_id
788 if int(net_details.get('status')) == 1:
789 filter_dict["admin_state_up"] = True
790 else:
791 filter_dict["admin_state_up"] = False
792 filter_dict["status"] = "ACTIVE"
793 filter_dict["type"] = "bridge"
794 self.logger.debug("Returning {}".format(filter_dict))
795 return filter_dict
796 else:
797 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
798 except Exception as e:
799 self.logger.debug("Error in get_network")
800 self.logger.debug(traceback.format_exc())
801 if isinstance(e, vimconn.vimconnException):
802 raise
803 else:
804 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
805
806 return filter_dict
807
808 def delete_network(self, net_id, created_items=None):
809 """
810 Removes a tenant network from VIM and its associated elements
811 :param net_id: VIM identifier of the network, provided by method new_network
812 :param created_items: dictionary with extra items to be deleted. provided by method new_network
813 Returns the network identifier or raises an exception upon error or when network is not found
814 """
815
816 # ############# Stub code for SRIOV #################
817 # dvport_group = self.get_dvport_group(net_id)
818 # if dvport_group:
819 # #delete portgroup
820 # status = self.destroy_dvport_group(net_id)
821 # if status:
822 # # Remove vlanID from persistent info
823 # if net_id in self.persistent_info["used_vlanIDs"]:
824 # del self.persistent_info["used_vlanIDs"][net_id]
825 #
826 # return net_id
827
828 vcd_network = self.get_vcd_network(network_uuid=net_id)
829 if vcd_network is not None and vcd_network:
830 if self.delete_network_action(network_uuid=net_id):
831 return net_id
832 else:
833 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
834
835 def refresh_nets_status(self, net_list):
836 """Get the status of the networks
837 Params: the list of network identifiers
838 Returns a dictionary with:
839 net_id: #VIM id of this network
840 status: #Mandatory. Text with one of:
841 # DELETED (not found at vim)
842 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
843 # OTHER (Vim reported other status not understood)
844 # ERROR (VIM indicates an ERROR status)
845 # ACTIVE, INACTIVE, DOWN (admin down),
846 # BUILD (on building process)
847 #
848 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
849 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
850
851 """
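# Illustrative shape of the returned dictionary (network ids invented for the example):
#
#   {'f7b2...-uuid': {'status': 'ACTIVE', 'error_msg': '', 'vim_info': '<yaml dump of the vCD network>'},
#    'a1c4...-uuid': {'status': 'DELETED', 'error_msg': 'Network not found.', 'vim_info': 'null'}}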
852
853 dict_entry = {}
854 try:
855 for net in net_list:
856 errormsg = ''
857 vcd_network = self.get_vcd_network(network_uuid=net)
858 if vcd_network is not None and vcd_network:
859 if vcd_network['status'] == '1':
860 status = 'ACTIVE'
861 else:
862 status = 'DOWN'
863 else:
864 status = 'DELETED'
865 errormsg = 'Network not found.'
866
867 dict_entry[net] = {'status': status, 'error_msg': errormsg,
868 'vim_info': yaml.safe_dump(vcd_network)}
869 except:
870 self.logger.debug("Error in refresh_nets_status")
871 self.logger.debug(traceback.format_exc())
872
873 return dict_entry
874
875 def get_flavor(self, flavor_id):
876 """Obtain flavor details from the VIM
877 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
878 """
879 if flavor_id not in vimconnector.flavorlist:
880 raise vimconn.vimconnNotFoundException("Flavor not found.")
881 return vimconnector.flavorlist[flavor_id]
882
883 def new_flavor(self, flavor_data):
884 """Adds a tenant flavor to VIM
885 flavor_data contains a dictionary with information, keys:
886 name: flavor name
887 ram: memory (cloud type) in MBytes
888 vcpus: cpus (cloud type)
889 extended: EPA parameters
890 - numas: #items requested in same NUMA
891 memory: number of 1G huge pages memory
892 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
893 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
894 - name: interface name
895 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
896 bandwidth: X Gbps; requested guarantee bandwidth
897 vpci: requested virtual PCI address
898 disk: disk size
899 is_public:
900 #TODO to concrete
901 Returns the flavor identifier"""
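# Illustrative flavor_data (values invented). When the EPA 'extended' section is present,
# ram and vcpus are overwritten from the numa definition by the code below
# (memory -> memory*1024 MB, paired-threads -> paired-threads*2 vcpus):
#
#   flavor_data = {'name': 'epa.flavor', 'ram': 2048, 'vcpus': 2, 'disk': 10,
#                  'extended': {'numas': [{'memory': 4, 'paired-threads': 2,
#                                          'interfaces': [{'name': 'xe0', 'dedicated': 'yes'}]}]}}
#   # stored internally with ram=4096, vcpus=4, disk=10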
902
903 # generate a new uuid put to internal dict and return it.
904 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
905 new_flavor=flavor_data
906 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
907 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
908 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
909
910 if not isinstance(ram, int):
911 raise vimconn.vimconnException("Non-integer value for ram")
912 elif not isinstance(cpu, int):
913 raise vimconn.vimconnException("Non-integer value for cpu")
914 elif not isinstance(disk, int):
915 raise vimconn.vimconnException("Non-integer value for disk")
916
917 extended_flv = flavor_data.get("extended")
918 if extended_flv:
919 numas=extended_flv.get("numas")
920 if numas:
921 for numa in numas:
922 #overwrite ram and vcpus
923 if 'memory' in numa:
924 ram = numa['memory']*1024
925 if 'paired-threads' in numa:
926 cpu = numa['paired-threads']*2
927 elif 'cores' in numa:
928 cpu = numa['cores']
929 elif 'threads' in numa:
930 cpu = numa['threads']
931
932 new_flavor[FLAVOR_RAM_KEY] = ram
933 new_flavor[FLAVOR_VCPUS_KEY] = cpu
934 new_flavor[FLAVOR_DISK_KEY] = disk
935 # generate a new uuid put to internal dict and return it.
936 flavor_id = uuid.uuid4()
937 vimconnector.flavorlist[str(flavor_id)] = new_flavor
938 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
939
940 return str(flavor_id)
941
942 def delete_flavor(self, flavor_id):
943 """Deletes a tenant flavor from VIM identify by its id
944
945 Returns the used id or raise an exception
946 """
947 if flavor_id not in vimconnector.flavorlist:
948 raise vimconn.vimconnNotFoundException("Flavor not found.")
949
950 vimconnector.flavorlist.pop(flavor_id, None)
951 return flavor_id
952
953 def new_image(self, image_dict):
954 """
955 Adds a tenant image to VIM
956 Returns:
957 the image (catalog) identifier if the image is created,
958 otherwise an exception is raised
959 """
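# Illustrative usage (path invented; 'conn' is a hypothetical instance): the image is
# identified by the 'location' key, which must point to a local OVF file.
#
#   image_id = conn.new_image({'location': '/opt/images/cirros.ovf'})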
960
961 return self.get_image_id_from_path(image_dict['location'])
962
963 def delete_image(self, image_id):
964 """
965 Deletes a tenant image from VIM
966 Args:
967 image_id is ID of Image to be deleted
968 Return:
969 returns the image identifier in UUID format or raises an exception on error
970 """
971 conn = self.connect_as_admin()
972 if not conn:
973 raise vimconn.vimconnConnectionException("Failed to connect vCD")
974 # Get Catalog details
975 url_list = [self.url, '/api/catalog/', image_id]
976 catalog_herf = ''.join(url_list)
977
978 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
979 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
980
981 response = self.perform_request(req_type='GET',
982 url=catalog_herf,
983 headers=headers)
984
985 if response.status_code != requests.codes.ok:
986 self.logger.debug("delete_image():GET REST API call {} failed. "\
987 "Return status code {}".format(catalog_herf,
988 response.status_code))
989 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
990
991 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
992 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
993 #For python3
994 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
995 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
996
997 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
998 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
999 for catalogItem in catalogItems:
1000 catalogItem_href = catalogItem.attrib['href']
1001
1002 response = self.perform_request(req_type='GET',
1003 url=catalogItem_href,
1004 headers=headers)
1005
1006 if response.status_code != requests.codes.ok:
1007 self.logger.debug("delete_image():GET REST API call {} failed. "\
1008 "Return status code {}".format(catalog_herf,
1009 response.status_code))
1010 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
1011 catalogItem,
1012 image_id))
1013
1014 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1015 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
1016 #For python3
1017 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
1018 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
1019 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
1020
1021 #Remove catalogItem
1022 response = self.perform_request(req_type='DELETE',
1023 url=catalogitem_remove_href,
1024 headers=headers)
1025 if response.status_code == requests.codes.no_content:
1026 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1027 else:
1028 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
1029
1030 #Remove catalog
1031 url_list = [self.url, '/api/admin/catalog/', image_id]
1032 catalog_remove_herf = ''.join(url_list)
1033 response = self.perform_request(req_type='DELETE',
1034 url=catalog_remove_herf,
1035 headers=headers)
1036
1037 if response.status_code == requests.codes.no_content:
1038 self.logger.debug("Deleted Catalog {}".format(image_id))
1039 return image_id
1040 else:
1041 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
1042
1043
1044 def catalog_exists(self, catalog_name, catalogs):
1045 """
1046 Check whether a catalog with the given name exists.
1047 :param catalog_name: name of the catalog to look for
1048 :param catalogs: list of catalogs, as returned by list_catalogs()
1049 :return: the catalog id if found, otherwise None
1050 """
1051 for catalog in catalogs:
1052 if catalog['name'] == catalog_name:
1053 return catalog['id']
1054
1055 def create_vimcatalog(self, vca=None, catalog_name=None):
1056 """ Create new catalog entry in vCloud director.
1057
1058 Args
1059 vca: vCloud director.
1060 catalog_name: catalog that the client wishes to create. Note no validation is done on the name.
1061 The client must make sure to provide a valid string representation.
1062
1063 Returns catalog id if catalog created else None.
1064
1065 """
1066 try:
1067 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1068 if lxml_catalog_element:
1069 id_attr_value = lxml_catalog_element.get('id') # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
1070 return id_attr_value.split(':')[-1]
1071 catalogs = vca.list_catalogs()
1072 except Exception as ex:
1073 self.logger.error(
1074 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
1075 raise
1076 return self.catalog_exists(catalog_name, catalogs)
1077
1078 # noinspection PyIncorrectDocstring
1079 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
1080 description='', progress=False, chunk_bytes=128 * 1024):
1081 """
1082 Uploads a OVF file to a vCloud catalog
1083
1084 :param chunk_bytes:
1085 :param progress:
1086 :param description:
1087 :param image_name:
1088 :param vca:
1089 :param catalog_name: (str): The name of the catalog to upload the media.
1090 :param media_file_name: (str): The name of the local media file to upload.
1091 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1092 """
1093 os.path.isfile(media_file_name)
1094 statinfo = os.stat(media_file_name)
1095
1096 # find a catalog entry where we upload OVF.
1097 # create vApp Template and check the status; if vCD is able to read the OVF it will respond with the
1098 # appropriate status change.
1099 # if VCD can parse OVF we upload VMDK file
1100 try:
1101 for catalog in vca.list_catalogs():
1102 if catalog_name != catalog['name']:
1103 continue
1104 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
1105 data = """
1106 <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
1107 """.format(catalog_name, description)
1108
1109 if self.client:
1110 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1111 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1112 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1113
1114 response = self.perform_request(req_type='POST',
1115 url=catalog_href,
1116 headers=headers,
1117 data=data)
1118
1119 if response.status_code == requests.codes.created:
1120 catalogItem = XmlElementTree.fromstring(response.content)
1121 entity = [child for child in catalogItem if
1122 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1123 href = entity.get('href')
1124 template = href
1125
1126 response = self.perform_request(req_type='GET',
1127 url=href,
1128 headers=headers)
1129
1130 if response.status_code == requests.codes.ok:
1131 headers['Content-Type'] = 'Content-Type text/xml'
1132 result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"',response.content)
1133 if result:
1134 transfer_href = result.group(1)
1135
1136 response = self.perform_request(req_type='PUT',
1137 url=transfer_href,
1138 headers=headers,
1139 data=open(media_file_name, 'rb'))
1140 if response.status_code != requests.codes.ok:
1141 self.logger.debug(
1142 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1143 media_file_name))
1144 return False
1145
1146 # TODO fix this with an async block
1147 time.sleep(5)
1148
1149 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1150
1151 # uploading VMDK file
1152 # check status of OVF upload and upload remaining files.
1153 response = self.perform_request(req_type='GET',
1154 url=template,
1155 headers=headers)
1156
1157 if response.status_code == requests.codes.ok:
1158 result = re.search('rel="upload:default"\s*href="(.*?vmdk)"',response.content)
1159 if result:
1160 link_href = result.group(1)
1161 # we skip the ovf since it is already uploaded.
1162 if 'ovf' in link_href:
1163 continue
1164 # The OVF file and VMDK must be in the same directory
1165 head, tail = os.path.split(media_file_name)
1166 file_vmdk = head + '/' + link_href.split("/")[-1]
1167 if not os.path.isfile(file_vmdk):
1168 return False
1169 statinfo = os.stat(file_vmdk)
1170 if statinfo.st_size == 0:
1171 return False
1172 hrefvmdk = link_href
1173
1174 if progress:
1175 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1176 FileTransferSpeed()]
1177 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1178
1179 bytes_transferred = 0
1180 f = open(file_vmdk, 'rb')
1181 while bytes_transferred < statinfo.st_size:
1182 my_bytes = f.read(chunk_bytes)
1183 if len(my_bytes) <= chunk_bytes:
1184 headers['Content-Range'] = 'bytes %s-%s/%s' % (
1185 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1186 headers['Content-Length'] = str(len(my_bytes))
1187 response = requests.put(url=hrefvmdk,
1188 headers=headers,
1189 data=my_bytes,
1190 verify=False)
1191 if response.status_code == requests.codes.ok:
1192 bytes_transferred += len(my_bytes)
1193 if progress:
1194 progress_bar.update(bytes_transferred)
1195 else:
1196 self.logger.debug(
1197 'file upload failed with error: [%s] %s' % (response.status_code,
1198 response.content))
1199
1200 f.close()
1201 return False
1202 f.close()
1203 if progress:
1204 progress_bar.finish()
1205 time.sleep(10)
1206 return True
1207 else:
1208 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1209 format(catalog_name, media_file_name))
1210 return False
1211 except Exception as exp:
1212 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1213 .format(catalog_name,media_file_name, exp))
1214 raise vimconn.vimconnException(
1215 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1216 .format(catalog_name,media_file_name, exp))
1217
1218 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1219 return False
1220
1221 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1222 """Upload media file"""
1223 # TODO add named parameters for readability
1224
1225 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1226 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1227
1228 def validate_uuid4(self, uuid_string=None):
1229 """ Method validate correct format of UUID.
1230
1231 Return: true if string represent valid uuid
1232 """
1233 try:
1234 val = uuid.UUID(uuid_string, version=4)
1235 except ValueError:
1236 return False
1237 return True
1238
1239 def get_catalogid(self, catalog_name=None, catalogs=None):
1240 """ Method check catalog and return catalog ID in UUID format.
1241
1242 Args
1243 catalog_name: catalog name as string
1244 catalogs: list of catalogs.
1245
1246 Return: catalogs uuid
1247 """
1248
1249 for catalog in catalogs:
1250 if catalog['name'] == catalog_name:
1251 catalog_id = catalog['id']
1252 return catalog_id
1253 return None
1254
1255 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1256 """ Method checks the catalogs and returns the catalog name; the lookup is done by catalog UUID.
1257 
1258 Args
1259 catalog_uuid: catalog UUID as string
1260 catalogs: list of catalogs.
1261 
1262 Return: catalog name or None
1263 """
1264
1265 if not self.validate_uuid4(uuid_string=catalog_uuid):
1266 return None
1267
1268 for catalog in catalogs:
1269 catalog_id = catalog.get('id')
1270 if catalog_id == catalog_uuid:
1271 return catalog.get('name')
1272 return None
1273
1274 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1275 """ Method checks the catalogs and returns the catalog object; the lookup is done by catalog UUID.
1276 
1277 Args
1278 catalog_uuid: catalog UUID as string
1279 catalogs: list of catalogs.
1280 
1281 Return: catalog object or None
1282 """
1283
1284 if not self.validate_uuid4(uuid_string=catalog_uuid):
1285 return None
1286
1287 for catalog in catalogs:
1288 catalog_id = catalog.get('id')
1289 if catalog_id == catalog_uuid:
1290 return catalog
1291 return None
1292
1293 def get_image_id_from_path(self, path=None, progress=False):
1294 """ Method uploads an OVF image to vCloud director.
1295 
1296 Each OVF image is represented as a single catalog entry in vCloud director.
1297 The method checks for an existing catalog entry. The check is done by file name without file extension.
1298 
1299 If the given catalog name is already present the method responds with the existing catalog uuid, otherwise
1300 it creates a new catalog entry and uploads the OVF file to the newly created catalog.
1301 
1302 If the method can't create a catalog entry or upload the file it raises an exception.
1303 
1304 The method accepts a boolean flag progress that will output a progress bar. It is useful
1305 for the standalone upload use case, e.g. to test a large file upload.
1306 
1307 Args
1308 path: - valid path to the OVF file.
1309 progress - boolean flag to show a progress bar.
1310 
1311 Return: if the image is uploaded correctly the method provides the image catalog UUID.
1312 """
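# Illustrative usage (path invented; 'conn' is a hypothetical instance): the catalog entry is
# named after the md5 of the given path, so re-uploading the same file returns the UUID of
# the existing catalog.
#
#   image_id = conn.get_image_id_from_path('/opt/images/cirros.ovf', progress=True)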
1313
1314 if not path:
1315 raise vimconn.vimconnException("Image path can't be None.")
1316
1317 if not os.path.isfile(path):
1318 raise vimconn.vimconnException("Can't read file. File not found.")
1319
1320 if not os.access(path, os.R_OK):
1321 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1322
1323 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1324
1325 dirpath, filename = os.path.split(path)
1326 flname, file_extension = os.path.splitext(path)
1327 if file_extension != '.ovf':
1328 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1329 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1330
1331 catalog_name = os.path.splitext(filename)[0]
1332 catalog_md5_name = hashlib.md5(path).hexdigest()
1333 self.logger.debug("File name {} Catalog Name {} file path {} "
1334 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1335
1336 try:
1337 org,vdc = self.get_vdc_details()
1338 catalogs = org.list_catalogs()
1339 except Exception as exp:
1340 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1341 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1342
1343 if len(catalogs) == 0:
1344 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1345 if self.create_vimcatalog(org, catalog_md5_name) is None:
1346 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1347
1348 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1349 media_name=filename, medial_file_name=path, progress=progress)
1350 if not result:
1351 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1352 return self.get_catalogid(catalog_name, catalogs)
1353 else:
1354 for catalog in catalogs:
1355 # search for existing catalog if we find same name we return ID
1356 # TODO optimize this
1357 if catalog['name'] == catalog_md5_name:
1358 self.logger.debug("Found existing catalog entry for {} "
1359 "catalog id {}".format(catalog_name,
1360 self.get_catalogid(catalog_md5_name, catalogs)))
1361 return self.get_catalogid(catalog_md5_name, catalogs)
1362
1363 # if we didn't find existing catalog we create a new one and upload image.
1364 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1365 if self.create_vimcatalog(org, catalog_md5_name) is None:
1366 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1367
1368 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1369 media_name=filename, medial_file_name=path, progress=progress)
1370 if not result:
1371 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1372
1373 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1374
1375 def get_image_list(self, filter_dict={}):
1376 '''Obtain tenant images from VIM
1377 Filter_dict can be:
1378 name: image name
1379 id: image uuid
1380 checksum: image checksum
1381 location: image path
1382 Returns the image list of dictionaries:
1383 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1384 List can be empty
1385 '''
1386
1387 try:
1388 org, vdc = self.get_vdc_details()
1389 image_list = []
1390 catalogs = org.list_catalogs()
1391 if len(catalogs) == 0:
1392 return image_list
1393 else:
1394 for catalog in catalogs:
1395 catalog_uuid = catalog.get('id')
1396 name = catalog.get('name')
1397 filtered_dict = {}
1398 if filter_dict.get("name") and filter_dict["name"] != name:
1399 continue
1400 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1401 continue
1402 filtered_dict ["name"] = name
1403 filtered_dict ["id"] = catalog_uuid
1404 image_list.append(filtered_dict)
1405
1406 self.logger.debug("List of already created catalog items: {}".format(image_list))
1407 return image_list
1408 except Exception as exp:
1409 raise vimconn.vimconnException("Exception occurred while retrieving catalog items {}".format(exp))
1410
1411 def get_vappid(self, vdc=None, vapp_name=None):
1412 """ Method takes vdc object and vApp name and returns vapp uuid or None
1413
1414 Args:
1415 vdc: The VDC object.
1416 vapp_name: is the vApp name identifier
1417 
1418 Returns:
1419 The vApp UUID, otherwise None
1420 """
1421 if vdc is None or vapp_name is None:
1422 return None
1423 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1424 try:
1425 refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1426 vdc.ResourceEntities.ResourceEntity)
1427 #For python3
1428 #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1429 # if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1430 if len(refs) == 1:
1431 return refs[0].href.split("vapp")[1][1:]
1432 except Exception as e:
1433 self.logger.exception(e)
1434 return False
1435 return None
1436
1437 def check_vapp(self, vdc=None, vapp_uuid=None):
1438 """ Method returns True or False depending on whether the vApp is deployed in vCloud director
1439
1440 Args:
1441 vca: Connector to VCA
1442 vdc: The VDC object.
1443 vappid: vappid is application identifier
1444
1445 Returns:
1446 The return True if vApp deployed
1447 :param vdc:
1448 :param vapp_uuid:
1449 """
1450 try:
1451 refs = filter(lambda ref:
1452 ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
1453 vdc.ResourceEntities.ResourceEntity)
1454 #For python3
1455 #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1456 # if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1457 for ref in refs:
1458 vappid = ref.href.split("vapp")[1][1:]
1459 # find vapp with respected vapp uuid
1460 if vappid == vapp_uuid:
1461 return True
1462 except Exception as e:
1463 self.logger.exception(e)
1464 return False
1465 return False
1466
1467 def get_namebyvappid(self, vapp_uuid=None):
1468 """Method returns vApp name from vCD and lookup done by vapp_id.
1469
1470 Args:
1471 vapp_uuid: vappid is application identifier
1472
1473 Returns:
1474 The return vApp name otherwise None
1475 """
1476 try:
1477 if self.client and vapp_uuid:
1478 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1479 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1480 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1481
1482 response = self.perform_request(req_type='GET',
1483 url=vapp_call,
1484 headers=headers)
1485 #Retry login if session expired & retry sending request
1486 if response.status_code == 403:
1487 response = self.retry_rest('GET', vapp_call)
1488
1489 tree = XmlElementTree.fromstring(response.content)
1490 return tree.attrib['name']
1491 except Exception as e:
1492 self.logger.exception(e)
1493 return None
1494 return None
1495
1496 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1497 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1498 """Adds a VM instance to VIM
1499 Params:
1500 'start': (boolean) indicates if VM must start or created in pause mode.
1501 'image_id','flavor_id': image and flavor VIM id to use for the VM
1502 'net_list': list of interfaces, each one is a dictionary with:
1503 'name': (optional) name for the interface.
1504 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1505 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1506 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1507 'mac_address': (optional) mac address to assign to this interface
1508 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1509 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1510 'type': (mandatory) can be one of:
1511 'virtual', in this case always connected to a network of type 'net_type=bridge'
1512 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
1513 can be created unconnected
1514 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1515 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1516 are allocated on the same physical NIC
1517 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1518 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1519 or True, it must apply the default VIM behaviour
1520 After execution the method will add the key:
1521 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1522 interface. 'net_list' is modified
1523 'cloud_config': (optional) dictionary with:
1524 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1525 'users': (optional) list of users to be inserted, each item is a dict with:
1526 'name': (mandatory) user name,
1527 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1528 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1529 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1530 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1531 'dest': (mandatory) string with the destination absolute path
1532 'encoding': (optional, by default text). Can be one of:
1533 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1534 'content' (mandatory): string with the content of the file
1535 'permissions': (optional) string with file permissions, typically octal notation '0644'
1536 'owner': (optional) file owner, string with the format 'owner:group'
1537 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1538 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1539 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1540 'size': (mandatory) string with the size of the disk in GB
1541 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AV required
1542 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1543 availability_zone_index is None
1544 Returns a tuple with the instance identifier and created_items or raises an exception on error
1545 created_items can be None or a dictionary where this method can include key-values that will be passed to
1546 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1547 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1548 as not present.
1549 """
1550 self.logger.info("Creating new instance for entry {}".format(name))
1551 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
1552 "availability_zone_index {} availability_zone_list {}"\
1553 .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
1554 availability_zone_index, availability_zone_list))
1555
1556 #new vm name = vmname + '-' + uuid
1557 new_vm_name = [name, '-', str(uuid.uuid4())]
1558 vmname_andid = ''.join(new_vm_name)
1559
1560 for net in net_list:
1561 if net['type'] == "PCI-PASSTHROUGH":
1562 raise vimconn.vimconnNotSupportedException(
1563 "Current vCD version does not support type : {}".format(net['type']))
1564
1565 if len(net_list) > 10:
1566 raise vimconn.vimconnNotSupportedException(
1567 "The VM hardware versions 7 and above support upto 10 NICs only")
1568
1569 # if vm already deployed we return existing uuid
1570 # we check for presence of VDC, Catalog entry and Flavor.
1571 org, vdc = self.get_vdc_details()
1572 if vdc is None:
1573 raise vimconn.vimconnNotFoundException(
1574 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1575 catalogs = org.list_catalogs()
1576 if catalogs is None:
1577 #Retry once, if failed by refreshing token
1578 self.get_token()
1579 org = Org(self.client, resource=self.client.get_org())
1580 catalogs = org.list_catalogs()
1581 if catalogs is None:
1582 raise vimconn.vimconnNotFoundException(
1583 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1584
1585 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1586 if catalog_hash_name:
1587 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1588 else:
1589 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to create vApp {}: "
1590 "(Failed to retrieve catalog information {})".format(name, image_id))
1591
1592 # Set vCPU and Memory based on flavor.
1593 vm_cpus = None
1594 vm_memory = None
1595 vm_disk = None
1596 numas = None
1597
1598 if flavor_id is not None:
1599 if flavor_id not in vimconnector.flavorlist:
1600 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to create vApp {}: "
1601 "Failed to retrieve flavor information "
1602 "flavor id {}".format(name, flavor_id))
1603 else:
1604 try:
1605 flavor = vimconnector.flavorlist[flavor_id]
1606 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1607 vm_memory = flavor[FLAVOR_RAM_KEY]
1608 vm_disk = flavor[FLAVOR_DISK_KEY]
1609 extended = flavor.get("extended", None)
1610 if extended:
1611 numas=extended.get("numas", None)
1612
1613 except Exception as exp:
1614 raise vimconn.vimconnException("Corrupted flavor {}. Exception: {}".format(flavor_id, exp))
1615
1616 # image upload creates the template with the name '<catalog name> Template'.
1617 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1618 power_on = 'false'
1619 if start:
1620 power_on = 'true'
1621
1622 # client must provide at least one entry in net_list; if not, we report an error
1623 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1624 #If no mgmt net, then the 1st net in net_list is considered the primary net.
1625 primary_net = None
1626 primary_netname = None
1627 primary_net_href = None
1628 network_mode = 'bridged'
1629 if net_list is not None and len(net_list) > 0:
1630 for net in net_list:
1631 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1632 primary_net = net
1633 if primary_net is None:
1634 primary_net = net_list[0]
1635
1636 try:
1637 primary_net_id = primary_net['net_id']
1638 url_list = [self.url, '/api/network/', primary_net_id]
1639 primary_net_href = ''.join(url_list)
1640 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1641 if 'name' in network_dict:
1642 primary_netname = network_dict['name']
1643
1644 except KeyError:
1645 raise vimconn.vimconnException("Corrupted primary network definition. {}".format(primary_net))
1646 else:
1647 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create vApp {}: network list is empty.".format(name))
1648
1649 # use: 'data', 'bridge', 'mgmt'
1650 # create vApp. Set vcpu and ram based on flavor id.
1651 try:
1652 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1653 if not vdc_obj:
1654 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
1655
1656 for retry in (1,2):
1657 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1658 catalog_items = [items.attrib]
1659
1660 if len(catalog_items) == 1:
1661 if self.client:
1662 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1663 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1664
1665 response = self.perform_request(req_type='GET',
1666 url=catalog_items[0].get('href'),
1667 headers=headers)
1668 catalogItem = XmlElementTree.fromstring(response.content)
1669 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1670 vapp_template_href = entity.get("href")
1671
1672 response = self.perform_request(req_type='GET',
1673 url=vapp_template_href,
1674 headers=headers)
1675 if response.status_code != requests.codes.ok:
1676 self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_template_href,
1677 response.status_code))
1678 else:
1679 result = (response.content).replace("\n"," ")
1680
1681 vapp_template_tree = XmlElementTree.fromstring(response.content)
1682 children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
1683 vm_element = [child for child in children_element if 'Vm' in child.tag][0]
1684 vm_name = vm_element.get('name')
1685 vm_id = vm_element.get('id')
1686 vm_href = vm_element.get('href')
1687
1688 cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1689 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1690 cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
1691
1692 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
1693 vdc_id = vdc.get('id').split(':')[-1]
1694 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
1695 vdc_id)
1696 data = """<?xml version="1.0" encoding="UTF-8"?>
1697 <InstantiateVAppTemplateParams
1698 xmlns="http://www.vmware.com/vcloud/v1.5"
1699 name="{}"
1700 deploy="false"
1701 powerOn="false"
1702 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
1703 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1704 <Description>Vapp instantiation</Description>
1705 <InstantiationParams>
1706 <NetworkConfigSection>
1707 <ovf:Info>Configuration parameters for logical networks</ovf:Info>
1708 <NetworkConfig networkName="{}">
1709 <Configuration>
1710 <ParentNetwork href="{}" />
1711 <FenceMode>bridged</FenceMode>
1712 </Configuration>
1713 </NetworkConfig>
1714 </NetworkConfigSection>
1715 <LeaseSettingsSection
1716 type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
1717 <ovf:Info>Lease Settings</ovf:Info>
1718 <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
1719 <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
1720 </LeaseSettingsSection>
1721 </InstantiationParams>
1722 <Source href="{}"/>
1723 <SourcedItem>
1724 <Source href="{}" id="{}" name="{}"
1725 type="application/vnd.vmware.vcloud.vm+xml"/>
1726 <VmGeneralParams>
1727 <NeedsCustomization>false</NeedsCustomization>
1728 </VmGeneralParams>
1729 <InstantiationParams>
1730 <NetworkConnectionSection>
1731 <ovf:Info>Specifies the available VM network connections</ovf:Info>
1732 <NetworkConnection network="{}">
1733 <NetworkConnectionIndex>0</NetworkConnectionIndex>
1734 <IsConnected>true</IsConnected>
1735 <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
1736 </NetworkConnection>
1737 </NetworkConnectionSection><ovf:VirtualHardwareSection>
1738 <ovf:Info>Virtual hardware requirements</ovf:Info>
1739 <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
1740 xmlns:vmw="http://www.vmware.com/schema/ovf">
1741 <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
1742 <rasd:Description>Number of Virtual CPUs</rasd:Description>
1743 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
1744 <rasd:InstanceID>4</rasd:InstanceID>
1745 <rasd:Reservation>0</rasd:Reservation>
1746 <rasd:ResourceType>3</rasd:ResourceType>
1747 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
1748 <rasd:Weight>0</rasd:Weight>
1749 <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
1750 </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
1751 <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
1752 <rasd:Description>Memory Size</rasd:Description>
1753 <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
1754 <rasd:InstanceID>5</rasd:InstanceID>
1755 <rasd:Reservation>0</rasd:Reservation>
1756 <rasd:ResourceType>4</rasd:ResourceType>
1757 <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
1758 <rasd:Weight>0</rasd:Weight>
1759 </ovf:Item>
1760 </ovf:VirtualHardwareSection>
1761 </InstantiationParams>
1762 </SourcedItem>
1763 <AllEULAsAccepted>false</AllEULAsAccepted>
1764 </InstantiateVAppTemplateParams>""".format(vmname_andid,
1765 primary_netname,
1766 primary_net_href,
1767 vapp_template_href,
1768 vm_href,
1769 vm_id,
1770 vm_name,
1771 primary_netname,
1772 cpu=cpus,
1773 core=cores,
1774 memory=memory_mb)
1775
1776 response = self.perform_request(req_type='POST',
1777 url=instantiate_vapp_href,
1778 headers=headers,
1779 data=data)
1780
1781 if response.status_code != 201:
1782 self.logger.error("REST call {} failed reason : {} "\
1783 "status code : {}".format(instantiate_vapp_href,
1784 response.content,
1785 response.status_code))
1786 raise vimconn.vimconnException("new_vminstance(): Failed to create "\
1787 "vApp {}".format(vmname_andid))
1788 else:
1789 vapptask = self.get_task_from_response(response.content)
1790
1791 if vapptask is None and retry==1:
1792 self.get_token() # Retry getting token
1793 continue
1794 else:
1795 break
1796
1797 if vapptask is None or vapptask is False:
1798 raise vimconn.vimconnUnexpectedResponse(
1799 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1800
1801 # wait for task to complete
1802 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
1803
1804 if result.get('status') == 'success':
1805 self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
1806 else:
1807 raise vimconn.vimconnUnexpectedResponse(
1808 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1809
1810 except Exception as exp:
1811 raise vimconn.vimconnUnexpectedResponse(
1812 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1813
1814 # we should have now vapp in undeployed state.
1815 try:
1816 vdc_obj = VDC(self.client, href=vdc.get('href'))
1817 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1818 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1819 vapp = VApp(self.client, resource=vapp_resource)
1820
1821 except Exception as exp:
1822 raise vimconn.vimconnUnexpectedResponse(
1823 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1824 .format(vmname_andid, exp))
1825
1826 if vapp_uuid is None:
1827 raise vimconn.vimconnUnexpectedResponse(
1828 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
1829 vmname_andid))
1830
1831 #Add PCI passthrough/SRIOV configurations
1832 vm_obj = None
1833 pci_devices_info = []
1834 reserve_memory = False
1835
1836 for net in net_list:
1837 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1838 pci_devices_info.append(net)
1839 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
1840 reserve_memory = True
1841
1842 #Add PCI
1843 if len(pci_devices_info) > 0:
1844 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1845 vmname_andid ))
1846 PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
1847 pci_devices_info,
1848 vmname_andid)
1849 if PCI_devices_status:
1850 self.logger.info("Added PCI devives {} to VM {}".format(
1851 pci_devices_info,
1852 vmname_andid)
1853 )
1854 reserve_memory = True
1855 else:
1856 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1857 pci_devices_info,
1858 vmname_andid)
1859 )
1860
1861 # Modify vm disk
1862 if vm_disk:
1863 #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1864 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1865 if result :
1866 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1867
1868 #Add new or existing disks to vApp
1869 if disk_list:
1870 added_existing_disk = False
1871 for disk in disk_list:
1872 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1873 image_id = disk['image_id']
1874 # Adding CD-ROM to VM
1875 # will revisit code once specification ready to support this feature
1876 self.insert_media_to_vm(vapp, image_id)
1877 elif "image_id" in disk and disk["image_id"] is not None:
1878 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1879 disk["image_id"] , vapp_uuid))
1880 self.add_existing_disk(catalogs=catalogs,
1881 image_id=disk["image_id"],
1882 size = disk["size"],
1883 template_name=templateName,
1884 vapp_uuid=vapp_uuid
1885 )
1886 added_existing_disk = True
1887 else:
1888 #Wait till added existing disk gets reflected into vCD database/API
1889 if added_existing_disk:
1890 time.sleep(5)
1891 added_existing_disk = False
1892 self.add_new_disk(vapp_uuid, disk['size'])
1893
1894 if numas:
1895 # Assigning numa affinity setting
1896 for numa in numas:
1897 if 'paired-threads-id' in numa:
1898 paired_threads_id = numa['paired-threads-id']
1899 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1900
1901 # add NICs & connect to networks in netlist
1902 try:
1903 vdc_obj = VDC(self.client, href=vdc.get('href'))
1904 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1905 vapp = VApp(self.client, resource=vapp_resource)
1906 vapp_id = vapp_resource.get('id').split(':')[-1]
1907
1908 self.logger.info("Removing primary NIC: ")
1909 # First remove all NICs so that NIC properties can be adjusted as needed
1910 self.remove_primary_network_adapter_from_all_vms(vapp)
1911
1912 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1913 primary_nic_index = 0
1914 nicIndex = 0
1915 for net in net_list:
1916 # openmano uses network id in UUID format.
1917 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up a name
1918 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1919 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1920
1921 if 'net_id' not in net:
1922 continue
1923
1924 #Using net_id as vim_id i.e. vim interface id, as we do not have a separate vim interface id
1925 #Same will be returned in refresh_vms_status() as vim_interface_id
1926 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1927
1928 interface_net_id = net['net_id']
1929 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1930 interface_network_mode = net['use']
1931
1932 if interface_network_mode == 'mgmt':
1933 primary_nic_index = nicIndex
1934
1935 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1936 - DHCP (The IP address is obtained from a DHCP service.)
1937 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1938 - NONE (No IP addressing mode specified.)"""
1939
1940 if primary_netname is not None:
1941 self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
1942 nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
1943 #For python3
1944 #nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
1945 if len(nets) == 1:
1946 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
1947
1948 if interface_net_name != primary_netname:
1949 # connect network to VM - with all DHCP by default
1950 self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
1951 self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
1952
1953 type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
1954 nic_type = 'VMXNET3'
1955 if 'type' in net and net['type'] not in type_list:
1956 # fetching nic type from vnf
1957 if 'model' in net:
1958 if net['model'] is not None:
1959 if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
1960 nic_type = 'VMXNET3'
1961 else:
1962 nic_type = net['model']
1963
1964 self.logger.info("new_vminstance(): adding network adapter "\
1965 "to a network {}".format(nets[0].get('name')))
1966 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1967 primary_nic_index,
1968 nicIndex,
1969 net,
1970 nic_type=nic_type)
1971 else:
1972 self.logger.info("new_vminstance(): adding network adapter "\
1973 "to a network {}".format(nets[0].get('name')))
1974 if net['type'] in ['SR-IOV', 'VF']:
1975 nic_type = net['type']
1976 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1977 primary_nic_index,
1978 nicIndex,
1979 net,
1980 nic_type=nic_type)
1981 nicIndex += 1
1982
1983 # cloud-init for ssh-key injection
1984 if cloud_config:
1985 # Create a catalog which will be carrying the config drive ISO
1986 # This catalog is deleted during vApp deletion. The catalog name carries
1987 # vApp UUID and that's how it gets identified during its deletion.
1988 config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
1989 self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
1990 config_drive_catalog_name))
1991 config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
1992 if config_drive_catalog_id is None:
1993 error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
1994 "ISO".format(config_drive_catalog_name)
1995 raise Exception(error_msg)
1996
1997 # Create config-drive ISO
1998 _, userdata = self._create_user_data(cloud_config)
1999 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
2000 iso_path = self.create_config_drive_iso(userdata)
2001 self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
2002
2003 self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
2004 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
2005 # Attach the config-drive ISO to the VM
2006 self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
2007 # The ISO remains in INVALID_STATE right after the PUT request (it's a blocking call though)
2008 time.sleep(5)
2009 self.insert_media_to_vm(vapp, config_drive_catalog_id)
2010 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
2011
2012 # If VM has PCI devices or SRIOV reserve memory for VM
2013 if reserve_memory:
2014 self.reserve_memory_for_all_vms(vapp, memory_mb)
2015
2016 self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
2017
2018 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2019 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2020 if result.get('status') == 'success':
2021 self.logger.info("new_vminstance(): Successfully power on "\
2022 "vApp {}".format(vmname_andid))
2023 else:
2024 self.logger.error("new_vminstance(): failed to power on vApp "\
2025 "{}".format(vmname_andid))
2026
2027 except Exception as exp:
2028 try:
2029 self.delete_vminstance(vapp_uuid)
2030 except Exception as exp2:
2031 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2032 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
2033 self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
2034 .format(name, exp))
2035 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
2036 .format(name, exp))
2037
2038 # check if vApp deployed and if that the case return vApp UUID otherwise -1
2039 wait_time = 0
2040 vapp_uuid = None
2041 while wait_time <= MAX_WAIT_TIME:
2042 try:
2043 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2044 vapp = VApp(self.client, resource=vapp_resource)
2045 except Exception as exp:
2046 raise vimconn.vimconnUnexpectedResponse(
2047 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
2048 .format(vmname_andid, exp))
2049
2050 #if vapp and vapp.me.deployed:
2051 if vapp and vapp_resource.get('deployed') == 'true':
2052 vapp_uuid = vapp_resource.get('id').split(':')[-1]
2053 break
2054 else:
2055 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
2056 time.sleep(INTERVAL_TIME)
2057
2058 wait_time +=INTERVAL_TIME
2059
2060 #SET Affinity Rule for VM
2061 #Pre-requisites: User has created Host Groups in vCenter with respective Hosts to be used
2062 #While creating VIM account user has to pass the Host Group names in availability_zone list
2063 #"availability_zone" is a part of VIM "config" parameters
2064 #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2065 #Host groups are referred as availability zones
2066 #With the following procedure, the deployed VM will be added into a VM group.
2067 #Then a VM to Host Affinity rule will be created using the VM group & Host group.
2068 if(availability_zone_list):
2069 self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
2070 #Admin access required for creating Affinity rules
2071 client = self.connect_as_admin()
2072 if not client:
2073 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
2074 else:
2075 self.client = client
2076 if self.client:
2077 headers = {'Accept':'application/*+xml;version=27.0',
2078 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2079 #Step1: Get provider vdc details from organization
2080 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2081 if pvdc_href is not None:
2082 #Step2: Found required pvdc, now get resource pool information
2083 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2084 if respool_href is None:
2085 #Raise error if respool_href not found
2086 msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
2087 .format(pvdc_href)
2088 self.log_message(msg)
2089
2090 #Step3: Verify requested availability zone(hostGroup) is present in vCD
2091 # get availability Zone
2092 vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
2093 # check if provided av zone(hostGroup) is present in vCD VIM
2094 status = self.check_availibility_zone(vm_az, respool_href, headers)
2095 if status is False:
2096 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
2097 "resource pool {} status: {}".format(vm_az,respool_href,status)
2098 self.log_message(msg)
2099 else:
2100 self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
2101
2102 #Step4: Find VM group references to create vm group
2103 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2104 if vmgrp_href is None:
2105 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2106 self.log_message(msg)
2107
2108 #Step5: Create a VmGroup with name az_VmGroup
2109 vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
2110 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2111 if status is not True:
2112 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
2113 self.log_message(msg)
2114
2115 #VM Group url to add vms to vm group
2116 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
2117
2118 #Step6: Add VM to VM Group
2119 #Find VM uuid from vapp_uuid
2120 vm_details = self.get_vapp_details_rest(vapp_uuid)
2121 vm_uuid = vm_details['vmuuid']
2122
2123 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2124 if status is not True:
2125 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
2126 self.log_message(msg)
2127
2128 #Step7: Create VM to Host affinity rule
2129 addrule_href = self.get_add_rule_reference (respool_href, headers)
2130 if addrule_href is None:
2131 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
2132 .format(respool_href)
2133 self.log_message(msg)
2134
2135 status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
2136 if status is False:
2137 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
2138 .format(name, vm_az)
2139 self.log_message(msg)
2140 else:
2141 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
2142 .format(name, vm_az))
2143 #Reset token to a normal user to perform other operations
2144 self.get_token()
2145
2146 if vapp_uuid is not None:
2147 return vapp_uuid, None
2148 else:
2149 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
2150
2151 def create_config_drive_iso(self, user_data):
2152 tmpdir = tempfile.mkdtemp()
2153 iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
2154 latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
2155 os.makedirs(latest_dir)
2156 with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
2157 open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
2158 userdata_file_obj.write(user_data)
2159 meta_file_obj.write(json.dumps({"availability_zone": "nova",
2160 "launch_index": 0,
2161 "name": "ConfigDrive",
2162 "uuid": str(uuid.uuid4())}
2163 )
2164 )
2165 genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
2166 iso_path=iso_path, source_dir_path=tmpdir)
2167 self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
2168 try:
2169 FNULL = open(os.devnull, 'w')
2170 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2171 except subprocess.CalledProcessError as e:
2172 shutil.rmtree(tmpdir, ignore_errors=True)
2173 error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
2174 self.logger.error(error_msg)
2175 raise Exception(error_msg)
2176 return iso_path
2177
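# For reference, the ISO produced by create_config_drive_iso() follows the
# OpenStack config-drive layout assembled above (volume label 'config-2'):
#   openstack/latest/meta_data.json  (availability_zone, launch_index, name, uuid)
#   openstack/latest/user_data       (the cloud-init user-data passed in)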
2178 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
2179 if not os.path.isfile(iso_file_path):
2180 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
2181 self.logger.error(error_msg)
2182 raise Exception(error_msg)
2183 iso_file_stat = os.stat(iso_file_path)
2184 xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
2185 <Media
2186 xmlns="http://www.vmware.com/vcloud/v1.5"
2187 name="{iso_name}"
2188 size="{iso_size}"
2189 imageType="iso">
2190 <Description>ISO image for config-drive</Description>
2191 </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
2192 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2193 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2194 headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
2195 catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
2196 response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
2197
2198 if response.status_code != 201:
2199 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
2200 self.logger.error(error_msg)
2201 raise Exception(error_msg)
2202
2203 catalogItem = XmlElementTree.fromstring(response.content)
2204 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
2205 entity_href = entity.get('href')
2206
2207 response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
2208 if response.status_code != 200:
2209 raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
2210
2211 match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
2212 if match:
2213 media_upload_href = match.group(1)
2214 else:
2215 raise Exception('Could not parse the upload URL for the media file from the last response')
2216 upload_iso_task = self.get_task_from_response(response.content)
2217 headers['Content-Type'] = 'application/octet-stream'
2218 response = self.perform_request(req_type='PUT',
2219 url=media_upload_href,
2220 headers=headers,
2221 data=open(iso_file_path, 'rb'))
2222
2223 if response.status_code != 200:
2224 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2225 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2226 if result.get('status') != 'success':
2227 raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
2228
2229 def get_vcd_availibility_zones(self,respool_href, headers):
2230 """ Method to find presence of av zone is VIM resource pool
2231
2232 Args:
2233 respool_href - resource pool href
2234 headers - header information
2235
2236 Returns:
2237 vcd_az - list of availability zones present in vCD
2238 """
2239 vcd_az = []
2240 url=respool_href
2241 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2242
2243 if resp.status_code != requests.codes.ok:
2244 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2245 else:
2246 #Get the href to hostGroups and find provided hostGroup is present in it
2247 resp_xml = XmlElementTree.fromstring(resp.content)
2248 for child in resp_xml:
2249 if 'VMWProviderVdcResourcePool' in child.tag:
2250 for schild in child:
2251 if 'Link' in schild.tag:
2252 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2253 hostGroup = schild.attrib.get('href')
2254 hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
2255 if hg_resp.status_code != requests.codes.ok:
2256 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
2257 else:
2258 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2259 for hostGroup in hg_resp_xml:
2260 if 'HostGroup' in hostGroup.tag:
2261 #append host group name to the list
2262 vcd_az.append(hostGroup.attrib.get("name"))
2263 return vcd_az
2264
2265
2266 def set_availability_zones(self):
2267 """
2268 Set vim availability zone
2269 """
2270
2271 vim_availability_zones = None
2272 availability_zone = None
2273 if 'availability_zone' in self.config:
2274 vim_availability_zones = self.config.get('availability_zone')
2275 if isinstance(vim_availability_zones, str):
2276 availability_zone = [vim_availability_zones]
2277 elif isinstance(vim_availability_zones, list):
2278 availability_zone = vim_availability_zones
2279 else:
2280 return availability_zone
2281
2282 return availability_zone
2283
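# Illustrative behaviour of set_availability_zones() (hypothetical host group names):
#   config "availability_zone": "HG_170"              -> ["HG_170"]
#   config "availability_zone": ["HG_170", "HG_174"]  -> ["HG_170", "HG_174"]
#   missing or any other type                          -> None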
2284
2285 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2286 """
2287 Return the availability zone to be used by the created VM.
2288 returns: The VIM availability zone to be used or None
2289 """
2290 if availability_zone_index is None:
2291 if not self.config.get('availability_zone'):
2292 return None
2293 elif isinstance(self.config.get('availability_zone'), str):
2294 return self.config['availability_zone']
2295 else:
2296 return self.config['availability_zone'][0]
2297
2298 vim_availability_zones = self.availability_zone
2299
2300 # check if VIM offers enough availability zones described in the VNFD
2301 if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
2302 # check if all the names of NFV AV match VIM AV names
2303 match_by_index = False
2304 for av in availability_zone_list:
2305 if av not in vim_availability_zones:
2306 match_by_index = True
2307 break
2308 if match_by_index:
2309 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
2310 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
2311 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
2312 self.logger.debug("VIM Availability zones will be used by index")
2313 return vim_availability_zones[availability_zone_index]
2314 else:
2315 return availability_zone_list[availability_zone_index]
2316 else:
2317 raise vimconn.vimconnConflictException("Not enough availability zones at VIM for this deployment")
2318
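# Illustrative selection by get_vm_availability_zone() (hypothetical values):
# with VIM config "availability_zone": ["HG_170", "HG_174"], a VNFD zone list
# ["zone-a", "zone-b"] and availability_zone_index=1, the VNFD names do not match
# the VIM host groups, so selection falls back to the index -> "HG_174".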
2319
2320 def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
2321 """ Method to create VM to Host Affinity rule in vCD
2322
2323 Args:
2324 addrule_href - href to make a POST request
2325 vmgrpname - name of the VM group created
2326 hostgrpname - name of the host group created earlier
2327 polarity - Affinity or Anti-affinity (default: Affinity)
2328 headers - headers to make REST call
2329
2330 Returns:
2331 True- if rule is created
2332 False- Failed to create rule due to some error
2333
2334 """
2335 task_status = False
2336 rule_name = polarity + "_" + vmgrpname
2337 payload = """<?xml version="1.0" encoding="UTF-8"?>
2338 <vmext:VMWVmHostAffinityRule
2339 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2340 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2341 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2342 <vcloud:Name>{}</vcloud:Name>
2343 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2344 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2345 <vcloud:Polarity>{}</vcloud:Polarity>
2346 <vmext:HostGroupName>{}</vmext:HostGroupName>
2347 <vmext:VmGroupName>{}</vmext:VmGroupName>
2348 </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
2349
2350 resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
2351
2352 if resp.status_code != requests.codes.accepted:
2353 self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
2354 task_status = False
2355 return task_status
2356 else:
2357 affinity_task = self.get_task_from_response(resp.content)
2358 self.logger.debug ("affinity_task: {}".format(affinity_task))
2359 if affinity_task is None or affinity_task is False:
2360 raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
2361 # wait for task to complete
2362 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2363 if result.get('status') == 'success':
2364 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
2365 return True
2366 else:
2367 raise vimconn.vimconnUnexpectedResponse(
2368 "failed to create affinity rule {}".format(rule_name))
2369
2370
2371 def get_add_rule_reference (self, respool_href, headers):
2372 """ This method finds href to add vm to host affinity rule to vCD
2373
2374 Args:
2375 respool_href- href to resource pool
2376 headers- header information to make REST call
2377
2378 Returns:
2379 None - if no valid href to add rule found or
2380 addrule_href - href to add vm to host affinity rule of resource pool
2381 """
2382 addrule_href = None
2383 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2384
2385 if resp.status_code != requests.codes.ok:
2386 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2387 else:
2388
2389 resp_xml = XmlElementTree.fromstring(resp.content)
2390 for child in resp_xml:
2391 if 'VMWProviderVdcResourcePool' in child.tag:
2392 for schild in child:
2393 if 'Link' in schild.tag:
2394 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
2395 schild.attrib.get('rel') == "add":
2396 addrule_href = schild.attrib.get('href')
2397 break
2398
2399 return addrule_href
2400
2401
2402 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2403 """ Method to add deployed VM to newly created VM Group.
2404 This is required to create VM to Host affinity in vCD
2405
2406 Args:
2407 vm_uuid- newly created vm uuid
2408 vmGroupNameURL- URL to VM Group name
2409 vmGroup_name- Name of VM group created
2410 headers- Headers for REST request
2411
2412 Returns:
2413 True- if VM added to VM group successfully
2414 False- if any error encountered
2415 """
2416
2417 addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
2418
2419 if addvm_resp.status_code != requests.codes.ok:
2420 self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
2421 .format(vmGroupNameURL, addvm_resp.status_code))
2422 return False
2423 else:
2424 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2425 for child in resp_xml:
2426 if child.tag.split('}')[1] == 'Link':
2427 if child.attrib.get("rel") == "addVms":
2428 addvmtogrpURL = child.attrib.get("href")
2429
2430 #Get vm details
2431 url_list = [self.url, '/api/vApp/vm-',vm_uuid]
2432 vmdetailsURL = ''.join(url_list)
2433
2434 resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
2435
2436 if resp.status_code != requests.codes.ok:
2437 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
2438 return False
2439
2440 #Parse VM details
2441 resp_xml = XmlElementTree.fromstring(resp.content)
2442 if resp_xml.tag.split('}')[1] == "Vm":
2443 vm_id = resp_xml.attrib.get("id")
2444 vm_name = resp_xml.attrib.get("name")
2445 vm_href = resp_xml.attrib.get("href")
2446 #print vm_id, vm_name, vm_href
2447 #Add VM into VMgroup
2448 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2449 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2450 xmlns="http://www.vmware.com/vcloud/versions" \
2451 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2452 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2453 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2454 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2455 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2456 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2457 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2458 <ns2:VmReference href="{}" id="{}" name="{}" \
2459 type="application/vnd.vmware.vcloud.vm+xml" />\
2460 </ns2:Vms>""".format(vm_href, vm_id, vm_name)
2461
2462 addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
2463
2464 if addvmtogrp_resp.status_code != requests.codes.accepted:
2465 self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
2466 return False
2467 else:
2468 self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
2469 return True
2470
2471
2472 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2473 """Method to create a VM group in vCD
2474
2475 Args:
2476 vmgroup_name : Name of VM group to be created
2477 vmgroup_href : href for vmgroup
2478 headers- Headers for REST request
2479 """
2480 #POST to add URL with required data
2481 vmgroup_status = False
2482 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2483 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2484 <vmCount>1</vmCount>\
2485 </VMWVmGroup>""".format(vmgroup_name)
2486 resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
2487
2488 if resp.status_code != requests.codes.accepted:
2489 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
2490 return vmgroup_status
2491 else:
2492 vmgroup_task = self.get_task_from_response(resp.content)
2493 if vmgroup_task is None or vmgroup_task is False:
2494 raise vimconn.vimconnUnexpectedResponse(
2495 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2496
2497 # wait for task to complete
2498 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2499
2500 if result.get('status') == 'success':
2501 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
2502 #time.sleep(10)
2503 vmgroup_status = True
2504 return vmgroup_status
2505 else:
2506 raise vimconn.vimconnUnexpectedResponse(\
2507 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2508
2509
2510 def find_vmgroup_reference(self, url, headers):
2511 """ Method to create a new VMGroup which is required to add created VM
2512 Args:
2513 url- resource pool href
2514 headers- header information
2515
2516 Returns:
2517 vmgrp_href - href used to create a VM group, or None if not found
2518 """
2519 #Perform GET on resource pool to find 'add' link to create VMGroup
2520 #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2521 vmgrp_href = None
2522 resp = self.perform_request(req_type='GET',url=url, headers=headers)
2523
2524 if resp.status_code != requests.codes.ok:
2525 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2526 else:
2527 #Get the href to add vmGroup to vCD
2528 resp_xml = XmlElementTree.fromstring(resp.content)
2529 for child in resp_xml:
2530 if 'VMWProviderVdcResourcePool' in child.tag:
2531 for schild in child:
2532 if 'Link' in schild.tag:
2533 #Find href with type VMGroup and rel with add
2534 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
2535 and schild.attrib.get('rel') == "add":
2536 vmgrp_href = schild.attrib.get('href')
2537 return vmgrp_href
2538
2539
2540 def check_availibility_zone(self, az, respool_href, headers):
2541 """ Method to verify requested av zone is present or not in provided
2542 resource pool
2543
2544 Args:
2545 az - name of hostgroup (availibility_zone)
2546 respool_href - Resource Pool href
2547 headers - Headers to make REST call
2548 Returns:
2549 az_found - True if availibility_zone is found else False
2550 """
2551 az_found = False
2552 headers['Accept']='application/*+xml;version=27.0'
2553 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2554
2555 if resp.status_code != requests.codes.ok:
2556 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2557 else:
2558 #Get the href to hostGroups and find provided hostGroup is present in it
2559 resp_xml = XmlElementTree.fromstring(resp.content)
2560
2561 for child in resp_xml:
2562 if 'VMWProviderVdcResourcePool' in child.tag:
2563 for schild in child:
2564 if 'Link' in schild.tag:
2565 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2566 hostGroup_href = schild.attrib.get('href')
2567 hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
2568 if hg_resp.status_code != requests.codes.ok:
2569 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
2570 else:
2571 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2572 for hostGroup in hg_resp_xml:
2573 if 'HostGroup' in hostGroup.tag:
2574 if hostGroup.attrib.get("name") == az:
2575 az_found = True
2576 break
2577 return az_found
2578
2579
2580 def get_pvdc_for_org(self, org_vdc, headers):
2581 """ This method gets provider vdc references from organisation
2582
2583 Args:
2584 org_vdc - name of the organisation VDC to find pvdc
2585 headers - headers to make REST call
2586
2587 Returns:
2588 None - if no pvdc href found else
2589 pvdc_href - href to pvdc
2590 """
2591
2592 #Get provider VDC references from vCD
2593 pvdc_href = None
2594 #url = '<vcd url>/api/admin/extension/providerVdcReferences'
2595 url_list = [self.url, '/api/admin/extension/providerVdcReferences']
2596 url = ''.join(url_list)
2597
2598 response = self.perform_request(req_type='GET',url=url, headers=headers)
2599 if response.status_code != requests.codes.ok:
2600 self.logger.debug ("REST API call {} failed. Return status code {}"\
2601 .format(url, response.status_code))
2602 else:
2603 xmlroot_response = XmlElementTree.fromstring(response.content)
2604 for child in xmlroot_response:
2605 if 'ProviderVdcReference' in child.tag:
2606 pvdc_href = child.attrib.get('href')
2607 #Get vdcReferences to find org
2608 pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2609 if pvdc_resp.status_code != requests.codes.ok:
2610 raise vimconn.vimconnException("REST API call {} failed. "\
2611 "Return status code {}"\
2612 .format(url, pvdc_resp.status_code))
2613
2614 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
2615 for child in pvdc_resp_xml:
2616 if 'Link' in child.tag:
2617 if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
2618 vdc_href = child.attrib.get('href')
2619
2620 #Check if provided org is present in vdc
2621 vdc_resp = self.perform_request(req_type='GET',
2622 url=vdc_href,
2623 headers=headers)
2624 if vdc_resp.status_code != requests.codes.ok:
2625 raise vimconn.vimconnException("REST API call {} failed. "\
2626 "Return status code {}"\
2627 .format(url, vdc_resp.status_code))
2628 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
2629 for child in vdc_resp_xml:
2630 if 'VdcReference' in child.tag:
2631 if child.attrib.get('name') == org_vdc:
2632 return pvdc_href
2633
2634
2635 def get_resource_pool_details(self, pvdc_href, headers):
2636 """ Method to get resource pool information.
2637 Host groups are a property of the resource pool.
2638 To get host groups, we need to GET details of resource pool.
2639
2640 Args:
2641 pvdc_href: href to pvdc details
2642 headers: headers
2643
2644 Returns:
2645 respool_href - Returns href link reference to resource pool
2646 """
2647 respool_href = None
2648 resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2649
2650 if resp.status_code != requests.codes.ok:
2651 self.logger.debug ("REST API call {} failed. Return status code {}"\
2652 .format(pvdc_href, resp.status_code))
2653 else:
2654 respool_resp_xml = XmlElementTree.fromstring(resp.content)
2655 for child in respool_resp_xml:
2656 if 'Link' in child.tag:
2657 if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
2658 respool_href = child.attrib.get("href")
2659 break
2660 return respool_href
2661
2662
2663 def log_message(self, msg):
2664 """
2665 Method to log error messages related to Affinity rule creation
2666 in new_vminstance & raise Exception
2667 Args:
2668 msg - Error message to be logged
2669
2670 """
2671 #get token to connect vCD as a normal user
2672 self.get_token()
2673 self.logger.debug(msg)
2674 raise vimconn.vimconnException(msg)
2675
2676
2677 ##
2678 ##
2679 ## based on current discussion
2680 ##
2681 ##
2682 ## server:
2683 # created: '2016-09-08T11:51:58'
2684 # description: simple-instance.linux1.1
2685 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
2686 # hostId: e836c036-74e7-11e6-b249-0800273e724c
2687 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
2688 # status: ACTIVE
2689 # error_msg:
2690 # interfaces: …
2691 #
2692 def get_vminstance(self, vim_vm_uuid=None):
2693 """Returns the VM instance information from VIM"""
2694
2695 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
2696
2697 org, vdc = self.get_vdc_details()
2698 if vdc is None:
2699 raise vimconn.vimconnConnectionException(
2700 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2701
2702 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
2703 if not vm_info_dict:
2704 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2705 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2706
2707 status_key = vm_info_dict['status']
2708 error = ''
2709 try:
2710 vm_dict = {'created': vm_info_dict['created'],
2711 'description': vm_info_dict['name'],
2712 'status': vcdStatusCode2manoFormat[int(status_key)],
2713 'hostId': vm_info_dict['vmuuid'],
2714 'error_msg': error,
2715 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2716
2717 if 'interfaces' in vm_info_dict:
2718 vm_dict['interfaces'] = vm_info_dict['interfaces']
2719 else:
2720 vm_dict['interfaces'] = []
2721 except KeyError:
2722 vm_dict = {'created': '',
2723 'description': '',
2724 'status': vcdStatusCode2manoFormat[int(-1)],
2725 'hostId': vm_info_dict['vmuuid'],
2726 'error_msg': "Inconsistency state",
2727 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2728
2729 return vm_dict
2730
2731 def delete_vminstance(self, vm__vim_uuid, created_items=None):
2732 """Method poweroff and remove VM instance from vcloud director network.
2733
2734 Args:
2735 vm__vim_uuid: VM UUID
2736
2737 Returns:
2738 The instance identifier
2739 """
2740
2741 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
2742
2743 org, vdc = self.get_vdc_details()
2744 vdc_obj = VDC(self.client, href=vdc.get('href'))
2745 if vdc_obj is None:
2746 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
2747 self.tenant_name))
2748 raise vimconn.vimconnException(
2749 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2750
2751 try:
2752 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2753 if vapp_name is None:
2754 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2755 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2756 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2757 vapp_resource = vdc_obj.get_vapp(vapp_name)
2758 vapp = VApp(self.client, resource=vapp_resource)
2759
2760 # Delete vApp and wait for status change if task executed and vApp is None.
2761
2762 if vapp:
2763 if vapp_resource.get('deployed') == 'true':
2764 self.logger.info("Powering off vApp {}".format(vapp_name))
2765 #Power off vApp
2766 powered_off = False
2767 wait_time = 0
2768 while wait_time <= MAX_WAIT_TIME:
2769 power_off_task = vapp.power_off()
2770 result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
2771
2772 if result.get('status') == 'success':
2773 powered_off = True
2774 break
2775 else:
2776 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
2777 time.sleep(INTERVAL_TIME)
2778
2779 wait_time +=INTERVAL_TIME
2780 if not powered_off:
2781 self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
2782 else:
2783 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
2784
2785 #Undeploy vApp
2786 self.logger.info("Undeploy vApp {}".format(vapp_name))
2787 wait_time = 0
2788 undeployed = False
2789 while wait_time <= MAX_WAIT_TIME:
2790 vapp = VApp(self.client, resource=vapp_resource)
2791 if not vapp:
2792 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2793 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2794 undeploy_task = vapp.undeploy()
2795
2796 result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
2797 if result.get('status') == 'success':
2798 undeployed = True
2799 break
2800 else:
2801 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
2802 time.sleep(INTERVAL_TIME)
2803
2804 wait_time +=INTERVAL_TIME
2805
2806 if not undeployed:
2807 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
2808
2809 # delete vapp
2810 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
2811
2812 if vapp is not None:
2813 wait_time = 0
2814 result = False
2815
2816 while wait_time <= MAX_WAIT_TIME:
2817 vapp = VApp(self.client, resource=vapp_resource)
2818 if not vapp:
2819 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2820 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2821
2822 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
2823
2824 result = self.client.get_task_monitor().wait_for_success(task=delete_task)
2825 if result.get('status') == 'success':
2826 break
2827 else:
2828 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
2829 time.sleep(INTERVAL_TIME)
2830
2831 wait_time +=INTERVAL_TIME
2832
2833 if result is None:
2834 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
2835 else:
2836 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
2837 config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
2838 catalog_list = self.get_image_list()
2839 try:
2840 config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
2841 if catalog_['name'] == config_drive_catalog_name][0]
2842 except IndexError:
2843 pass
2844 if config_drive_catalog_id:
2845 self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
2846 'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
2847 self.delete_image(config_drive_catalog_id)
2848 return vm__vim_uuid
2849 except:
2850 self.logger.debug(traceback.format_exc())
2851 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
2852
2853
2854 def refresh_vms_status(self, vm_list):
2855 """Get the status of the virtual machines and their interfaces/ports
2856 Params: the list of VM identifiers
2857 Returns a dictionary with:
2858 vm_id: #VIM id of this Virtual Machine
2859 status: #Mandatory. Text with one of:
2860 # DELETED (not found at vim)
2861 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2862 # OTHER (Vim reported other status not understood)
2863 # ERROR (VIM indicates an ERROR status)
2864 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2865 # CREATING (on building process), ERROR
2866 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
2867 #
2868 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2869 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2870 interfaces:
2871 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2872 mac_address: #Text format XX:XX:XX:XX:XX:XX
2873 vim_net_id: #network id where this interface is connected
2874 vim_interface_id: #interface/port VIM id
2875 ip_address: #null, or text with IPv4, IPv6 address
2876 """
2877
2878 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
2879
2880 org,vdc = self.get_vdc_details()
2881 if vdc is None:
2882 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2883
2884 vms_dict = {}
2885 nsx_edge_list = []
2886 for vmuuid in vm_list:
2887 vapp_name = self.get_namebyvappid(vmuuid)
2888 if vapp_name is not None:
2889
2890 try:
2891 vm_pci_details = self.get_vm_pci_details(vmuuid)
2892 vdc_obj = VDC(self.client, href=vdc.get('href'))
2893 vapp_resource = vdc_obj.get_vapp(vapp_name)
2894 the_vapp = VApp(self.client, resource=vapp_resource)
2895
2896 vm_details = {}
2897 for vm in the_vapp.get_all_vms():
2898 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2899 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2900 response = self.perform_request(req_type='GET',
2901 url=vm.get('href'),
2902 headers=headers)
2903
2904 if response.status_code != 200:
2905 self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
2906 "status code : {}".format(vm.get('href'),
2907 response.content,
2908 response.status_code))
2909 raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
2910 "VM details")
2911 xmlroot = XmlElementTree.fromstring(response.content)
2912
2913
2914 result = response.content.replace("\n"," ")
2915 hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
2916 if hdd_match:
2917 hdd_mb = hdd_match.group(1)
2918 vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
2919 cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
2920 if cpus_match:
2921 cpus = cpus_match.group(1)
2922 vm_details['cpus'] = int(cpus) if cpus else None
2923 memory_match = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result)
2924 vm_details['memory_mb'] = int(memory_match.group(1)) if memory_match else None
2925 vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
2926 vm_details['id'] = xmlroot.get('id')
2927 vm_details['name'] = xmlroot.get('name')
2928 vm_info = [vm_details]
2929 if vm_pci_details:
2930 vm_info[0].update(vm_pci_details)
2931
2932 vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2933 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2934 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2935
2936 # get networks
2937 vm_ip = None
2938 vm_mac = None
2939 networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
2940 for network in networks:
2941 mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
2942 vm_mac = mac_s.group(1) if mac_s else None
2943 ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
2944 vm_ip = ip_s.group(1) if ip_s else None
2945
2946 if vm_ip is None:
2947 if not nsx_edge_list:
2948 nsx_edge_list = self.get_edge_details()
2949 if nsx_edge_list is None:
2950 raise vimconn.vimconnException("refresh_vms_status:"\
2951 "Failed to get edge details from NSX Manager")
2952 if vm_mac is not None:
2953 vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
2954
2955 net_s = re.search('network="(.*?)"',network)
2956 network_name = net_s.group(1) if net_s else None
2957
2958 vm_net_id = self.get_network_id_by_name(network_name)
2959 interface = {"mac_address": vm_mac,
2960 "vim_net_id": vm_net_id,
2961 "vim_interface_id": vm_net_id,
2962 "ip_address": vm_ip}
2963
2964 vm_dict["interfaces"].append(interface)
2965
2966 # add a vm to vm dict
2967 vms_dict.setdefault(vmuuid, vm_dict)
2968 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
2969 except Exception as exp:
2970 self.logger.debug("Error in response {}".format(exp))
2971 self.logger.debug(traceback.format_exc())
2972
2973 return vms_dict
2974
2975
2976 def get_edge_details(self):
2977 """Get the NSX edge list from NSX Manager
2978 Returns list of NSX edges
2979 """
2980 edge_list = []
2981 rheaders = {'Content-Type': 'application/xml'}
2982 nsx_api_url = '/api/4.0/edges'
2983
2984 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2985
2986 try:
2987 resp = requests.get(self.nsx_manager + nsx_api_url,
2988 auth = (self.nsx_user, self.nsx_password),
2989 verify = False, headers = rheaders)
2990 if resp.status_code == requests.codes.ok:
2991 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2992 for edge_pages in paged_Edge_List:
2993 if edge_pages.tag == 'edgePage':
2994 for edge_summary in edge_pages:
2995 if edge_summary.tag == 'pagingInfo':
2996 for element in edge_summary:
2997 if element.tag == 'totalCount' and element.text == '0':
2998 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2999 .format(self.nsx_manager))
3000
3001 if edge_summary.tag == 'edgeSummary':
3002 for element in edge_summary:
3003 if element.tag == 'id':
3004 edge_list.append(element.text)
3005 else:
3006 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
3007 .format(self.nsx_manager))
3008
3009 if not edge_list:
3010 raise vimconn.vimconnException("get_edge_details: "\
3011 "No NSX edge details found: {}"
3012 .format(self.nsx_manager))
3013 else:
3014 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
3015 return edge_list
3016 else:
3017 self.logger.debug("get_edge_details: "
3018 "Failed to get NSX edge details from NSX Manager: {}"
3019 .format(resp.content))
3020 return None
3021
3022 except Exception as exp:
3023 self.logger.debug("get_edge_details: "\
3024 "Failed to get NSX edge details from NSX Manager: {}"
3025 .format(exp))
3026 raise vimconn.vimconnException("get_edge_details: "\
3027 "Failed to get NSX edge details from NSX Manager: {}"
3028 .format(exp))
3029
3030
3031 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3032 """Get IP address details from NSX edges, using the MAC address
3033 PARAMS: nsx_edges : List of NSX edges
3034 mac_address : Find IP address corresponding to this MAC address
3035 Returns: IP address corrresponding to the provided MAC address
3036 """
3037
3038 ip_addr = None
3039 rheaders = {'Content-Type': 'application/xml'}
3040
3041 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3042
3043 try:
3044 for edge in nsx_edges:
3045 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
3046
3047 resp = requests.get(self.nsx_manager + nsx_api_url,
3048 auth = (self.nsx_user, self.nsx_password),
3049 verify = False, headers = rheaders)
3050
3051 if resp.status_code == requests.codes.ok:
3052 dhcp_leases = XmlElementTree.fromstring(resp.text)
3053 for child in dhcp_leases:
3054 if child.tag == 'dhcpLeaseInfo':
3055 dhcpLeaseInfo = child
3056 for leaseInfo in dhcpLeaseInfo:
3057 for elem in leaseInfo:
3058 if (elem.tag)=='macAddress':
3059 edge_mac_addr = elem.text
3060 if (elem.tag)=='ipAddress':
3061 ip_addr = elem.text
3062 if edge_mac_addr is not None:
3063 if edge_mac_addr == mac_address:
3064 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
3065 .format(ip_addr, mac_address,edge))
3066 return ip_addr
3067 else:
3068 self.logger.debug("get_ipaddr_from_NSXedge: "\
3069 "Error occurred while getting DHCP lease info from NSX Manager: {}"
3070 .format(resp.content))
3071
3072 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
3073 return None
3074
3075 except XmlElementTree.ParseError as Err:
3076 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
3077
3078
3079 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
3080 """Send and action over a VM instance from VIM
3081 Returns the vm_id if the action was successfully sent to the VIM"""
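# Supported keys in action_dict (handled below): "start", "rebuild", "pause",
# "resume", "shutoff"/"shutdown", "forceOff" and "reboot"; any other key
# raises a vimconnException.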
3082
3083 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
3084 if vm__vim_uuid is None or action_dict is None:
3085 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
3086
3087 org, vdc = self.get_vdc_details()
3088 if vdc is None:
3089 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
3090
3091 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3092 if vapp_name is None:
3093 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3094 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3095 else:
3096 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
3097
3098 try:
3099 vdc_obj = VDC(self.client, href=vdc.get('href'))
3100 vapp_resource = vdc_obj.get_vapp(vapp_name)
3101 vapp = VApp(self.client, resource=vapp_resource)
3102 if "start" in action_dict:
3103 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
3104 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3105 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3106 self.instance_actions_result("start", result, vapp_name)
3107 elif "rebuild" in action_dict:
3108 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
3109 rebuild_task = vapp.deploy(power_on=True)
3110 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
3111 self.instance_actions_result("rebuild", result, vapp_name)
3112 elif "pause" in action_dict:
3113 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
3114 pause_task = vapp.undeploy(action='suspend')
3115 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
3116 self.instance_actions_result("pause", result, vapp_name)
3117 elif "resume" in action_dict:
3118 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
3119 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3120 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3121 self.instance_actions_result("resume", result, vapp_name)
3122 elif "shutoff" in action_dict or "shutdown" in action_dict:
3123 action_name , value = action_dict.items()[0]
3124 #For python3
3125 #action_name , value = list(action_dict.items())[0]
3126 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
3127 shutdown_task = vapp.shutdown()
3128 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
3129 if action_name == "shutdown":
3130 self.instance_actions_result("shutdown", result, vapp_name)
3131 else:
3132 self.instance_actions_result("shutoff", result, vapp_name)
3133 elif "forceOff" in action_dict:
3134 result = vapp.undeploy(action='powerOff')
3135 self.instance_actions_result("forceOff", result, vapp_name)
3136 elif "reboot" in action_dict:
3137 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
3138 reboot_task = vapp.reboot()
3139 self.client.get_task_monitor().wait_for_success(task=reboot_task)
3140 else:
3141 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
3142 return vm__vim_uuid
3143 except Exception as exp :
3144 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
3145 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
3146
3147 def instance_actions_result(self, action, result, vapp_name):
3148 if result.get('status') == 'success':
3149 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
3150 else:
3151 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
3152
3153 def get_vminstance_console(self, vm_id, console_type="novnc"):
3154 """
3155 Get a console for the virtual machine
3156 Params:
3157 vm_id: uuid of the VM
3158 console_type, can be:
3159 "novnc" (by default), "xvpvnc" for VNC types,
3160 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3161 Returns dict with the console parameters:
3162 protocol: ssh, ftp, http, https, ...
3163 server: usually ip address
3164 port: the http, ssh, ... port
3165 suffix: extra text, e.g. the http path and query string
3166 """
3167 console_dict = {}
3168
3169 if console_type==None or console_type=='novnc':
3170
3171 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
3172
3173 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3174 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3175 response = self.perform_request(req_type='POST',
3176 url=url_rest_call,
3177 headers=headers)
3178
3179 if response.status_code == 403:
3180 response = self.retry_rest('GET', url_rest_call)
3181
3182 if response.status_code != 200:
3183 self.logger.error("REST call {} failed reason : {}"\
3184 "status code : {}".format(url_rest_call,
3185 response.content,
3186 response.status_code))
3187 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3188 "VM Mks ticket details")
3189 s = re.search("<Host>(.*?)</Host>",response.content)
3190 console_dict['server'] = s.group(1) if s else None
3191 s1 = re.search("<Port>(\d+)</Port>",response.content)
3192 console_dict['port'] = s1.group(1) if s1 else None
3193
3194
3195 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
3196
3197 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3198 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3199 response = self.perform_request(req_type='POST',
3200 url=url_rest_call,
3201 headers=headers)
3202
3203 if response.status_code == 403:
3204 response = self.retry_rest('GET', url_rest_call)
3205
3206 if response.status_code != 200:
3207 self.logger.error("REST call {} failed reason : {}"\
3208 "status code : {}".format(url_rest_call,
3209 response.content,
3210 response.status_code))
3211 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3212 "VM console details")
3213 s = re.search(">.*?/(vm-\d+.*)</",response.content)
3214 console_dict['suffix'] = s.group(1) if s else None
3215 console_dict['protocol'] = "https"
3216
3217 return console_dict
3218
3219 # NOT USED METHODS in current version
3220
3221 def host_vim2gui(self, host, server_dict):
3222 """Transform host dictionary from VIM format to GUI format,
3223 and append to the server_dict
3224 """
3225 raise vimconn.vimconnNotImplemented("Should have implemented this")
3226
3227 def get_hosts_info(self):
3228 """Get the information of deployed hosts
3229 Returns the hosts content"""
3230 raise vimconn.vimconnNotImplemented("Should have implemented this")
3231
3232 def get_hosts(self, vim_tenant):
3233 """Get the hosts and deployed instances
3234 Returns the hosts content"""
3235 raise vimconn.vimconnNotImplemented("Should have implemented this")
3236
3237 def get_processor_rankings(self):
3238 """Get the processor rankings in the VIM database"""
3239 raise vimconn.vimconnNotImplemented("Should have implemented this")
3240
3241 def new_host(self, host_data):
3242 """Adds a new host to VIM"""
3243 '''Returns status code of the VIM response'''
3244 raise vimconn.vimconnNotImplemented("Should have implemented this")
3245
3246 def new_external_port(self, port_data):
3247 """Adds a external port to VIM"""
3248 '''Returns the port identifier'''
3249 raise vimconn.vimconnNotImplemented("Should have implemented this")
3250
3251 def new_external_network(self, net_name, net_type):
3252 """Adds a external network to VIM (shared)"""
3253 '''Returns the network identifier'''
3254 raise vimconn.vimconnNotImplemented("Should have implemented this")
3255
3256 def connect_port_network(self, port_id, network_id, admin=False):
3257 """Connects a external port to a network"""
3258 '''Returns status code of the VIM response'''
3259 raise vimconn.vimconnNotImplemented("Should have implemented this")
3260
3261 def new_vminstancefromJSON(self, vm_data):
3262 """Adds a VM instance to VIM"""
3263 '''Returns the instance identifier'''
3264 raise vimconn.vimconnNotImplemented("Should have implemented this")
3265
3266 def get_network_name_by_id(self, network_uuid=None):
3267 """Method gets vcloud director network named based on supplied uuid.
3268
3269 Args:
3270 network_uuid: network_id
3271
3272 Returns:
3273 The network name, or None if not found.
3274 """
3275
3276 if not network_uuid:
3277 return None
3278
3279 try:
3280 org_dict = self.get_org(self.org_uuid)
3281 if 'networks' in org_dict:
3282 org_network_dict = org_dict['networks']
3283 for net_uuid in org_network_dict:
3284 if net_uuid == network_uuid:
3285 return org_network_dict[net_uuid]
3286 except:
3287 self.logger.debug("Exception in get_network_name_by_id")
3288 self.logger.debug(traceback.format_exc())
3289
3290 return None
3291
3292 def get_network_id_by_name(self, network_name=None):
3293 """Method gets vcloud director network uuid based on supplied name.
3294
3295 Args:
3296 network_name: network_name
3297 Returns:
3298 The network uuid, or None if no matching network is found.
3300 """
3301 if not network_name:
3302 self.logger.debug("get_network_id_by_name() : Network name is empty")
3303 return None
3304
3305 try:
3306 org_dict = self.get_org(self.org_uuid)
3307 if org_dict and 'networks' in org_dict:
3308 org_network_dict = org_dict['networks']
3309 for net_uuid,net_name in org_network_dict.iteritems():
3310 if net_name == network_name:
3311 return net_uuid
3312
3313 except KeyError as exp:
3314 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
3315
3316 return None
3317
3318 def get_physical_network_by_name(self, physical_network_name):
3319 '''
3320 Method returns the uuid of the physical network whose name is passed
3321 Args:
3322 physical_network_name: physical network name
3323 Returns:
3324 UUID of physical_network_name
3325 '''
3326 try:
3327 client_as_admin = self.connect_as_admin()
3328 if not client_as_admin:
3329 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3330 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3331 vm_list_rest_call = ''.join(url_list)
3332
3333 if client_as_admin._session:
3334 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3335 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3336
3337 response = self.perform_request(req_type='GET',
3338 url=vm_list_rest_call,
3339 headers=headers)
3340
3341 provider_network = None
3342 available_network = None
3343 add_vdc_rest_url = None
3344
3345 if response.status_code != requests.codes.ok:
3346 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3347 response.status_code))
3348 return None
3349 else:
3350 try:
3351 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3352 for child in vm_list_xmlroot:
3353
3354 if child.tag.split("}")[1] == 'ProviderVdcReference':
3355 provider_network = child.attrib.get('href')
3356 # application/vnd.vmware.admin.providervdc+xml
3357 if child.tag.split("}")[1] == 'Link':
3358 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3359 and child.attrib.get('rel') == 'add':
3360 add_vdc_rest_url = child.attrib.get('href')
3361 except:
3362 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3363 self.logger.debug("Respond body {}".format(response.content))
3364 return None
3365
3366 # find pvdc provided available network
3367 response = self.perform_request(req_type='GET',
3368 url=provider_network,
3369 headers=headers)
3370
3371 if response.status_code != requests.codes.ok:
3372 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3373 response.status_code))
3374 return None
3375
3376 try:
3377 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3378 for child in vm_list_xmlroot.iter():
3379 if child.tag.split("}")[1] == 'AvailableNetworks':
3380 for networks in child.iter():
3381 if networks.attrib.get('href') is not None and networks.attrib.get('name') is not None:
3382 if networks.attrib.get('name') == physical_network_name:
3383 network_url = networks.attrib.get('href')
3384 available_network = network_url[network_url.rindex('/')+1:]
3385 break
3386 except Exception as e:
3387 return None
3388
3389 return available_network
3390 except Exception as e:
3391 self.logger.error("Error while getting physical network: {}".format(e))
3392
3393 def list_org_action(self):
3394 """
3395 Method leverages vCloud director and queries for the organizations available to a particular user
3396
3397 Args:
3398 self.client - is the active client connection used for the query.
3400
3401 Returns:
3402 The returned XML response
3403 """
3404 url_list = [self.url, '/api/org']
3405 vm_list_rest_call = ''.join(url_list)
3406
3407 if self.client._session:
3408 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3409 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3410
3411 response = self.perform_request(req_type='GET',
3412 url=vm_list_rest_call,
3413 headers=headers)
3414
3415 if response.status_code == 403:
3416 response = self.retry_rest('GET', vm_list_rest_call)
3417
3418 if response.status_code == requests.codes.ok:
3419 return response.content
3420
3421 return None
3422
3423 def get_org_action(self, org_uuid=None):
3424 """
3425 Method leverages vCloud director and retrieves the available objects for an organization.
3426
3427 Args:
3428 org_uuid - vCD organization uuid
3429 self.client - is active connection.
3430
3431 Returns:
3432 The returned XML response
3433 """
3434
3435 if org_uuid is None:
3436 return None
3437
3438 url_list = [self.url, '/api/org/', org_uuid]
3439 vm_list_rest_call = ''.join(url_list)
3440
3441 if self.client._session:
3442 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3443 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3444
3445 #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
3446 response = self.perform_request(req_type='GET',
3447 url=vm_list_rest_call,
3448 headers=headers)
3449 if response.status_code == 403:
3450 response = self.retry_rest('GET', vm_list_rest_call)
3451
3452 if response.status_code == requests.codes.ok:
3453 return response.content
3454 return None
3455
3456 def get_org(self, org_uuid=None):
3457 """
3458 Method retrieves available organization in vCloud Director
3459
3460 Args:
3461 org_uuid - is a organization uuid.
3462
3463 Returns:
3464 The return dictionary with the following keys
3465 "networks" - for the network list under the org
3466 "catalogs" - for the catalog list under the org
3467 "vdcs" - for vdc list under org
3468 """
3469
3470 org_dict = {}
3471
3472 if org_uuid is None:
3473 return org_dict
3474
3475 content = self.get_org_action(org_uuid=org_uuid)
3476 try:
3477 vdc_list = {}
3478 network_list = {}
3479 catalog_list = {}
3480 vm_list_xmlroot = XmlElementTree.fromstring(content)
3481 for child in vm_list_xmlroot:
3482 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
3483 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3484 org_dict['vdcs'] = vdc_list
3485 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
3486 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3487 org_dict['networks'] = network_list
3488 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
3489 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3490 org_dict['catalogs'] = catalog_list
3491 except:
3492 pass
3493
3494 return org_dict
3495
3496 def get_org_list(self):
3497 """
3498 Method retrieves the available organizations in vCloud Director
3499
3500 Args:
3501 vca - is active VCA connection.
3502
3503 Returns:
3504 The return dictionary, keyed by organization UUID
3505 """
3506
3507 org_dict = {}
3508
3509 content = self.list_org_action()
3510 try:
3511 vm_list_xmlroot = XmlElementTree.fromstring(content)
3512 for vm_xml in vm_list_xmlroot:
3513 if vm_xml.tag.split("}")[1] == 'Org':
3514 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
3515 org_dict[org_uuid[0]] = vm_xml.attrib['name']
3516 except:
3517 pass
3518
3519 return org_dict
3520
3521 def vms_view_action(self, vdc_name=None):
3522 """ Method leverages vCloud director vms query call
3523
3524 Args:
3525 vca - is active VCA connection.
3526 vdc_name - is a vdc name that will be used to query vms action
3527
3528 Returns:
3529 The returned XML response
3530 """
3531 vca = self.connect()
3532 if vdc_name is None:
3533 return None
3534
3535 url_list = [vca.host, '/api/vms/query']
3536 vm_list_rest_call = ''.join(url_list)
3537
3538 if vca.vcloud_session and vca.vcloud_session.organization:
3539 refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
3540 vca.vcloud_session.organization.Link)
3541 #For python3
3542 #refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and\
3543 # ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
3544 if len(refs) == 1:
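# Note: Http here is presumably the legacy pyvcloud Http helper; it is not
# imported in this module, so this code path would fail if executed. The
# method sits under the "NOT USED METHODS" section and is kept for reference.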
3545 response = Http.get(url=vm_list_rest_call,
3546 headers=vca.vcloud_session.get_vcloud_headers(),
3547 verify=vca.verify,
3548 logger=vca.logger)
3549 if response.status_code == requests.codes.ok:
3550 return response.content
3551
3552 return None
3553
3554 def get_vapp_list(self, vdc_name=None):
3555 """
3556 Method retrieves the vApp list deployed in vCloud director and returns a dictionary
3557 containing all vApps deployed for the queried VDC.
3558 The dictionary key is the vApp UUID
3559
3560
3561 Args:
3562 vca - is active VCA connection.
3563 vdc_name - is a vdc name that will be used to query vms action
3564
3565 Returns:
3566 The return dictionary, keyed by vApp UUID
3567 """
3568
3569 vapp_dict = {}
3570 if vdc_name is None:
3571 return vapp_dict
3572
3573 content = self.vms_view_action(vdc_name=vdc_name)
3574 try:
3575 vm_list_xmlroot = XmlElementTree.fromstring(content)
3576 for vm_xml in vm_list_xmlroot:
3577 if vm_xml.tag.split("}")[1] == 'VMRecord':
3578 if vm_xml.attrib['isVAppTemplate'] == 'true':
3579 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
3580 if 'vappTemplate-' in rawuuid[0]:
3581 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3582 # vm and use raw UUID as key
3583 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
3584 except:
3585 pass
3586
3587 return vapp_dict
3588
3589 def get_vm_list(self, vdc_name=None):
3590 """
3591 Method retrieves the list of VMs deployed in vCloud director. It returns a dictionary
3592 containing all VMs deployed for the queried VDC.
3593 The dictionary key is the VM UUID
3594
3595
3596 Args:
3597 vca - is active VCA connection.
3598 vdc_name - is a vdc name that will be used to query vms action
3599
3600 Returns:
3601 The return dictionary, keyed by VM UUID
3602 """
3603 vm_dict = {}
3604
3605 if vdc_name is None:
3606 return vm_dict
3607
3608 content = self.vms_view_action(vdc_name=vdc_name)
3609 try:
3610 vm_list_xmlroot = XmlElementTree.fromstring(content)
3611 for vm_xml in vm_list_xmlroot:
3612 if vm_xml.tag.split("}")[1] == 'VMRecord':
3613 if vm_xml.attrib['isVAppTemplate'] == 'false':
3614 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3615 if 'vm-' in rawuuid[0]:
3616 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3617 # vm and use raw UUID as key
3618 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3619 except:
3620 pass
3621
3622 return vm_dict
3623
3624 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
3625 """
3626 Method retrieves a VM deployed in vCloud director. It returns the VM attributes as a dictionary
3627 for the matching VM in the queried VDC (looked up by vApp name or UUID).
3628 The dictionary key is the VM UUID
3629
3630
3631 Args:
3632 vca - is active VCA connection.
3633 vdc_name - is a vdc name that will be used to query vms action
3634
3635 Returns:
3636 The return dictionary, keyed by VM UUID
3637 """
3638 vm_dict = {}
3639 vca = self.connect()
3640 if not vca:
3641 raise vimconn.vimconnConnectionException("self.connect() is failed")
3642
3643 if vdc_name is None:
3644 return vm_dict
3645
3646 content = self.vms_view_action(vdc_name=vdc_name)
3647 try:
3648 vm_list_xmlroot = XmlElementTree.fromstring(content)
3649 for vm_xml in vm_list_xmlroot:
3650 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
3651 # lookup done by UUID
3652 if isuuid:
3653 if vapp_name in vm_xml.attrib['container']:
3654 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3655 if 'vm-' in rawuuid[0]:
3656 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3657 break
3658 # lookup done by Name
3659 else:
3660 if vapp_name in vm_xml.attrib['name']:
3661 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3662 if 'vm-' in rawuuid[0]:
3663 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3664 break
3665 except:
3666 pass
3667
3668 return vm_dict
3669
3670 def get_network_action(self, network_uuid=None):
3671 """
3672 Method leverages vCloud director and queries a network based on the network uuid
3673
3674 Args:
3675 vca - is active VCA connection.
3676 network_uuid - is a network uuid
3677
3678 Returns:
3679 The returned XML response
3680 """
3681
3682 if network_uuid is None:
3683 return None
3684
3685 url_list = [self.url, '/api/network/', network_uuid]
3686 vm_list_rest_call = ''.join(url_list)
3687
3688 if self.client._session:
3689 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3690 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3691
3692 response = self.perform_request(req_type='GET',
3693 url=vm_list_rest_call,
3694 headers=headers)
3695 #Retry login if session expired & retry sending request
3696 if response.status_code == 403:
3697 response = self.retry_rest('GET', vm_list_rest_call)
3698
3699 if response.status_code == requests.codes.ok:
3700 return response.content
3701
3702 return None
3703
3704 def get_vcd_network(self, network_uuid=None):
3705 """
3706 Method retrieves available network from vCloud Director
3707
3708 Args:
3709 network_uuid - is VCD network UUID
3710
3711 Each element serialized as key : value pair
3712
3713 Following keys are available for access, e.g. network_configuration['Gateway']
3714 <Configuration>
3715 <IpScopes>
3716 <IpScope>
3717 <IsInherited>true</IsInherited>
3718 <Gateway>172.16.252.100</Gateway>
3719 <Netmask>255.255.255.0</Netmask>
3720 <Dns1>172.16.254.201</Dns1>
3721 <Dns2>172.16.254.202</Dns2>
3722 <DnsSuffix>vmwarelab.edu</DnsSuffix>
3723 <IsEnabled>true</IsEnabled>
3724 <IpRanges>
3725 <IpRange>
3726 <StartAddress>172.16.252.1</StartAddress>
3727 <EndAddress>172.16.252.99</EndAddress>
3728 </IpRange>
3729 </IpRanges>
3730 </IpScope>
3731 </IpScopes>
3732 <FenceMode>bridged</FenceMode>
3733
3734 Returns:
3735 The return dictionary with the network configuration (keys as listed above)
3736 """
3737
3738 network_configuration = {}
3739 if network_uuid is None:
3740 return network_uuid
3741
3742 try:
3743 content = self.get_network_action(network_uuid=network_uuid)
3744 vm_list_xmlroot = XmlElementTree.fromstring(content)
3745
3746 network_configuration['status'] = vm_list_xmlroot.get("status")
3747 network_configuration['name'] = vm_list_xmlroot.get("name")
3748 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
3749
3750 for child in vm_list_xmlroot:
3751 if child.tag.split("}")[1] == 'IsShared':
3752 network_configuration['isShared'] = child.text.strip()
3753 if child.tag.split("}")[1] == 'Configuration':
3754 for configuration in child.iter():
3755 tagKey = configuration.tag.split("}")[1].strip()
3756 if tagKey != "":
3757 network_configuration[tagKey] = configuration.text.strip()
3758 return network_configuration
3759 except Exception as exp :
3760 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
3761 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
3762
3763 return network_configuration
3764
3765 def delete_network_action(self, network_uuid=None):
3766 """
3767 Method deletes the given network from vCloud director
3768
3769 Args:
3770 network_uuid - is a network uuid that client wish to delete
3771
3772 Returns:
3773 True if the delete request was accepted, otherwise False
3774 """
3775 client = self.connect_as_admin()
3776 if not client:
3777 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
3778 if network_uuid is None:
3779 return False
3780
3781 url_list = [self.url, '/api/admin/network/', network_uuid]
3782 vm_list_rest_call = ''.join(url_list)
3783
3784 if client._session:
3785 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3786 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
3787 response = self.perform_request(req_type='DELETE',
3788 url=vm_list_rest_call,
3789 headers=headers)
3790 if response.status_code == 202:
3791 return True
3792
3793 return False
3794
3795 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3796 ip_profile=None, isshared='true'):
3797 """
3798 Method creates a network in vCloud director
3799
3800 Args:
3801 network_name - is network name to be created.
3802 net_type - can be 'bridge','data','ptp','mgmt'.
3803 ip_profile is a dict containing the IP parameters of the network
3804 isshared - 'true'/'false' string indicating whether the network is shared
3805 parent_network_uuid - is the parent provider vdc network that will be used for mapping.
3806 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
3807
3808 Returns:
3809 The new network uuid, or None on failure
3810 """
3811
3812 new_network_name = [network_name, '-', str(uuid.uuid4())]
3813 content = self.create_network_rest(network_name=''.join(new_network_name),
3814 ip_profile=ip_profile,
3815 net_type=net_type,
3816 parent_network_uuid=parent_network_uuid,
3817 isshared=isshared)
3818 if content is None:
3819 self.logger.debug("Failed create network {}.".format(network_name))
3820 return None
3821
3822 try:
3823 vm_list_xmlroot = XmlElementTree.fromstring(content)
3824 vcd_uuid = vm_list_xmlroot.get('id').split(":")
3825 if len(vcd_uuid) == 4:
3826 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
3827 return vcd_uuid[3]
3828 except:
3829 self.logger.debug("Failed create network {}".format(network_name))
3830 return None
3831
3832 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3833 ip_profile=None, isshared='true'):
3834 """
3835 Method creates a network in vCloud director
3836
3837 Args:
3838 network_name - is network name to be created.
3839 net_type - can be 'bridge','data','ptp','mgmt'.
3840 ip_profile is a dict containing the IP parameters of the network
3841 isshared - 'true'/'false' string indicating whether the network is shared
3842 parent_network_uuid - is the parent provider vdc network that will be used for mapping.
3843 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
3844
3845 Returns:
3846 The XML content of the create-network response, or None on failure
3847 """
3848 client_as_admin = self.connect_as_admin()
3849 if not client_as_admin:
3850 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3851 if network_name is None:
3852 return None
3853
3854 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3855 vm_list_rest_call = ''.join(url_list)
3856
3857 if client_as_admin._session:
3858 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3859 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3860
3861 response = self.perform_request(req_type='GET',
3862 url=vm_list_rest_call,
3863 headers=headers)
3864
3865 provider_network = None
3866 available_networks = None
3867 add_vdc_rest_url = None
3868
3869 if response.status_code != requests.codes.ok:
3870 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3871 response.status_code))
3872 return None
3873 else:
3874 try:
3875 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3876 for child in vm_list_xmlroot:
3877
3878 if child.tag.split("}")[1] == 'ProviderVdcReference':
3879 provider_network = child.attrib.get('href')
3880 # application/vnd.vmware.admin.providervdc+xml
3881 if child.tag.split("}")[1] == 'Link':
3882 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3883 and child.attrib.get('rel') == 'add':
3884 add_vdc_rest_url = child.attrib.get('href')
3885 except:
3886 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3887 self.logger.debug("Respond body {}".format(response.content))
3888 return None
3889
3890 # find pvdc provided available network
3891 response = self.perform_request(req_type='GET',
3892 url=provider_network,
3893 headers=headers)
3894
3895 if response.status_code != requests.codes.ok:
3896 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3897 response.status_code))
3898 return None
3899
3900 if parent_network_uuid is None:
3901 try:
3902 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
3903 for child in vm_list_xmlroot.iter():
3904 if child.tag.split("}")[1] == 'AvailableNetworks':
3905 for networks in child.iter():
3906 # application/vnd.vmware.admin.network+xml
3907 if networks.attrib.get('href') is not None:
3908 available_networks = networks.attrib.get('href')
3909 break
3910 except:
3911 return None
3912
3913 try:
3914 #Configure IP profile of the network
3915 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
3916
3917 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
3918 subnet_rand = random.randint(0, 255)
3919 ip_base = "192.168.{}.".format(subnet_rand)
3920 ip_profile['subnet_address'] = ip_base + "0/24"
3921 else:
3922 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
3923
3924 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
3925 ip_profile['gateway_address']=ip_base + "1"
3926 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
3927 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
3928 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
3929 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
3930 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
3931 ip_profile['dhcp_start_address']=ip_base + "3"
3932 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
3933 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
3934 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
3935 ip_profile['dns_address']=ip_base + "2"
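# Example of the defaults applied above when only DEFAULT_IP_PROFILE is used
# (subnet_rand is random; assumed here to be 23):
#   subnet_address='192.168.23.0/24', gateway_address='192.168.23.1',
#   dns_address='192.168.23.2', dhcp_start_address='192.168.23.3',
#   dhcp_enabled=True, dhcp_count=50, ip_version='IPv4'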
3936
3937 gateway_address=ip_profile['gateway_address']
3938 dhcp_count=int(ip_profile['dhcp_count'])
3939 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
3940
3941 if ip_profile['dhcp_enabled']==True:
3942 dhcp_enabled='true'
3943 else:
3944 dhcp_enabled='false'
3945 dhcp_start_address=ip_profile['dhcp_start_address']
3946
3947 #derive dhcp_end_address from dhcp_start_address & dhcp_count
3948 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
3949 end_ip_int += dhcp_count - 1
3950 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
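# e.g. with dhcp_start_address '192.168.23.3' and dhcp_count 50 the range
# ends at 3 + 50 - 1 = 52, i.e. dhcp_end_address '192.168.23.52'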
3951
3952 ip_version=ip_profile['ip_version']
3953 dns_address=ip_profile['dns_address']
3954 except KeyError as exp:
3955 self.logger.debug("Create Network REST: Key error {}".format(exp))
3956 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
3957
3958 # either use client provided UUID or search for a first available
3959 # if both are not defined we return none
3960 if parent_network_uuid is not None:
3961 provider_network = None
3962 available_networks = None
3963 add_vdc_rest_url = None
3964
3965 url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
3966 add_vdc_rest_url = ''.join(url_list)
3967
3968 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
3969 available_networks = ''.join(url_list)
3970
3971 #Creating all networks as Direct Org VDC type networks.
3972 #Unused in case of Underlay (data/ptp) network interface.
3973 fence_mode="isolated"
3974 is_inherited='false'
3975 dns_list = dns_address.split(";")
3976 dns1 = dns_list[0]
3977 dns2_text = ""
3978 if len(dns_list) >= 2:
3979 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
3980 if net_type == "isolated":
3981 fence_mode="isolated"
3982 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3983 <Description>Openmano created</Description>
3984 <Configuration>
3985 <IpScopes>
3986 <IpScope>
3987 <IsInherited>{1:s}</IsInherited>
3988 <Gateway>{2:s}</Gateway>
3989 <Netmask>{3:s}</Netmask>
3990 <Dns1>{4:s}</Dns1>{5:s}
3991 <IsEnabled>{6:s}</IsEnabled>
3992 <IpRanges>
3993 <IpRange>
3994 <StartAddress>{7:s}</StartAddress>
3995 <EndAddress>{8:s}</EndAddress>
3996 </IpRange>
3997 </IpRanges>
3998 </IpScope>
3999 </IpScopes>
4000 <FenceMode>{9:s}</FenceMode>
4001 </Configuration>
4002 <IsShared>{10:s}</IsShared>
4003 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
4004 subnet_address, dns1, dns2_text, dhcp_enabled,
4005 dhcp_start_address, dhcp_end_address,
4006 fence_mode, isshared)
4007 else:
4008 fence_mode = "bridged"
4009 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4010 <Description>Openmano created</Description>
4011 <Configuration>
4012 <IpScopes>
4013 <IpScope>
4014 <IsInherited>{1:s}</IsInherited>
4015 <Gateway>{2:s}</Gateway>
4016 <Netmask>{3:s}</Netmask>
4017 <Dns1>{4:s}</Dns1>{5:s}
4018 <IsEnabled>{6:s}</IsEnabled>
4019 <IpRanges>
4020 <IpRange>
4021 <StartAddress>{7:s}</StartAddress>
4022 <EndAddress>{8:s}</EndAddress>
4023 </IpRange>
4024 </IpRanges>
4025 </IpScope>
4026 </IpScopes>
4027 <ParentNetwork href="{9:s}"/>
4028 <FenceMode>{10:s}</FenceMode>
4029 </Configuration>
4030 <IsShared>{11:s}</IsShared>
4031 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
4032 subnet_address, dns1, dns2_text, dhcp_enabled,
4033 dhcp_start_address, dhcp_end_address, available_networks,
4034 fence_mode, isshared)
4035
4036 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
4037 try:
4038 response = self.perform_request(req_type='POST',
4039 url=add_vdc_rest_url,
4040 headers=headers,
4041 data=data)
4042
4043 if response.status_code != 201:
4044 self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
4045 .format(response.status_code,response.content))
4046 else:
4047 network_task = self.get_task_from_response(response.content)
4048 self.logger.debug("Create Network REST : Waiting for Network creation complete")
4049 time.sleep(5)
4050 result = self.client.get_task_monitor().wait_for_success(task=network_task)
4051 if result.get('status') == 'success':
4052 return response.content
4053 else:
4054 self.logger.debug("create_network_rest task failed. Network Create response : {}"
4055 .format(response.content))
4056 except Exception as exp:
4057 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
4058
4059 return None
4060
4061 def convert_cidr_to_netmask(self, cidr_ip=None):
4062 """
4063 Method converts a CIDR prefix into a dotted-decimal netmask
4064 Args:
4065 cidr_ip : CIDR IP address
4066 Returns:
4067 netmask : Converted netmask
4068 """
4069 if cidr_ip is not None:
4070 if '/' in cidr_ip:
4071 network, net_bits = cidr_ip.split('/')
4072 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
4073 else:
4074 netmask = cidr_ip
4075 return netmask
4076 return None
4077
4078 def get_provider_rest(self, vca=None):
4079 """
4080 Method gets provider vdc view from vcloud director
4081
4082 Args:
4083 vca - is the active client connection.
4086
4087 Returns:
4088 The returned XML content of the response, or None
4089 """
4090
4091 url_list = [self.url, '/api/admin']
4092 if vca:
4093 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4094 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4095 response = self.perform_request(req_type='GET',
4096 url=''.join(url_list),
4097 headers=headers)
4098
4099 if response.status_code == requests.codes.ok:
4100 return response.content
4101 return None
4102
4103 def create_vdc(self, vdc_name=None):
4104
4105 vdc_dict = {}
4106
4107 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
4108 if xml_content is not None:
4109 try:
4110 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
4111 for child in task_resp_xmlroot:
4112 if child.tag.split("}")[1] == 'Owner':
4113 vdc_id = child.attrib.get('href').split("/")[-1]
4114 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
4115 return vdc_dict
4116 except:
4117 self.logger.debug("Respond body {}".format(xml_content))
4118
4119 return None
4120
4121 def create_vdc_from_tmpl_rest(self, vdc_name=None):
4122 """
4123 Method creates a vdc in vCloud director based on a VDC template.
4124 It uses a pre-defined template.
4125
4126 Args:
4127 vdc_name - name of a new vdc.
4128
4129 Returns:
4130 The returned XML content of the response, or None
4131 """
4132 # prerequisite: at least one vdc template should be available in vCD
4133 self.logger.info("Creating new vdc {}".format(vdc_name))
4134 vca = self.connect_as_admin()
4135 if not vca:
4136 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4137 if vdc_name is None:
4138 return None
4139
4140 url_list = [self.url, '/api/vdcTemplates']
4141 vm_list_rest_call = ''.join(url_list)
4142
4143 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4144 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4145 response = self.perform_request(req_type='GET',
4146 url=vm_list_rest_call,
4147 headers=headers)
4148
4149 # container url to a template
4150 vdc_template_ref = None
4151 try:
4152 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
4153 for child in vm_list_xmlroot:
4154 # application/vnd.vmware.admin.providervdc+xml
4155 # we need to find a template from which we instantiate the VDC
4156 if child.tag.split("}")[1] == 'VdcTemplate':
4157 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
4158 vdc_template_ref = child.attrib.get('href')
4159 except:
4160 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4161 self.logger.debug("Respond body {}".format(response.content))
4162 return None
4163
4164 # if we didn't find the required pre-defined template we return None
4165 if vdc_template_ref is None:
4166 return None
4167
4168 try:
4169 # instantiate vdc
4170 url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
4171 vm_list_rest_call = ''.join(url_list)
4172 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4173 <Source href="{1:s}"></Source>
4174 <Description>openmano</Description>
4175 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
4176
4177 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
4178
4179 response = self.perform_request(req_type='POST',
4180 url=vm_list_rest_call,
4181 headers=headers,
4182 data=data)
4183
4184 vdc_task = self.get_task_from_response(response.content)
4185 self.client.get_task_monitor().wait_for_success(task=vdc_task)
4186
4187 # if all is ok we respond with the content, otherwise None by default
4188 if response.status_code >= 200 and response.status_code < 300:
4189 return response.content
4190 return None
4191 except:
4192 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4193 self.logger.debug("Respond body {}".format(response.content))
4194
4195 return None
4196
4197 def create_vdc_rest(self, vdc_name=None):
4198 """
4199 Method creates a vdc in vCloud director
4200
4201 Args:
4202 vdc_name - vdc name to be created
4203 Returns:
4204 The response content, or None on failure
4205 """
4206
4207 self.logger.info("Creating new vdc {}".format(vdc_name))
4208
4209 vca = self.connect_as_admin()
4210 if not vca:
4211 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4212 if vdc_name is None:
4213 return None
4214
4215 url_list = [self.url, '/api/admin/org/', self.org_uuid]
4216 vm_list_rest_call = ''.join(url_list)
4217
4218 if vca._session:
4219 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4220 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4221 response = self.perform_request(req_type='GET',
4222 url=vm_list_rest_call,
4223 headers=headers)
4224
4225 provider_vdc_ref = None
4226 add_vdc_rest_url = None
4227 available_networks = None
4228
4229 if response.status_code != requests.codes.ok:
4230 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
4231 response.status_code))
4232 return None
4233 else:
4234 try:
4235 vm_list_xmlroot = XmlElementTree.fromstring(response.content)
4236 for child in vm_list_xmlroot:
4237 # application/vnd.vmware.admin.providervdc+xml
4238 if child.tag.split("}")[1] == 'Link':
4239 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
4240 and child.attrib.get('rel') == 'add':
4241 add_vdc_rest_url = child.attrib.get('href')
4242 except:
4243 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4244 self.logger.debug("Respond body {}".format(response.content))
4245 return None
4246
4247 response = self.get_provider_rest(vca=vca)
4248 try:
4249 vm_list_xmlroot = XmlElementTree.fromstring(response)
4250 for child in vm_list_xmlroot:
4251 if child.tag.split("}")[1] == 'ProviderVdcReferences':
4252 for sub_child in child:
4253 provider_vdc_ref = sub_child.attrib.get('href')
4254 except:
4255 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4256 self.logger.debug("Respond body {}".format(response))
4257 return None
4258
4259 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
4260 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
4261 <AllocationModel>ReservationPool</AllocationModel>
4262 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
4263 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
4264 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
4265 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
4266 <ProviderVdcReference
4267 name="Main Provider"
4268 href="{2:s}" />
4269 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
4270 escape(vdc_name),
4271 provider_vdc_ref)
4272
4273 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
4274
4275 response = self.perform_request(req_type='POST',
4276 url=add_vdc_rest_url,
4277 headers=headers,
4278 data=data)
4279
4280 # if all is ok we respond with the content, otherwise None by default
4281 if response.status_code == 201:
4282 return response.content
4283 return None
4284
4285 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
4286 """
4287 Method retrieves vApp details from vCloud director
4288
4289 Args:
4290 vapp_uuid - is vapp identifier.
4291
4292 Returns:
4293 A dictionary with the parsed vApp details (empty or None on failure)
4294 """
4295
4296 parsed_respond = {}
4297 vca = None
4298
4299 if need_admin_access:
4300 vca = self.connect_as_admin()
4301 else:
4302 vca = self.client
4303
4304 if not vca:
4305 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4306 if vapp_uuid is None:
4307 return None
4308
4309 url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
4310 get_vapp_restcall = ''.join(url_list)
4311
4312 if vca._session:
4313 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4314 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4315 response = self.perform_request(req_type='GET',
4316 url=get_vapp_restcall,
4317 headers=headers)
4318
4319 if response.status_code == 403:
4320 if not need_admin_access:
4321 response = self.retry_rest('GET', get_vapp_restcall)
4322
4323 if response.status_code != requests.codes.ok:
4324 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
4325 response.status_code))
4326 return parsed_respond
4327
4328 try:
4329 xmlroot_respond = XmlElementTree.fromstring(response.content)
4330 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
4331
4332 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4333 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4334 'vmw': 'http://www.vmware.com/schema/ovf',
4335 'vm': 'http://www.vmware.com/vcloud/v1.5',
4336 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4337 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
4338 "xmlns":"http://www.vmware.com/vcloud/v1.5"
4339 }
4340
4341 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
4342 if created_section is not None:
4343 parsed_respond['created'] = created_section.text
4344
4345 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
4346 if network_section is not None and 'networkName' in network_section.attrib:
4347 parsed_respond['networkname'] = network_section.attrib['networkName']
4348
4349 ipscopes_section = \
4350 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
4351 namespaces)
4352 if ipscopes_section is not None:
4353 for ipscope in ipscopes_section:
4354 for scope in ipscope:
4355 tag_key = scope.tag.split("}")[1]
4356 if tag_key == 'IpRanges':
4357 ip_ranges = scope.getchildren()
4358 for ipblock in ip_ranges:
4359 for block in ipblock:
4360 parsed_respond[block.tag.split("}")[1]] = block.text
4361 else:
4362 parsed_respond[tag_key] = scope.text
4363
4364 # parse children section for other attrib
4365 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4366 if children_section is not None:
4367 parsed_respond['name'] = children_section.attrib['name']
4368 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
4369 if "nestedHypervisorEnabled" in children_section.attrib else None
4370 parsed_respond['deployed'] = children_section.attrib['deployed']
4371 parsed_respond['status'] = children_section.attrib['status']
4372 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
4373 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
4374 nic_list = []
4375 for adapters in network_adapter:
4376 adapter_key = adapters.tag.split("}")[1]
4377 if adapter_key == 'PrimaryNetworkConnectionIndex':
4378 parsed_respond['primarynetwork'] = adapters.text
4379 if adapter_key == 'NetworkConnection':
4380 vnic = {}
4381 if 'network' in adapters.attrib:
4382 vnic['network'] = adapters.attrib['network']
4383 for adapter in adapters:
4384 setting_key = adapter.tag.split("}")[1]
4385 vnic[setting_key] = adapter.text
4386 nic_list.append(vnic)
4387
4388 for link in children_section:
4389 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4390 if link.attrib['rel'] == 'screen:acquireTicket':
4391 parsed_respond['acquireTicket'] = link.attrib
4392 if link.attrib['rel'] == 'screen:acquireMksTicket':
4393 parsed_respond['acquireMksTicket'] = link.attrib
4394
4395 parsed_respond['interfaces'] = nic_list
4396 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4397 if vCloud_extension_section is not None:
4398 vm_vcenter_info = {}
4399 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4400 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4401 if vmext is not None:
4402 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4403 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
4404
4405 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
4406 vm_virtual_hardware_info = {}
4407 if virtual_hardware_section is not None:
4408 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
4409 if item.find("rasd:Description",namespaces).text == "Hard disk":
4410 disk_size = item.find("rasd:HostResource" ,namespaces
4411 ).attrib["{"+namespaces['vm']+"}capacity"]
4412
4413 vm_virtual_hardware_info["disk_size"]= disk_size
4414 break
4415
4416 for link in virtual_hardware_section:
4417 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4418 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
4419 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
4420 break
4421
4422 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
4423 except Exception as exp :
4424 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4425 return parsed_respond
4426
4427 def acquire_console(self, vm_uuid=None):
4428
4429 if vm_uuid is None:
4430 return None
4431 if self.client._session:
4432 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4433 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4434 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
4435 console_dict = vm_dict['acquireTicket']
4436 console_rest_call = console_dict['href']
4437
4438 response = self.perform_request(req_type='POST',
4439 url=console_rest_call,
4440 headers=headers)
4441
4442 if response.status_code == 403:
4443 response = self.retry_rest('POST', console_rest_call)
4444
4445 if response.status_code == requests.codes.ok:
4446 return response.content
4447
4448 return None
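    # Illustrative usage sketch (not part of the original flow; "conn" and the
    # uuid below are hypothetical): acquire_console() POSTs to the vApp's
    # screen:acquireTicket link and returns the raw ScreenTicket XML, or None.
    #
    #     ticket_xml = conn.acquire_console(vm_uuid="11111111-2222-3333-4444-555555555555")
    #     if ticket_xml:
    #         print("console ticket: {}".format(ticket_xml))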
4449
4450 def modify_vm_disk(self, vapp_uuid, flavor_disk):
4451 """
4452         Method to modify the VM disk size to match the flavor
4453
4454 Args:
4455 vapp_uuid - is vapp identifier.
4456 flavor_disk - disk size as specified in VNFD (flavor)
4457
4458 Returns:
4459             True if the disk was resized or no resize was needed, None/False otherwise
4460 """
4461 status = None
4462 try:
4463 #Flavor disk is in GB convert it into MB
4464 flavor_disk = int(flavor_disk) * 1024
4465 vm_details = self.get_vapp_details_rest(vapp_uuid)
4466 if vm_details:
4467 vm_name = vm_details["name"]
4468 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
4469
4470 if vm_details and "vm_virtual_hardware" in vm_details:
4471 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
4472 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4473
4474 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
4475
4476 if flavor_disk > vm_disk:
4477 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
4478 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
4479 vm_disk, flavor_disk ))
4480 else:
4481 status = True
4482 self.logger.info("No need to modify disk of VM {}".format(vm_name))
4483
4484 return status
4485 except Exception as exp:
4486             self.logger.info("Error occurred while modifying disk size {}".format(exp))
4487
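    # Worked example (hypothetical numbers): a flavor asking for 40 GB arrives as
    # flavor_disk=40 and is converted to 40 * 1024 = 40960 MB; if the template
    # disk reported by get_vapp_details_rest() is 20480 MB, the PUT in
    # modify_vm_disk_rest() is issued, otherwise the method simply returns True.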
4488
4489 def modify_vm_disk_rest(self, disk_href , disk_size):
4490 """
4491         Method to modify the VM disk size through the vCD REST API
4492
4493 Args:
4494 disk_href - vCD API URL to GET and PUT disk data
4495 disk_size - disk size as specified in VNFD (flavor)
4496
4497 Returns:
4498             True on success, False if the resize task fails, None on error
4499 """
4500 if disk_href is None or disk_size is None:
4501 return None
4502
4503 if self.client._session:
4504 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4505 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4506 response = self.perform_request(req_type='GET',
4507 url=disk_href,
4508 headers=headers)
4509
4510 if response.status_code == 403:
4511 response = self.retry_rest('GET', disk_href)
4512
4513 if response.status_code != requests.codes.ok:
4514 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
4515 response.status_code))
4516 return None
4517 try:
4518 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4519 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
4520 #For python3
4521 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
4522 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4523
4524 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4525 if item.find("rasd:Description",namespaces).text == "Hard disk":
4526 disk_item = item.find("rasd:HostResource" ,namespaces )
4527 if disk_item is not None:
4528 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
4529 break
4530
4531 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
4532 xml_declaration=True)
4533
4534 #Send PUT request to modify disk size
4535 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4536
4537 response = self.perform_request(req_type='PUT',
4538 url=disk_href,
4539 headers=headers,
4540 data=data)
4541 if response.status_code == 403:
4542 add_headers = {'Content-Type': headers['Content-Type']}
4543 response = self.retry_rest('PUT', disk_href, add_headers, data)
4544
4545 if response.status_code != 202:
4546 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
4547 response.status_code))
4548 else:
4549 modify_disk_task = self.get_task_from_response(response.content)
4550 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
4551 if result.get('status') == 'success':
4552 return True
4553 else:
4554 return False
4555 return None
4556
4557 except Exception as exp :
4558             self.logger.info("Error occurred calling rest api for modifying disk size {}".format(exp))
4559 return None
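    # Minimal standalone sketch of the RASD capacity edit done above, assuming
    # "xml_text" already holds the GET response of the disks section and a
    # target size of 40960 MB (both illustrative):
    #
    #     from lxml import etree
    #     root = etree.fromstring(xml_text)
    #     ns = {p: u for p, u in root.nsmap.iteritems() if p}
    #     ns["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
    #     for item in root.iterfind('xmlns:Item', ns):
    #         if item.find("rasd:Description", ns).text == "Hard disk":
    #             host_res = item.find("rasd:HostResource", ns)
    #             host_res.attrib["{" + ns["xmlns"] + "}capacity"] = "40960"
    #             break
    #     payload = etree.tostring(root, xml_declaration=True, encoding='utf8')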
4560
4561 def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
4562 """
4563 Method to attach pci devices to VM
4564
4565 Args:
4566 vapp_uuid - uuid of vApp/VM
4567             pci_devices - pci devices information as specified in VNFD (flavor)
4568
4569 Returns:
4570 The status of add pci device task , vm object and
4571 vcenter_conect object
4572 """
4573 vm_obj = None
4574 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
4575 vcenter_conect, content = self.get_vcenter_content()
4576 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4577
4578 if vm_moref_id:
4579 try:
4580 no_of_pci_devices = len(pci_devices)
4581 if no_of_pci_devices > 0:
4582 #Get VM and its host
4583 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4584 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4585 if host_obj and vm_obj:
4586                         #get PCI devices from host on which vapp is currently installed
4587 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
4588
4589 if avilable_pci_devices is None:
4590 #find other hosts with active pci devices
4591 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
4592 content,
4593 no_of_pci_devices
4594 )
4595
4596 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4597                                 #Migrate vm to the host where PCI devices are available
4598 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
4599 task = self.relocate_vm(new_host_obj, vm_obj)
4600 if task is not None:
4601 result = self.wait_for_vcenter_task(task, vcenter_conect)
4602 self.logger.info("Migrate VM status: {}".format(result))
4603 host_obj = new_host_obj
4604 else:
4605                                 self.logger.info("Failed to migrate VM : {}".format(vmname_andid))
4606 raise vimconn.vimconnNotFoundException(
4607 "Fail to migrate VM : {} to host {}".format(
4608 vmname_andid,
4609 new_host_obj)
4610 )
4611
4612 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4613 #Add PCI devices one by one
4614 for pci_device in avilable_pci_devices:
4615 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
4616 if task:
4617 status= self.wait_for_vcenter_task(task, vcenter_conect)
4618 if status:
4619 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4620 else:
4621 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4622 return True, vm_obj, vcenter_conect
4623 else:
4624 self.logger.error("Currently there is no host with"\
4625                                           " {} number of available PCI devices required for VM {}".format(
4626 no_of_pci_devices,
4627 vmname_andid)
4628 )
4629 raise vimconn.vimconnNotFoundException(
4630 "Currently there is no host with {} "\
4631                                 "number of available PCI devices required for VM {}".format(
4632 no_of_pci_devices,
4633 vmname_andid))
4634 else:
4635                     self.logger.debug("No information about PCI devices {}".format(pci_devices))
4636
4637 except vmodl.MethodFault as error:
4638                 self.logger.error("Error occurred while adding PCI devices {}".format(error))
4639 return None, vm_obj, vcenter_conect
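    # Flow summary (sketch of the method above): free passthrough devices are
    # looked up on the VM's current host first; if none are available the other
    # hosts are scanned with get_host_and_PCIdevices(), the VM is relocated to a
    # host that has enough of them, and only then is each device attached with
    # add_pci_to_vm(), waiting on every vCenter task before the next one.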
4640
4641 def get_vm_obj(self, content, mob_id):
4642 """
4643         Method to get the vsphere VM object associated with a given moref ID
4644 Args:
4645 vapp_uuid - uuid of vApp/VM
4646 content - vCenter content object
4647 mob_id - mob_id of VM
4648
4649 Returns:
4650 VM and host object
4651 """
4652 vm_obj = None
4653 host_obj = None
4654 try :
4655 container = content.viewManager.CreateContainerView(content.rootFolder,
4656 [vim.VirtualMachine], True
4657 )
4658 for vm in container.view:
4659 mobID = vm._GetMoId()
4660 if mobID == mob_id:
4661 vm_obj = vm
4662 host_obj = vm_obj.runtime.host
4663 break
4664 except Exception as exp:
4665 self.logger.error("Error occurred while finding VM object : {}".format(exp))
4666 return host_obj, vm_obj
4667
4668 def get_pci_devices(self, host, need_devices):
4669 """
4670 Method to get the details of pci devices on given host
4671 Args:
4672 host - vSphere host object
4673 need_devices - number of pci devices needed on host
4674
4675 Returns:
4676 array of pci devices
4677 """
4678 all_devices = []
4679 all_device_ids = []
4680 used_devices_ids = []
4681
4682 try:
4683 if host:
4684 pciPassthruInfo = host.config.pciPassthruInfo
4685 pciDevies = host.hardware.pciDevice
4686
4687 for pci_status in pciPassthruInfo:
4688 if pci_status.passthruActive:
4689 for device in pciDevies:
4690 if device.id == pci_status.id:
4691 all_device_ids.append(device.id)
4692 all_devices.append(device)
4693
4694 #check if devices are in use
4695             avalible_devices = list(all_devices)  # work on a copy so removals below do not mutate all_devices
4696 for vm in host.vm:
4697 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
4698 vm_devices = vm.config.hardware.device
4699 for device in vm_devices:
4700 if type(device) is vim.vm.device.VirtualPCIPassthrough:
4701 if device.backing.id in all_device_ids:
4702 for use_device in avalible_devices:
4703 if use_device.id == device.backing.id:
4704 avalible_devices.remove(use_device)
4705 used_devices_ids.append(device.backing.id)
4706 self.logger.debug("Device {} from devices {}"\
4707                                           " is in use".format(device.backing.id,
4708 device)
4709 )
4710 if len(avalible_devices) < need_devices:
4711                     self.logger.debug("Host {} does not have {} available PCI devices".format(host,
4712                                                                                               need_devices))
4713                     self.logger.debug("found only {} devices {}".format(len(avalible_devices),
4714                                                                         avalible_devices))
4715 return None
4716 else:
4717 required_devices = avalible_devices[:need_devices]
4718                     self.logger.info("Found {} PCI devices on host {} but required only {}".format(
4719 len(avalible_devices),
4720 host,
4721 need_devices))
4722                     self.logger.info("Returning {} devices as {}".format(need_devices,
4723 required_devices ))
4724 return required_devices
4725
4726 except Exception as exp:
4727 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
4728
4729 return None
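    # Equivalent sketch of the availability filter above (illustrative only, not
    # used by the module): keep passthrough-enabled devices and drop any device
    # already backing a VirtualPCIPassthrough on a powered-on VM.
    #
    #     active_ids = set(s.id for s in host.config.pciPassthruInfo if s.passthruActive)
    #     in_use_ids = set(dev.backing.id
    #                      for vm in host.vm
    #                      if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn
    #                      for dev in vm.config.hardware.device
    #                      if isinstance(dev, vim.vm.device.VirtualPCIPassthrough))
    #     free = [d for d in host.hardware.pciDevice
    #             if d.id in active_ids and d.id not in in_use_ids]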
4730
4731 def get_host_and_PCIdevices(self, content, need_devices):
4732 """
4733         Method to get the details of pci devices on all hosts
4734
4735 Args:
4736             content - vCenter content object
4737 need_devices - number of pci devices needed on host
4738
4739 Returns:
4740 array of pci devices and host object
4741 """
4742 host_obj = None
4743 pci_device_objs = None
4744 try:
4745 if content:
4746 container = content.viewManager.CreateContainerView(content.rootFolder,
4747 [vim.HostSystem], True)
4748 for host in container.view:
4749 devices = self.get_pci_devices(host, need_devices)
4750 if devices:
4751 host_obj = host
4752 pci_device_objs = devices
4753 break
4754 except Exception as exp:
4755 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
4756
4757 return host_obj,pci_device_objs
4758
4759 def relocate_vm(self, dest_host, vm) :
4760 """
4761         Method to relocate VM to a new host
4762
4763 Args:
4764 dest_host - vSphere host object
4765 vm - vSphere VM object
4766
4767 Returns:
4768 task object
4769 """
4770 task = None
4771 try:
4772 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
4773 task = vm.Relocate(relocate_spec)
4774 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
4775 except Exception as exp:
4776             self.logger.error("Error occurred while relocating VM {} to new host {}: {}".format(
4777                                                                             vm, dest_host, exp))
4778 return task
4779
4780 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
4781 """
4782 Waits and provides updates on a vSphere task
4783 """
4784 while task.info.state == vim.TaskInfo.State.running:
4785 time.sleep(2)
4786
4787 if task.info.state == vim.TaskInfo.State.success:
4788 if task.info.result is not None and not hideResult:
4789 self.logger.info('{} completed successfully, result: {}'.format(
4790 actionName,
4791 task.info.result))
4792 else:
4793 self.logger.info('Task {} completed successfully.'.format(actionName))
4794 else:
4795 self.logger.error('{} did not complete successfully: {} '.format(
4796 actionName,
4797 task.info.error)
4798 )
4799
4800 return task.info.result
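    # Usage sketch (names illustrative): any pyVmomi task handle can be passed
    # in, e.g. the relocation task returned by relocate_vm():
    #
    #     task = self.relocate_vm(new_host_obj, vm_obj)
    #     if task:
    #         result = self.wait_for_vcenter_task(task, actionName='relocate VM')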
4801
4802 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
4803 """
4804 Method to add pci device in given VM
4805
4806 Args:
4807 host_object - vSphere host object
4808 vm_object - vSphere VM object
4809 host_pci_dev - host_pci_dev must be one of the devices from the
4810 host_object.hardware.pciDevice list
4811 which is configured as a PCI passthrough device
4812
4813 Returns:
4814 task object
4815 """
4816 task = None
4817 if vm_object and host_object and host_pci_dev:
4818 try :
4819 #Add PCI device to VM
4820 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
4821 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
4822
4823 if host_pci_dev.id not in systemid_by_pciid:
4824 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
4825 return None
4826
4827 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
4828 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
4829 id=host_pci_dev.id,
4830 systemId=systemid_by_pciid[host_pci_dev.id],
4831 vendorId=host_pci_dev.vendorId,
4832 deviceName=host_pci_dev.deviceName)
4833
4834 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
4835
4836 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
4837 new_device_config.operation = "add"
4838 vmConfigSpec = vim.vm.ConfigSpec()
4839 vmConfigSpec.deviceChange = [new_device_config]
4840
4841 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
4842 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
4843 host_pci_dev, vm_object, host_object)
4844 )
4845 except Exception as exp:
4846                 self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
4847 host_pci_dev,
4848 vm_object,
4849 exp))
4850 return task
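    # Usage sketch (illustrative): pick one of the free devices returned by
    # get_pci_devices() and reconfigure the VM, then wait for the task:
    #
    #     devices = self.get_pci_devices(host_obj, 1)
    #     if devices:
    #         task = self.add_pci_to_vm(host_obj, vm_obj, devices[0])
    #         if task:
    #             self.wait_for_vcenter_task(task, actionName='add PCI device')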
4851
4852 def get_vm_vcenter_info(self):
4853 """
4854 Method to get details of vCenter and vm
4855
4856 Args:
4857 vapp_uuid - uuid of vApp or VM
4858
4859 Returns:
4860             Moref Id of VM and details of vCenter
4861 """
4862 vm_vcenter_info = {}
4863
4864 if self.vcenter_ip is not None:
4865 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
4866 else:
4867 raise vimconn.vimconnException(message="vCenter IP is not provided."\
4868 " Please provide vCenter IP while attaching datacenter to tenant in --config")
4869 if self.vcenter_port is not None:
4870 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
4871 else:
4872 raise vimconn.vimconnException(message="vCenter port is not provided."\
4873 " Please provide vCenter port while attaching datacenter to tenant in --config")
4874 if self.vcenter_user is not None:
4875 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
4876 else:
4877 raise vimconn.vimconnException(message="vCenter user is not provided."\
4878 " Please provide vCenter user while attaching datacenter to tenant in --config")
4879
4880 if self.vcenter_password is not None:
4881 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
4882 else:
4883 raise vimconn.vimconnException(message="vCenter user password is not provided."\
4884 " Please provide vCenter user password while attaching datacenter to tenant in --config")
4885
4886 return vm_vcenter_info
4887
4888
4889 def get_vm_pci_details(self, vmuuid):
4890 """
4891 Method to get VM PCI device details from vCenter
4892
4893 Args:
4894 vm_obj - vSphere VM object
4895
4896 Returns:
4897             dict of PCI devices attached to VM
4898
4899 """
4900 vm_pci_devices_info = {}
4901 try:
4902 vcenter_conect, content = self.get_vcenter_content()
4903 vm_moref_id = self.get_vm_moref_id(vmuuid)
4904 if vm_moref_id:
4905 #Get VM and its host
4906 if content:
4907 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4908 if host_obj and vm_obj:
4909 vm_pci_devices_info["host_name"]= host_obj.name
4910 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
4911 for device in vm_obj.config.hardware.device:
4912 if type(device) == vim.vm.device.VirtualPCIPassthrough:
4913 device_details={'devide_id':device.backing.id,
4914 'pciSlotNumber':device.slotInfo.pciSlotNumber,
4915 }
4916 vm_pci_devices_info[device.deviceInfo.label] = device_details
4917 else:
4918 self.logger.error("Can not connect to vCenter while getting "\
4919                                       "PCI devices information")
4920 return vm_pci_devices_info
4921 except Exception as exp:
4922             self.logger.error("Error occurred while getting VM information"\
4923 " for VM : {}".format(exp))
4924 raise vimconn.vimconnException(message=exp)
4925
4926
4927 def reserve_memory_for_all_vms(self, vapp, memory_mb):
4928 """
4929 Method to reserve memory for all VMs
4930 Args :
4931 vapp - VApp
4932 memory_mb - Memory in MB
4933 Returns:
4934 None
4935 """
4936
4937 self.logger.info("Reserve memory for all VMs")
4938 for vms in vapp.get_all_vms():
4939 vm_id = vms.get('id').split(':')[-1]
4940
4941 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
4942
4943 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4944 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4945 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
4946 response = self.perform_request(req_type='GET',
4947 url=url_rest_call,
4948 headers=headers)
4949
4950 if response.status_code == 403:
4951 response = self.retry_rest('GET', url_rest_call)
4952
4953 if response.status_code != 200:
4954 self.logger.error("REST call {} failed reason : {}"\
4955                     " status code : {}".format(url_rest_call,
4956 response.content,
4957 response.status_code))
4958 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
4959 "memory")
4960
4961 bytexml = bytes(bytearray(response.content, encoding='utf-8'))
4962 contentelem = lxmlElementTree.XML(bytexml)
4963 namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.iteritems() if prefix}
4964 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4965
4966 # Find the reservation element in the response
4967 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
4968 for memelem in memelem_list:
4969 memelem.text = str(memory_mb)
4970
4971 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
4972
4973 response = self.perform_request(req_type='PUT',
4974 url=url_rest_call,
4975 headers=headers,
4976 data=newdata)
4977
4978 if response.status_code == 403:
4979 add_headers = {'Content-Type': headers['Content-Type']}
4980 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4981
4982 if response.status_code != 202:
4983 self.logger.error("REST call {} failed reason : {}"\
4984                     " status code : {} ".format(url_rest_call,
4985 response.content,
4986 response.status_code))
4987 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
4988 "virtual hardware memory section")
4989 else:
4990 mem_task = self.get_task_from_response(response.content)
4991 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
4992 if result.get('status') == 'success':
4993 self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
4994 .format(vm_id))
4995 else:
4996 self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
4997 .format(vm_id))
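    # Minimal sketch of the reservation edit performed above, assuming "xml_text"
    # holds the GET response of a VM memory section and a 4096 MB reservation
    # (both illustrative):
    #
    #     root = lxmlElementTree.XML(bytes(bytearray(xml_text, encoding='utf-8')))
    #     ns = {p: u for p, u in root.nsmap.iteritems() if p}
    #     for elem in root.findall(".//rasd:Reservation", ns):
    #         elem.text = "4096"
    #     payload = lxmlElementTree.tostring(root, pretty_print=True)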
4998
4999 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
5000 """
5001 Configure VApp network config with org vdc network
5002 Args :
5003 vapp - VApp
5004 Returns:
5005 None
5006 """
5007
5008 self.logger.info("Connecting vapp {} to org vdc network {}".
5009 format(vapp_id, net_name))
5010
5011 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
5012
5013 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5014 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5015 response = self.perform_request(req_type='GET',
5016 url=url_rest_call,
5017 headers=headers)
5018
5019 if response.status_code == 403:
5020 response = self.retry_rest('GET', url_rest_call)
5021
5022 if response.status_code != 200:
5023 self.logger.error("REST call {} failed reason : {}"\
5024                 " status code : {}".format(url_rest_call,
5025 response.content,
5026 response.status_code))
5027 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
5028 "network config section")
5029
5030 data = response.content
5031 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
5032 net_id = self.get_network_id_by_name(net_name)
5033 if not net_id:
5034 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
5035 "existing network")
5036
5037 bytexml = bytes(bytearray(data, encoding='utf-8'))
5038 newelem = lxmlElementTree.XML(bytexml)
5039 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.iteritems() if prefix}
5040 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5041 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
5042
5043 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
5044 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
5045 if parentnetworklist:
5046 for pn in parentnetworklist:
5047 if "href" not in pn.keys():
5048 id_val = pn.get("id")
5049 href_val = "{}/api/network/{}".format(self.url, id_val)
5050 pn.set("href", href_val)
5051
5052 newstr = """<NetworkConfig networkName="{}">
5053 <Configuration>
5054 <ParentNetwork href="{}/api/network/{}"/>
5055 <FenceMode>bridged</FenceMode>
5056 </Configuration>
5057 </NetworkConfig>
5058 """.format(net_name, self.url, net_id)
5059 newcfgelem = lxmlElementTree.fromstring(newstr)
5060 if nwcfglist:
5061 nwcfglist[0].addnext(newcfgelem)
5062
5063 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
5064
5065 response = self.perform_request(req_type='PUT',
5066 url=url_rest_call,
5067 headers=headers,
5068 data=newdata)
5069
5070 if response.status_code == 403:
5071 add_headers = {'Content-Type': headers['Content-Type']}
5072 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5073
5074 if response.status_code != 202:
5075 self.logger.error("REST call {} failed reason : {}"\
5076                 " status code : {} ".format(url_rest_call,
5077 response.content,
5078 response.status_code))
5079 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
5080 "network config section")
5081 else:
5082 vapp_task = self.get_task_from_response(response.content)
5083 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
5084 if result.get('status') == 'success':
5085 self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
5086 "network {}".format(vapp_id, net_name))
5087 else:
5088 self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
5089 "connect to network {}".format(vapp_id, net_name))
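    # Usage sketch (names illustrative): after instantiating a vApp, bridge it to
    # an existing org VDC network before wiring the individual VM NICs:
    #
    #     self.connect_vapp_to_org_vdc_network(vapp_id, "mgmt-net")
    #
    # The PUT body gains a <NetworkConfig> whose <ParentNetwork> href points at
    # <vCD URL>/api/network/<net id> with <FenceMode>bridged</FenceMode>.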
5090
5091 def remove_primary_network_adapter_from_all_vms(self, vapp):
5092 """
5093         Method to remove the primary network adapter from all VMs
5094 Args :
5095 vapp - VApp
5096 Returns:
5097 None
5098 """
5099
5100 self.logger.info("Removing network adapter from all VMs")
5101 for vms in vapp.get_all_vms():
5102 vm_id = vms.get('id').split(':')[-1]
5103
5104 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5105
5106 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5107 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5108 response = self.perform_request(req_type='GET',
5109 url=url_rest_call,
5110 headers=headers)
5111
5112 if response.status_code == 403:
5113 response = self.retry_rest('GET', url_rest_call)
5114
5115 if response.status_code != 200:
5116 self.logger.error("REST call {} failed reason : {}"\
5117                     " status code : {}".format(url_rest_call,
5118 response.content,
5119 response.status_code))
5120 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
5121 "network connection section")
5122
5123 data = response.content
5124 data = data.split('<Link rel="edit"')[0]
5125
5126 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5127
5128 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5129 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
5130 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5131 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
5132 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
5133 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
5134 xmlns:vmw="http://www.vmware.com/schema/ovf"
5135 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
5136 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
5137 xmlns:ns9="http://www.vmware.com/vcloud/versions"
5138 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
5139 <ovf:Info>Specifies the available VM network connections</ovf:Info>
5140 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
5141 <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
5142 </NetworkConnectionSection>""".format(url=url_rest_call)
5143 response = self.perform_request(req_type='PUT',
5144 url=url_rest_call,
5145 headers=headers,
5146 data=newdata)
5147
5148 if response.status_code == 403:
5149 add_headers = {'Content-Type': headers['Content-Type']}
5150 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5151
5152 if response.status_code != 202:
5153 self.logger.error("REST call {} failed reason : {}"\
5154                     " status code : {} ".format(url_rest_call,
5155 response.content,
5156 response.status_code))
5157 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
5158 "network connection section")
5159 else:
5160 nic_task = self.get_task_from_response(response.content)
5161 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5162 if result.get('status') == 'success':
5163                     self.logger.info("remove_primary_network_adapter(): removed primary "\
5164                                     "network adapter from VM {}".format(vm_id))
5165 else:
5166                     self.logger.error("remove_primary_network_adapter(): failed to remove "\
5167                                     "primary network adapter from VM {}".format(vm_id))
5168
5169 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
5170 """
5171 Method to add network adapter type to vm
5172 Args :
5173 network_name - name of network
5174 primary_nic_index - int value for primary nic index
5175 nicIndex - int value for nic index
5176 nic_type - specify model name to which add to vm
5177 Returns:
5178 None
5179 """
5180
5181 self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
5182 format(network_name, nicIndex, nic_type))
5183 try:
5184 ip_address = None
5185 floating_ip = False
5186 mac_address = None
5187 if 'floating_ip' in net: floating_ip = net['floating_ip']
5188
5189 # Stub for ip_address feature
5190 if 'ip_address' in net: ip_address = net['ip_address']
5191
5192 if 'mac_address' in net: mac_address = net['mac_address']
5193
5194 if floating_ip:
5195 allocation_mode = "POOL"
5196 elif ip_address:
5197 allocation_mode = "MANUAL"
5198 else:
5199 allocation_mode = "DHCP"
5200
5201 if not nic_type:
5202 for vms in vapp.get_all_vms():
5203 vm_id = vms.get('id').split(':')[-1]
5204
5205 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5206
5207 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5208 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5209 response = self.perform_request(req_type='GET',
5210 url=url_rest_call,
5211 headers=headers)
5212
5213 if response.status_code == 403:
5214 response = self.retry_rest('GET', url_rest_call)
5215
5216 if response.status_code != 200:
5217 self.logger.error("REST call {} failed reason : {}"\
5218                                                 " status code : {}".format(url_rest_call,
5219 response.content,
5220 response.status_code))
5221 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5222 "network connection section")
5223
5224 data = response.content
5225 data = data.split('<Link rel="edit"')[0]
5226 if '<PrimaryNetworkConnectionIndex>' not in data:
5227 self.logger.debug("add_network_adapter PrimaryNIC not in data")
5228 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5229 <NetworkConnection network="{}">
5230 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5231 <IsConnected>true</IsConnected>
5232 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5233 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5234 allocation_mode)
5235 # Stub for ip_address feature
5236 if ip_address:
5237 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5238 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5239
5240 if mac_address:
5241 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5242 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5243
5244 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5245 else:
5246 self.logger.debug("add_network_adapter PrimaryNIC in data")
5247 new_item = """<NetworkConnection network="{}">
5248 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5249 <IsConnected>true</IsConnected>
5250 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5251 </NetworkConnection>""".format(network_name, nicIndex,
5252 allocation_mode)
5253 # Stub for ip_address feature
5254 if ip_address:
5255 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5256 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5257
5258 if mac_address:
5259 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5260 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5261
5262 data = data + new_item + '</NetworkConnectionSection>'
5263
5264 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5265
5266 response = self.perform_request(req_type='PUT',
5267 url=url_rest_call,
5268 headers=headers,
5269 data=data)
5270
5271 if response.status_code == 403:
5272 add_headers = {'Content-Type': headers['Content-Type']}
5273 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5274
5275 if response.status_code != 202:
5276 self.logger.error("REST call {} failed reason : {}"\
5277                                                 " status code : {} ".format(url_rest_call,
5278 response.content,
5279 response.status_code))
5280 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5281 "network connection section")
5282 else:
5283 nic_task = self.get_task_from_response(response.content)
5284 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5285 if result.get('status') == 'success':
5286                                 self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
5287 "default NIC type".format(vm_id))
5288 else:
5289 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
5290 "connect NIC type".format(vm_id))
5291 else:
5292 for vms in vapp.get_all_vms():
5293 vm_id = vms.get('id').split(':')[-1]
5294
5295 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5296
5297 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5298 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5299 response = self.perform_request(req_type='GET',
5300 url=url_rest_call,
5301 headers=headers)
5302
5303 if response.status_code == 403:
5304 response = self.retry_rest('GET', url_rest_call)
5305
5306 if response.status_code != 200:
5307 self.logger.error("REST call {} failed reason : {}"\
5308                                                 " status code : {}".format(url_rest_call,
5309 response.content,
5310 response.status_code))
5311 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5312 "network connection section")
5313 data = response.content
5314 data = data.split('<Link rel="edit"')[0]
5315 vcd_netadapter_type = nic_type
5316 if nic_type in ['SR-IOV', 'VF']:
5317 vcd_netadapter_type = "SRIOVETHERNETCARD"
5318
5319 if '<PrimaryNetworkConnectionIndex>' not in data:
5320 self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
5321 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5322 <NetworkConnection network="{}">
5323 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5324 <IsConnected>true</IsConnected>
5325 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5326 <NetworkAdapterType>{}</NetworkAdapterType>
5327 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5328 allocation_mode, vcd_netadapter_type)
5329 # Stub for ip_address feature
5330 if ip_address:
5331 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5332 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5333
5334 if mac_address:
5335 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5336 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5337
5338 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5339 else:
5340 self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
5341 new_item = """<NetworkConnection network="{}">
5342 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5343 <IsConnected>true</IsConnected>
5344 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5345 <NetworkAdapterType>{}</NetworkAdapterType>
5346 </NetworkConnection>""".format(network_name, nicIndex,
5347 allocation_mode, vcd_netadapter_type)
5348 # Stub for ip_address feature
5349 if ip_address:
5350 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5351 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5352
5353 if mac_address:
5354 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5355 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5356
5357 data = data + new_item + '</NetworkConnectionSection>'
5358
5359 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5360
5361 response = self.perform_request(req_type='PUT',
5362 url=url_rest_call,
5363 headers=headers,
5364 data=data)
5365
5366 if response.status_code == 403:
5367 add_headers = {'Content-Type': headers['Content-Type']}
5368 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5369
5370 if response.status_code != 202:
5371 self.logger.error("REST call {} failed reason : {}"\
5372                                                 " status code : {}".format(url_rest_call,
5373 response.content,
5374 response.status_code))
5375 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5376 "network connection section")
5377 else:
5378 nic_task = self.get_task_from_response(response.content)
5379 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5380 if result.get('status') == 'success':
5381 self.logger.info("add_network_adapter_to_vms(): VM {} "\
5382                                                  "connected to NIC type {}".format(vm_id, nic_type))
5383 else:
5384 self.logger.error("add_network_adapter_to_vms(): VM {} "\
5385 "failed to connect NIC type {}".format(vm_id, nic_type))
5386 except Exception as exp:
5387 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
5388 "while adding Network adapter")
5389 raise vimconn.vimconnException(message=exp)
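    # Allocation-mode decision illustrated with hypothetical net dicts:
    # {'floating_ip': True} selects POOL, {'ip_address': '10.0.0.5'} selects
    # MANUAL (an <IpAddress> tag is injected), and a net with neither key falls
    # back to DHCP; nic_type values 'SR-IOV' and 'VF' are mapped to the vCD
    # adapter type SRIOVETHERNETCARD before the section is PUT back.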
5390
5391
5392 def set_numa_affinity(self, vmuuid, paired_threads_id):
5393 """
5394         Method to assign numa affinity in vm configuration parameters
5395 Args :
5396 vmuuid - vm uuid
5397 paired_threads_id - one or more virtual processor
5398 numbers
5399 Returns:
5400             None if numa affinity is assigned successfully
5401 """
5402 try:
5403 vcenter_conect, content = self.get_vcenter_content()
5404 vm_moref_id = self.get_vm_moref_id(vmuuid)
5405
5406 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
5407 if vm_obj:
5408 config_spec = vim.vm.ConfigSpec()
5409 config_spec.extraConfig = []
5410 opt = vim.option.OptionValue()
5411 opt.key = 'numa.nodeAffinity'
5412 opt.value = str(paired_threads_id)
5413 config_spec.extraConfig.append(opt)
5414 task = vm_obj.ReconfigVM_Task(config_spec)
5415 if task:
5416 result = self.wait_for_vcenter_task(task, vcenter_conect)
5417 extra_config = vm_obj.config.extraConfig
5418 flag = False
5419 for opts in extra_config:
5420 if 'numa.nodeAffinity' in opts.key:
5421 flag = True
5422                         self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
5423 "value {} for vm {}".format(opt.value, vm_obj))
5424 if flag:
5425 return
5426 else:
5427 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
5428 except Exception as exp:
5429 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
5430 "for VM {} : {}".format(vm_obj, vm_moref_id))
5431 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
5432 "affinity".format(exp))
5433
5434
5435 def cloud_init(self, vapp, cloud_config):
5436 """
5437 Method to inject ssh-key
5438 vapp - vapp object
5439 cloud_config a dictionary with:
5440 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
5441 'users': (optional) list of users to be inserted, each item is a dict with:
5442 'name': (mandatory) user name,
5443 'key-pairs': (optional) list of strings with the public key to be inserted to the user
5444 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
5445 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
5446 'config-files': (optional). List of files to be transferred. Each item is a dict with:
5447 'dest': (mandatory) string with the destination absolute path
5448 'encoding': (optional, by default text). Can be one of:
5449 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
5450 'content' (mandatory): string with the content of the file
5451 'permissions': (optional) string with file permissions, typically octal notation '0644'
5452 'owner': (optional) file owner, string with the format 'owner:group'
5453                 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
5454 """
5455 try:
5456 if not isinstance(cloud_config, dict):
5457 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
5458 else:
5459 key_pairs = []
5460 userdata = []
5461 if "key-pairs" in cloud_config:
5462 key_pairs = cloud_config["key-pairs"]
5463
5464 if "users" in cloud_config:
5465 userdata = cloud_config["users"]
5466
5467 self.logger.debug("cloud_init : Guest os customization started..")
5468 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
5469 customize_script = customize_script.replace("&","&amp;")
5470 self.guest_customization(vapp, customize_script)
5471
5472 except Exception as exp:
5473 self.logger.error("cloud_init : exception occurred while injecting "\
5474 "ssh-key")
5475 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
5476 "ssh-key".format(exp))
5477
5478 def format_script(self, key_pairs=[], users_list=[]):
5479 bash_script = """#!/bin/sh
5480 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5481 if [ "$1" = "precustomization" ];then
5482 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5483 """
5484
5485 keys = "\n".join(key_pairs)
5486 if keys:
5487 keys_data = """
5488 if [ ! -d /root/.ssh ];then
5489 mkdir /root/.ssh
5490 chown root:root /root/.ssh
5491 chmod 700 /root/.ssh
5492 touch /root/.ssh/authorized_keys
5493 chown root:root /root/.ssh/authorized_keys
5494 chmod 600 /root/.ssh/authorized_keys
5495 # make centos with selinux happy
5496 which restorecon && restorecon -Rv /root/.ssh
5497 else
5498 touch /root/.ssh/authorized_keys
5499 chown root:root /root/.ssh/authorized_keys
5500 chmod 600 /root/.ssh/authorized_keys
5501 fi
5502 echo '{key}' >> /root/.ssh/authorized_keys
5503 """.format(key=keys)
5504
5505 bash_script+= keys_data
5506
5507 for user in users_list:
5508 if 'name' in user: user_name = user['name']
5509 if 'key-pairs' in user:
5510 user_keys = "\n".join(user['key-pairs'])
5511 else:
5512 user_keys = None
5513
5514 add_user_name = """
5515 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
5516 """.format(user_name=user_name)
5517
5518 bash_script+= add_user_name
5519
5520 if user_keys:
5521 user_keys_data = """
5522 mkdir /home/{user_name}/.ssh
5523 chown {user_name}:{user_name} /home/{user_name}/.ssh
5524 chmod 700 /home/{user_name}/.ssh
5525 touch /home/{user_name}/.ssh/authorized_keys
5526 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
5527 chmod 600 /home/{user_name}/.ssh/authorized_keys
5528 # make centos with selinux happy
5529 which restorecon && restorecon -Rv /home/{user_name}/.ssh
5530 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
5531 """.format(user_name=user_name,user_key=user_keys)
5532
5533 bash_script+= user_keys_data
5534
5535 return bash_script+"\n\tfi"
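    # Example (illustrative keys and user): the call below returns a /bin/sh
    # script that, in the "precustomization" phase, appends the first key to
    # /root/.ssh/authorized_keys, creates the 'ubuntu' user and seeds
    # /home/ubuntu/.ssh/authorized_keys with the second key.
    #
    #     script = self.format_script(
    #         key_pairs=['ssh-rsa AAAA... osm@ro'],
    #         users_list=[{'name': 'ubuntu', 'key-pairs': ['ssh-rsa BBBB... ubuntu@host']}])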
5536
5537 def guest_customization(self, vapp, customize_script):
5538 """
5539 Method to customize guest os
5540 vapp - Vapp object
5541 customize_script - Customize script to be run at first boot of VM.
5542 """
5543 for vm in vapp.get_all_vms():
5544 vm_id = vm.get('id').split(':')[-1]
5545 vm_name = vm.get('name')
5546 vm_name = vm_name.replace('_','-')
5547
5548 vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
5549 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5550 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5551
5552 headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
5553
5554 data = """<GuestCustomizationSection
5555 xmlns="http://www.vmware.com/vcloud/v1.5"
5556 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5557 ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
5558 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
5559 <Enabled>true</Enabled>
5560 <ChangeSid>false</ChangeSid>
5561 <VirtualMachineId>{}</VirtualMachineId>
5562 <JoinDomainEnabled>false</JoinDomainEnabled>
5563 <UseOrgSettings>false</UseOrgSettings>
5564 <AdminPasswordEnabled>false</AdminPasswordEnabled>
5565 <AdminPasswordAuto>true</AdminPasswordAuto>
5566 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
5567 <AdminAutoLogonCount>0</AdminAutoLogonCount>
5568 <ResetPasswordRequired>false</ResetPasswordRequired>
5569 <CustomizationScript>{}</CustomizationScript>
5570 <ComputerName>{}</ComputerName>
5571 <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
5572 </GuestCustomizationSection>
5573 """.format(vm_customization_url,
5574 vm_id,
5575 customize_script,
5576 vm_name,
5577 vm_customization_url)
5578
5579 response = self.perform_request(req_type='PUT',
5580 url=vm_customization_url,
5581 headers=headers,
5582 data=data)
5583 if response.status_code == 202:
5584 guest_task = self.get_task_from_response(response.content)
5585 self.client.get_task_monitor().wait_for_success(task=guest_task)
5586 self.logger.info("guest_customization : customized guest os task "\
5587 "completed for VM {}".format(vm_name))
5588 else:
5589 self.logger.error("guest_customization : task for customized guest os"\
5590                                   " failed for VM {}".format(vm_name))
5591 raise vimconn.vimconnException("guest_customization : failed to perform"\
5592                                    " guest os customization on VM {}".format(vm_name))
5593
5594 def add_new_disk(self, vapp_uuid, disk_size):
5595 """
5596 Method to create an empty vm disk
5597
5598 Args:
5599 vapp_uuid - is vapp identifier.
5600 disk_size - size of disk to be created in GB
5601
5602 Returns:
5603 None
5604 """
5605 status = False
5606 vm_details = None
5607 try:
5608 #Disk size in GB, convert it into MB
5609 if disk_size is not None:
5610 disk_size_mb = int(disk_size) * 1024
5611 vm_details = self.get_vapp_details_rest(vapp_uuid)
5612
5613 if vm_details and "vm_virtual_hardware" in vm_details:
5614 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5615 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5616 status = self.add_new_disk_rest(disk_href, disk_size_mb)
5617
5618 except Exception as exp:
5619 msg = "Error occurred while creating new disk {}.".format(exp)
5620 self.rollback_newvm(vapp_uuid, msg)
5621
5622 if status:
5623 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5624 else:
5625 #If failed to add disk, delete VM
5626 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
5627 self.rollback_newvm(vapp_uuid, msg)
5628
5629
5630 def add_new_disk_rest(self, disk_href, disk_size_mb):
5631 """
5632         Retrieves vApp Disks section & adds a new empty disk
5633
5634 Args:
5635             disk_href: Disk section href to add disk
5636 disk_size_mb: Disk size in MB
5637
5638 Returns: Status of add new disk task
5639 """
5640 status = False
5641 if self.client._session:
5642 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5643 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5644 response = self.perform_request(req_type='GET',
5645 url=disk_href,
5646 headers=headers)
5647
5648 if response.status_code == 403:
5649 response = self.retry_rest('GET', disk_href)
5650
5651 if response.status_code != requests.codes.ok:
5652 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
5653 .format(disk_href, response.status_code))
5654 return status
5655 try:
5656                 #Find bus type & max of instance IDs assigned to disks
5657 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5658 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
5659 #For python3
5660 #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
5661 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
5662 instance_id = 0
5663 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
5664 if item.find("rasd:Description",namespaces).text == "Hard disk":
5665 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
5666 if inst_id > instance_id:
5667 instance_id = inst_id
5668 disk_item = item.find("rasd:HostResource" ,namespaces)
5669 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
5670 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
5671
5672 instance_id = instance_id + 1
5673 new_item = """<Item>
5674 <rasd:Description>Hard disk</rasd:Description>
5675 <rasd:ElementName>New disk</rasd:ElementName>
5676 <rasd:HostResource
5677 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
5678 vcloud:capacity="{}"
5679 vcloud:busSubType="{}"
5680 vcloud:busType="{}"></rasd:HostResource>
5681 <rasd:InstanceID>{}</rasd:InstanceID>
5682 <rasd:ResourceType>17</rasd:ResourceType>
5683 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
5684
5685 new_data = response.content
5686 #Add new item at the bottom
5687 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
5688
5689 # Send PUT request to modify virtual hardware section with new disk
5690 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
5691
5692 response = self.perform_request(req_type='PUT',
5693 url=disk_href,
5694 data=new_data,
5695 headers=headers)
5696
5697 if response.status_code == 403:
5698 add_headers = {'Content-Type': headers['Content-Type']}
5699 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
5700
5701 if response.status_code != 202:
5702 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
5703 .format(disk_href, response.status_code, response.content))
5704 else:
5705 add_disk_task = self.get_task_from_response(response.content)
5706 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
5707 if result.get('status') == 'success':
5708 status = True
5709 else:
5710 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
5711
5712 except Exception as exp:
5713 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
5714
5715 return status
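    # Worked example (hypothetical values): with one existing "Hard disk" Item at
    # rasd:InstanceID 2000 on a "lsilogic"/"scsi" bus, a 10 GB request reaches
    # this method as disk_size_mb=10240 and the injected <Item> reuses busSubType
    # "lsilogic" and busType "scsi" with InstanceID 2001 before the
    # rasdItemsList document is PUT back to disk_href.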
5716
5717
5718 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
5719 """
5720 Method to add existing disk to vm
5721 Args :
5722 catalogs - List of VDC catalogs
5723 image_id - Catalog ID
5724 template_name - Name of template in catalog
5725 vapp_uuid - UUID of vApp
5726 Returns:
5727 None
5728 """
5729 disk_info = None
5730 vcenter_conect, content = self.get_vcenter_content()
5731 #find moref-id of vm in image
5732 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
5733 image_id=image_id,
5734 )
5735
5736 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
5737 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
5738 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
5739 if catalog_vm_moref_id:
5740 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
5741 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
5742 if catalog_vm_obj:
5743 #find existing disk
5744 disk_info = self.find_disk(catalog_vm_obj)
5745 else:
5746 exp_msg = "No VM with image id {} found".format(image_id)
5747 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5748 else:
5749 exp_msg = "No Image found with image ID {} ".format(image_id)
5750 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5751
5752 if disk_info:
5753 self.logger.info("Existing disk_info : {}".format(disk_info))
5754 #get VM
5755 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5756 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
5757 if vm_obj:
5758 status = self.add_disk(vcenter_conect=vcenter_conect,
5759 vm=vm_obj,
5760 disk_info=disk_info,
5761 size=size,
5762 vapp_uuid=vapp_uuid
5763 )
5764 if status:
5765 self.logger.info("Disk from image id {} added to {}".format(image_id,
5766 vm_obj.config.name)
5767 )
5768 else:
5769 msg = "No disk found with image id {} to add in VM {}".format(
5770 image_id,
5771 vm_obj.config.name)
5772 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
5773
5774
5775 def find_disk(self, vm_obj):
5776 """
5777 Method to find details of existing disk in VM
5778 Args :
5779 vm_obj - vCenter object of VM
5780 image_id - Catalog ID
5781 Returns:
5782 disk_info : dict of disk details
5783 """
5784 disk_info = {}
5785 if vm_obj:
5786 try:
5787 devices = vm_obj.config.hardware.device
5788 for device in devices:
5789 if type(device) is vim.vm.device.VirtualDisk:
5790 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
5791 disk_info["full_path"] = device.backing.fileName
5792 disk_info["datastore"] = device.backing.datastore
5793 disk_info["capacityKB"] = device.capacityInKB
5794 break
5795 except Exception as exp:
5796 self.logger.error("find_disk() : exception occurred while "\
5797 "getting existing disk details :{}".format(exp))
5798 return disk_info
5799
5800
5801 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
5802 """
5803 Method to add existing disk in VM
5804 Args :
5805 vcenter_conect - vCenter content object
5806 vm - vCenter vm object
5807 disk_info : dict of disk details
5808 Returns:
5809 status : status of add disk task
5810 """
5811 datastore = disk_info["datastore"] if "datastore" in disk_info else None
5812 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
5813 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
5814 if size is not None:
5815 #Convert size from GB to KB
5816 sizeKB = int(size) * 1024 * 1024
5817             #compare size of existing disk and user given size. Assign whichever is greater
5818 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
5819 sizeKB, capacityKB))
5820 if sizeKB > capacityKB:
5821 capacityKB = sizeKB
5822
5823 if datastore and fullpath and capacityKB:
5824 try:
5825 spec = vim.vm.ConfigSpec()
5826 # get all disks on a VM, set unit_number to the next available
5827 unit_number = 0
5828 for dev in vm.config.hardware.device:
5829 if hasattr(dev.backing, 'fileName'):
5830 unit_number = int(dev.unitNumber) + 1
5831 # unit_number 7 reserved for scsi controller
5832 if unit_number == 7:
5833 unit_number += 1
5834 if isinstance(dev, vim.vm.device.VirtualDisk):
5835 #vim.vm.device.VirtualSCSIController
5836 controller_key = dev.controllerKey
5837
5838 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
5839 unit_number, controller_key))
5840 # add disk here
5841 dev_changes = []
5842 disk_spec = vim.vm.device.VirtualDeviceSpec()
5843 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5844 disk_spec.device = vim.vm.device.VirtualDisk()
5845 disk_spec.device.backing = \
5846 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
5847 disk_spec.device.backing.thinProvisioned = True
5848 disk_spec.device.backing.diskMode = 'persistent'
5849 disk_spec.device.backing.datastore = datastore
5850 disk_spec.device.backing.fileName = fullpath
5851
5852 disk_spec.device.unitNumber = unit_number
5853 disk_spec.device.capacityInKB = capacityKB
5854 disk_spec.device.controllerKey = controller_key
5855 dev_changes.append(disk_spec)
5856 spec.deviceChange = dev_changes
5857 task = vm.ReconfigVM_Task(spec=spec)
5858 status = self.wait_for_vcenter_task(task, vcenter_conect)
5859 return status
5860 except Exception as exp:
5861 exp_msg = "add_disk() : exception {} occurred while adding disk "\
5862 "{} to vm {}".format(exp,
5863 fullpath,
5864 vm.config.name)
5865 self.rollback_newvm(vapp_uuid, exp_msg)
5866 else:
5867 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
5868 self.rollback_newvm(vapp_uuid, msg)
5869
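    # Sizing note for add_disk() above (illustrative): a user-requested size of 10 GB is
    # converted as 10 * 1024 * 1024 = 10485760 KB; if the template disk's capacityKB is
    # larger than that, the existing capacity is kept, otherwise the requested size wins.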
5870
5871 def get_vcenter_content(self):
5872 """
5873 Get the vsphere content object
5874 """
5875 try:
5876 vm_vcenter_info = self.get_vm_vcenter_info()
5877 except Exception as exp:
5878 self.logger.error("Error occurred while getting vCenter infromationn"\
5879 " for VM : {}".format(exp))
5880 raise vimconn.vimconnException(message=exp)
5881
5882 context = None
5883 if hasattr(ssl, '_create_unverified_context'):
5884 context = ssl._create_unverified_context()
5885
5886 vcenter_conect = SmartConnect(
5887 host=vm_vcenter_info["vm_vcenter_ip"],
5888 user=vm_vcenter_info["vm_vcenter_user"],
5889 pwd=vm_vcenter_info["vm_vcenter_password"],
5890 port=int(vm_vcenter_info["vm_vcenter_port"]),
5891 sslContext=context
5892 )
5893 atexit.register(Disconnect, vcenter_conect)
5894 content = vcenter_conect.RetrieveContent()
5895 return vcenter_conect, content
5896
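    # Usage sketch for get_vcenter_content() (illustrative only; the VM name is hypothetical):
    #   si, content = self.get_vcenter_content()
    #   vm_obj = self.get_obj(content, [vim.VirtualMachine], "my-vm")
    # 'si' is the pyVmomi ServiceInstance returned by SmartConnect(); Disconnect(si) is
    # already registered via atexit, so callers do not need to close the session themselves.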
5897
5898 def get_vm_moref_id(self, vapp_uuid):
5899 """
5900 Get the moref_id of given VM
5901 """
5902 try:
5903 if vapp_uuid:
5904 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
5905 if vm_details and "vm_vcenter_info" in vm_details:
5906 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
5907 return vm_moref_id
5908
5909 except Exception as exp:
5910 self.logger.error("Error occurred while getting VM moref ID "\
5911 " for VM : {}".format(exp))
5912 return None
5913
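    # Note: get_vm_moref_id() returns the vCenter managed-object reference of the VM,
    # typically a string such as "vm-1234" (value shown is hypothetical), or None when
    # the vApp details do not carry vCenter information.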
5914
5915 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
5916 """
5917 Method to get vApp template details
5918 Args :
5919 catalogs - list of VDC catalogs
5920 image_id - Catalog ID to find
5921 template_name : template name in catalog
5922 Returns:
5923 parsed_response : dict of vApp template details
5924 """
5925 parsed_response = {}
5926
5927 vca = self.connect_as_admin()
5928 if not vca:
5929 raise vimconn.vimconnConnectionException("Failed to connect to vCD")
5930
5931 try:
5932 org, vdc = self.get_vdc_details()
5933 catalog = self.get_catalog_obj(image_id, catalogs)
5934 if catalog:
5935 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
5936 catalog_items = [items.attrib]
5937
5938 if len(catalog_items) == 1:
5939 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5940 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5941
5942 response = self.perform_request(req_type='GET',
5943 url=catalog_items[0].get('href'),
5944 headers=headers)
5945 catalogItem = XmlElementTree.fromstring(response.content)
5946 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
5947 vapp_template_href = entity.get("href")
5948 #get vapp details and parse moref id
5949
5950 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
5951 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
5952 'vmw': 'http://www.vmware.com/schema/ovf',
5953 'vm': 'http://www.vmware.com/vcloud/v1.5',
5954 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5955 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
5956 'xmlns':"http://www.vmware.com/vcloud/v1.5"
5957 }
5958
5959 if vca._session:
5960 response = self.perform_request(req_type='GET',
5961 url=vapp_template_href,
5962 headers=headers)
5963
5964 if response.status_code != requests.codes.ok:
5965 self.logger.debug("REST API call {} failed. Return status code {}".format(
5966 vapp_template_href, response.status_code))
5967
5968 else:
5969 xmlroot_respond = XmlElementTree.fromstring(response.content)
5970 children_section = xmlroot_respond.find('vm:Children/', namespaces)
5971 if children_section is not None:
5972 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
5973 if vCloud_extension_section is not None:
5974 vm_vcenter_info = {}
5975 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
5976 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
5977 if vmext is not None:
5978 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
5979 parsed_response["vm_vcenter_info"]= vm_vcenter_info
5980
5981 except Exception as exp :
5982 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
5983
5984 return parsed_response
5985
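    # Illustrative shape of the dict returned by get_vapp_template_details(); the moref
    # value is hypothetical:
    #   {"vm_vcenter_info": {"vm_moref_id": "vm-1234"}}
    # An empty dict is returned when the template has no VCloudExtension section.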
5986
5987 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
5988 """
5989 Method to delete vApp
5990 Args :
5991 vapp_uuid - vApp UUID
5992 msg - Error message to be logged
5993 exp_type : Exception type
5994 Returns:
5995 None
5996 """
5997 if vapp_uuid:
5998 status = self.delete_vminstance(vapp_uuid)
5999 else:
6000 msg = "No vApp ID"
6001 self.logger.error(msg)
6002 if exp_type == "Genric":
6003 raise vimconn.vimconnException(msg)
6004 elif exp_type == "NotFound":
6005 raise vimconn.vimconnNotFoundException(message=msg)
6006
6007 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
6008 """
6009 Method to attach SRIOV adapters to VM
6010
6011 Args:
6012 vapp_uuid - uuid of vApp/VM
6013 sriov_nets - SRIOV devices information as specified in VNFD (flavor)
6014 vmname_andid - vmname
6015
6016 Returns:
6017 The status of the add SRIOV adapter task, the vm object and
6018 the vcenter_conect object
6019 """
6020 vm_obj = None
6021 vcenter_conect, content = self.get_vcenter_content()
6022 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
6023
6024 if vm_moref_id:
6025 try:
6026 no_of_sriov_devices = len(sriov_nets)
6027 if no_of_sriov_devices > 0:
6028 #Get VM and its host
6029 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
6030 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
6031 if host_obj and vm_obj:
6032 #get SRIOV devices from the host on which the vapp is currently installed
6033 avilable_sriov_devices = self.get_sriov_devices(host_obj,
6034 no_of_sriov_devices,
6035 )
6036
6037 if len(avilable_sriov_devices) == 0:
6038 #find other hosts with active pci devices
6039 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
6040 content,
6041 no_of_sriov_devices,
6042 )
6043
6044 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
6045 #Migrate vm to the host where SRIOV devices are available
6046 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
6047 new_host_obj))
6048 task = self.relocate_vm(new_host_obj, vm_obj)
6049 if task is not None:
6050 result = self.wait_for_vcenter_task(task, vcenter_conect)
6051 self.logger.info("Migrate VM status: {}".format(result))
6052 host_obj = new_host_obj
6053 else:
6054 self.logger.info("Fail to migrate VM : {}".format(result))
6055 raise vimconn.vimconnNotFoundException(
6056 "Fail to migrate VM : {} to host {}".format(
6057 vmname_andid,
6058 new_host_obj)
6059 )
6060
6061 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
6062 #Add SRIOV devices one by one
6063 for sriov_net in sriov_nets:
6064 network_name = sriov_net.get('net_id')
6065 dvs_portgr_name = self.create_dvPort_group(network_name)
6066 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
6067 #add vlan ID: modify portgroup for vlan ID
6068 self.configure_vlanID(content, vcenter_conect, network_name)
6069
6070 task = self.add_sriov_to_vm(content,
6071 vm_obj,
6072 host_obj,
6073 network_name,
6074 avilable_sriov_devices[0]
6075 )
6076 if task:
6077 status= self.wait_for_vcenter_task(task, vcenter_conect)
6078 if status:
6079 self.logger.info("Added SRIOV {} to VM {}".format(
6080 no_of_sriov_devices,
6081 str(vm_obj)))
6082 else:
6083 self.logger.error("Fail to add SRIOV {} to VM {}".format(
6084 no_of_sriov_devices,
6085 str(vm_obj)))
6086 raise vimconn.vimconnUnexpectedResponse(
6087 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
6088 )
6089 return True, vm_obj, vcenter_conect
6090 else:
6091 self.logger.error("Currently there is no host with"\
6092 " {} number of avaialble SRIOV "\
6093 "VFs required for VM {}".format(
6094 no_of_sriov_devices,
6095 vmname_andid)
6096 )
6097 raise vimconn.vimconnNotFoundException(
6098 "Currently there is no host with {} "\
6099 "number of avaialble SRIOV devices required for VM {}".format(
6100 no_of_sriov_devices,
6101 vmname_andid))
6102 else:
6103 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
6104
6105 except vmodl.MethodFault as error:
6106 self.logger.error("Error occurred while adding SRIOV {} ",error)
6107 return None, vm_obj, vcenter_conect
6108
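    # Illustrative shape of the 'sriov_nets' argument consumed by add_sriov(); only the
    # 'net_id' and 'type' keys are read, values shown are hypothetical:
    #   sriov_nets = [{"net_id": "sriov_net1", "type": "VF"},
    #                 {"net_id": "sriov_net2", "type": "SR-IOV"}]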
6109
6110 def get_sriov_devices(self,host, no_of_vfs):
6111 """
6112 Method to get the details of SRIOV devices on given host
6113 Args:
6114 host - vSphere host object
6115 no_of_vfs - number of VFs needed on host
6116
6117 Returns:
6118 array of SRIOV devices
6119 """
6120 sriovInfo=[]
6121 if host:
6122 for device in host.config.pciPassthruInfo:
6123 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
6124 if device.numVirtualFunction >= no_of_vfs:
6125 sriovInfo.append(device)
6126 break
6127 return sriovInfo
6128
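    # Note: get_sriov_devices() returns at most one vim.host.SriovInfo entry, i.e. the
    # first active SR-IOV device on the host whose numVirtualFunction is at least the
    # requested number of VFs; an empty list means the host cannot satisfy the request.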
6129
6130 def get_host_and_sriov_devices(self, content, no_of_vfs):
6131 """
6132 Method to get the details of SRIOV devices on all hosts
6133
6134 Args:
6135 content - vSphere host object
6136 no_of_vfs - number of pci VFs needed on host
6137
6138 Returns:
6139 array of SRIOV devices and host object
6140 """
6141 host_obj = None
6142 sriov_device_objs = None
6143 try:
6144 if content:
6145 container = content.viewManager.CreateContainerView(content.rootFolder,
6146 [vim.HostSystem], True)
6147 for host in container.view:
6148 devices = self.get_sriov_devices(host, no_of_vfs)
6149 if devices:
6150 host_obj = host
6151 sriov_device_objs = devices
6152 break
6153 except Exception as exp:
6154 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
6155
6156 return host_obj,sriov_device_objs
6157
6158
6159 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
6160 """
6161 Method to add SRIOV adapter to vm
6162
6163 Args:
6164 host_obj - vSphere host object
6165 vm_obj - vSphere vm object
6166 content - vCenter content object
6167 network_name - name of distributed virtual portgroup
6168 sriov_device - SRIOV device info
6169
6170 Returns:
6171 task object
6172 """
6173 devices = []
6174 vnic_label = "sriov nic"
6175 try:
6176 dvs_portgr = self.get_dvport_group(network_name)
6177 network_name = dvs_portgr.name
6178 nic = vim.vm.device.VirtualDeviceSpec()
6179 # VM device
6180 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
6181 nic.device = vim.vm.device.VirtualSriovEthernetCard()
6182 nic.device.addressType = 'assigned'
6183 #nic.device.key = 13016
6184 nic.device.deviceInfo = vim.Description()
6185 nic.device.deviceInfo.label = vnic_label
6186 nic.device.deviceInfo.summary = network_name
6187 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
6188
6189 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
6190 nic.device.backing.deviceName = network_name
6191 nic.device.backing.useAutoDetect = False
6192 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
6193 nic.device.connectable.startConnected = True
6194 nic.device.connectable.allowGuestControl = True
6195
6196 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
6197 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
6198 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
6199
6200 devices.append(nic)
6201 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
6202 task = vm_obj.ReconfigVM_Task(vmconf)
6203 return task
6204 except Exception as exp:
6205 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
6206 return None
6207
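    # Usage sketch for add_sriov_to_vm() (illustrative): the returned ReconfigVM_Task is
    # normally passed to wait_for_vcenter_task(), e.g.
    #   task = self.add_sriov_to_vm(content, vm_obj, host_obj, net_name, sriov_dev)
    #   if task:
    #       status = self.wait_for_vcenter_task(task, vcenter_conect)
    # where net_name and sriov_dev come from the caller (see add_sriov() above).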
6208
6209 def create_dvPort_group(self, network_name):
6210 """
6211 Method to create distributed virtual portgroup
6212
6213 Args:
6214 network_name - name of network/portgroup
6215
6216 Returns:
6217 portgroup key
6218 """
6219 try:
6220 new_network_name = [network_name, '-', str(uuid.uuid4())]
6221 network_name=''.join(new_network_name)
6222 vcenter_conect, content = self.get_vcenter_content()
6223
6224 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
6225 if dv_switch:
6226 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6227 dv_pg_spec.name = network_name
6228
6229 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
6230 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6231 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
6232 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
6233 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
6234 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
6235
6236 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
6237 self.wait_for_vcenter_task(task, vcenter_conect)
6238
6239 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
6240 if dvPort_group:
6241 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
6242 return dvPort_group.key
6243 else:
6244 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
6245
6246 except Exception as exp:
6247 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
6248 " : {}".format(network_name, exp))
6249 return None
6250
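    # Note: create_dvPort_group() above returns the portgroup *key* (typically a string
    # such as "dvportgroup-123"; the value shown is hypothetical), not its name. That key
    # is what get_dvport_group() later matches against item.key when looking the group up.
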
6251 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
6252 """
6253 Method to reconfigure distributed virtual portgroup
6254
6255 Args:
6256 dvPort_group_name - name of distributed virtual portgroup
6257 content - vCenter content object
6258 config_info - distributed virtual portgroup configuration
6259
6260 Returns:
6261 task object
6262 """
6263 try:
6264 dvPort_group = self.get_dvport_group(dvPort_group_name)
6265 if dvPort_group:
6266 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6267 dv_pg_spec.configVersion = dvPort_group.config.configVersion
6268 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6269 if "vlanID" in config_info:
6270 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
6271 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
6272
6273 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
6274 return task
6275 else:
6276 return None
6277 except Exception as exp:
6278 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
6279 " : {}".format(dvPort_group_name, exp))
6280 return None
6281
6282
6283 def destroy_dvport_group(self , dvPort_group_name):
6284 """
6285 Method to destroy distributed virtual portgroup
6286
6287 Args:
6288 dvPort_group_name - name of the distributed virtual portgroup
6289
6290 Returns:
6291 True if the portgroup was deleted successfully, else False
6292 """
6293 vcenter_conect, content = self.get_vcenter_content()
6294 try:
6295 status = None
6296 dvPort_group = self.get_dvport_group(dvPort_group_name)
6297 if dvPort_group:
6298 task = dvPort_group.Destroy_Task()
6299 status = self.wait_for_vcenter_task(task, vcenter_conect)
6300 return status
6301 except vmodl.MethodFault as exp:
6302 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
6303 exp, dvPort_group_name))
6304 return None
6305
6306
6307 def get_dvport_group(self, dvPort_group_name):
6308 """
6309 Method to get distributed virtual portgroup
6310
6311 Args:
6312 dvPort_group_name - name of the distributed virtual portgroup
6313
6314 Returns:
6315 portgroup object
6316 """
6317 vcenter_conect, content = self.get_vcenter_content()
6318 dvPort_group = None
6319 try:
6320 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
6321 for item in container.view:
6322 if item.key == dvPort_group_name:
6323 dvPort_group = item
6324 break
6325 return dvPort_group
6326 except vmodl.MethodFault as exp:
6327 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6328 exp, dvPort_group_name))
6329 return None
6330
6331 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
6332 """
6333 Method to get distributed virtual portgroup vlanID
6334
6335 Args:
6336 dvPort_group_name - name of the distributed virtual portgroup
6337
6338 Returns:
6339 vlan ID
6340 """
6341 vlanId = None
6342 try:
6343 dvPort_group = self.get_dvport_group(dvPort_group_name)
6344 if dvPort_group:
6345 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
6346 except vmodl.MethodFault as exp:
6347 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6348 exp, dvPort_group_name))
6349 return vlanId
6350
6351
6352 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
6353 """
6354 Method to configure vlanID in distributed virtual portgroup
6355
6356 Args:
6357 network_name - name of network/portgroup
6358
6359 Returns:
6360 None
6361 """
6362 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
6363 if vlanID == 0:
6364 #configure vlanID
6365 vlanID = self.genrate_vlanID(dvPort_group_name)
6366 config = {"vlanID":vlanID}
6367 task = self.reconfig_portgroup(content, dvPort_group_name,
6368 config_info=config)
6369 if task:
6370 status= self.wait_for_vcenter_task(task, vcenter_conect)
6371 if status:
6372 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
6373 dvPort_group_name,vlanID))
6374 else:
6375 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
6376 dvPort_group_name, vlanID))
6377
6378
6379 def genrate_vlanID(self, network_name):
6380 """
6381 Method to get unused vlanID
6382 Args:
6383 network_name - name of network/portgroup
6384 Returns:
6385 vlanID
6386 """
6387 vlan_id = None
6388 used_ids = []
6389 if self.config.get('vlanID_range') is None:
6390 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
6391 "config value before creating an SRIOV network with a vlan tag")
6392 if "used_vlanIDs" not in self.persistent_info:
6393 self.persistent_info["used_vlanIDs"] = {}
6394 else:
6395 used_ids = self.persistent_info["used_vlanIDs"].values()
6396 #For python3
6397 #used_ids = list(self.persistent_info["used_vlanIDs"].values())
6398
6399 for vlanID_range in self.config.get('vlanID_range'):
6400 start_vlanid , end_vlanid = vlanID_range.split("-")
6401 if int(start_vlanid) > int(end_vlanid):
6402 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
6403 vlanID_range))
6404
6405 for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
6406 #For python3
6407 #for id in range(int(start_vlanid), int(end_vlanid) + 1):
6408 if id not in used_ids:
6409 vlan_id = id
6410 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
6411 return vlan_id
6412 if vlan_id is None:
6413 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
6414
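    # Illustrative config for genrate_vlanID() (values are hypothetical): the connector
    # expects a list of "start-end" strings, e.g.
    #   config = {"vlanID_range": ["3000-3100", "3200-3210"]}
    # The first ID in those ranges not already present in persistent_info["used_vlanIDs"]
    # is returned and recorded under the portgroup name.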
6415
6416 def get_obj(self, content, vimtype, name):
6417 """
6418 Get the vsphere object associated with a given text name
6419 """
6420 obj = None
6421 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
6422 for item in container.view:
6423 if item.name == name:
6424 obj = item
6425 break
6426 return obj
6427
6428
6429 def insert_media_to_vm(self, vapp, image_id):
6430 """
6431 Method to insert media CD-ROM (ISO image) from catalog to vm.
6432 vapp - vapp object to get vm id
6433 image_id - image id of the CD-ROM (ISO) to be inserted into the vm
6434 """
6435 # create connection object
6436 vca = self.connect()
6437 try:
6438 # fetching catalog details
6439 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
6440 if vca._session:
6441 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6442 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6443 response = self.perform_request(req_type='GET',
6444 url=rest_url,
6445 headers=headers)
6446
6447 if response.status_code != 200:
6448 self.logger.error("REST call {} failed reason : {}"\
6449 "status code : {}".format(url_rest_call,
6450 response.content,
6451 response.status_code))
6452 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
6453 "catalog details")
6454 # searching iso name and id
6455 iso_name,media_id = self.get_media_details(vca, response.content)
6456
6457 if iso_name and media_id:
6458 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6459 <ns6:MediaInsertOrEjectParams
6460 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
6461 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6462 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
6463 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6464 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
6465 xmlns:ns7="http://www.vmware.com/schema/ovf"
6466 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
6467 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
6468 <ns6:Media
6469 type="application/vnd.vmware.vcloud.media+xml"
6470 name="{}"
6471 id="urn:vcloud:media:{}"
6472 href="https://{}/api/media/{}"/>
6473 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
6474 self.url,media_id)
6475
6476 for vms in vapp.get_all_vms():
6477 vm_id = vms.get('id').split(':')[-1]
6478
6479 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
6480 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
6481
6482 response = self.perform_request(req_type='POST',
6483 url=rest_url,
6484 data=data,
6485 headers=headers)
6486
6487 if response.status_code != 202:
6488 error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
6489 "Status code {}".format(response.text, response.status_code)
6490 self.logger.error(error_msg)
6491 raise vimconn.vimconnException(error_msg)
6492 else:
6493 task = self.get_task_from_response(response.content)
6494 result = self.client.get_task_monitor().wait_for_success(task=task)
6495 if result.get('status') == 'success':
6496 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
6497 " image to vm {}".format(vm_id))
6498
6499 except Exception as exp:
6500 self.logger.error("insert_media_to_vm() : exception occurred "\
6501 "while inserting media CD-ROM")
6502 raise vimconn.vimconnException(message=exp)
6503
6504
6505 def get_media_details(self, vca, content):
6506 """
6507 Method to get catalog item details
6508 vca - connection object
6509 content - Catalog details
6510 Return - Media name, media id
6511 """
6512 cataloghref_list = []
6513 try:
6514 if content:
6515 vm_list_xmlroot = XmlElementTree.fromstring(content)
6516 for child in vm_list_xmlroot.iter():
6517 if 'CatalogItem' in child.tag:
6518 cataloghref_list.append(child.attrib.get('href'))
6519 if cataloghref_list is not None:
6520 for href in cataloghref_list:
6521 if href:
6522 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6523 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6524 response = self.perform_request(req_type='GET',
6525 url=href,
6526 headers=headers)
6527 if response.status_code != 200:
6528 self.logger.error("REST call {} failed reason : {}"\
6529 "status code : {}".format(href,
6530 response.content,
6531 response.status_code))
6532 raise vimconn.vimconnException("get_media_details : Failed to get "\
6533 "catalogitem details")
6534 list_xmlroot = XmlElementTree.fromstring(response.content)
6535 for child in list_xmlroot.iter():
6536 if 'Entity' in child.tag:
6537 if 'media' in child.attrib.get('href'):
6538 name = child.attrib.get('name')
6539 media_id = child.attrib.get('href').split('/').pop()
6540 return name,media_id
6541 else:
6542 self.logger.debug("Media name and id not found")
6543 return False,False
6544 except Exception as exp:
6545 self.logger.error("get_media_details : exception occurred "\
6546 "getting media details")
6547 raise vimconn.vimconnException(message=exp)
6548
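    # Note: get_media_details() returns a (name, media_id) tuple where media_id is the
    # last path segment of the media href (e.g. ".../api/media/<uuid>" gives "<uuid>");
    # (False, False) is returned when no media entity is found in the catalog item.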
6549
6550 def retry_rest(self, method, url, add_headers=None, data=None):
6551 """ Method to get Token & retry respective REST request
6552 Args:
6553 method - REST method - one of 'GET', 'PUT', 'POST' or 'DELETE'
6554 url - request url to be used
6555 add_headers - Additional headers (optional)
6556 data - Request payload data to be passed in request
6557 Returns:
6558 response - Response of request
6559 """
6560 response = None
6561
6562 #Get token
6563 self.get_token()
6564
6565 if self.client._session:
6566 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6567 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6568
6569 if add_headers:
6570 headers.update(add_headers)
6571
6572 if method == 'GET':
6573 response = self.perform_request(req_type='GET',
6574 url=url,
6575 headers=headers)
6576 elif method == 'PUT':
6577 response = self.perform_request(req_type='PUT',
6578 url=url,
6579 headers=headers,
6580 data=data)
6581 elif method == 'POST':
6582 response = self.perform_request(req_type='POST',
6583 url=url,
6584 headers=headers,
6585 data=data)
6586 elif method == 'DELETE':
6587 response = self.perform_request(req_type='DELETE',
6588 url=url,
6589 headers=headers)
6590 return response
6591
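    # Usage sketch for retry_rest() (illustrative): callers typically invoke it after an
    # initial perform_request() comes back with an authorization failure, e.g.
    #   if response.status_code == 403:
    #       response = self.retry_rest('GET', url)
    # retry_rest() refreshes the token via get_token() and repeats the request once.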
6592
6593 def get_token(self):
6594 """ Generate a new token if expired
6595
6596 Returns:
6597 None; sets self.client to a client object that can later be used to connect to vCloud Director as admin for the VDC
6598 """
6599 try:
6600 self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
6601 self.user,
6602 self.org_name))
6603 host = self.url
6604 client = Client(host, verify_ssl_certs=False)
6605 client.set_highest_supported_version()
6606 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
6607 # connection object
6608 self.client = client
6609
6610 except Exception:
6611 raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
6612 "{} as user: {}".format(self.org_name, self.user))
6613
6614 if not client:
6615 raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
6616
6617
6618 def get_vdc_details(self):
6619 """ Get VDC details using pyVcloud Lib
6620
6621 Returns org and vdc object
6622 """
6623 vdc = None
6624 try:
6625 org = Org(self.client, resource=self.client.get_org())
6626 vdc = org.get_vdc(self.tenant_name)
6627 except Exception as e:
6628 # pyvcloud does not raise a specific exception; refresh the token nevertheless
6629 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
6630
6631 #Retry once, if failed by refreshing token
6632 if vdc is None:
6633 self.get_token()
6634 org = Org(self.client, resource=self.client.get_org())
6635 vdc = org.get_vdc(self.tenant_name)
6636
6637 return org, vdc
6638
6639
6640 def perform_request(self, req_type, url, headers=None, data=None):
6641 """Perform the POST/PUT/GET/DELETE request."""
6642
6643 #Log REST request details
6644 self.log_request(req_type, url=url, headers=headers, data=data)
6645 # perform request and return its result
6646 if req_type == 'GET':
6647 response = requests.get(url=url,
6648 headers=headers,
6649 verify=False)
6650 elif req_type == 'PUT':
6651 response = requests.put(url=url,
6652 headers=headers,
6653 data=data,
6654 verify=False)
6655 elif req_type == 'POST':
6656 response = requests.post(url=url,
6657 headers=headers,
6658 data=data,
6659 verify=False)
6660 elif req_type == 'DELETE':
6661 response = requests.delete(url=url,
6662 headers=headers,
6663 verify=False)
6664 #Log the REST response
6665 self.log_response(response)
6666
6667 return response
6668
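    # Illustrative call to perform_request(); the URL is hypothetical and the headers
    # follow the pattern used throughout this connector:
    #   headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
    #              'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
    #   response = self.perform_request(req_type='GET', url=rest_url, headers=headers)
    # Note that certificate verification is intentionally disabled (verify=False).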
6669
6670 def log_request(self, req_type, url=None, headers=None, data=None):
6671 """Logs REST request details"""
6672
6673 if req_type is not None:
6674 self.logger.debug("Request type: {}".format(req_type))
6675
6676 if url is not None:
6677 self.logger.debug("Request url: {}".format(url))
6678
6679 if headers is not None:
6680 for header in headers:
6681 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
6682
6683 if data is not None:
6684 self.logger.debug("Request data: {}".format(data))
6685
6686
6687 def log_response(self, response):
6688 """Logs REST response details"""
6689
6690 self.logger.debug("Response status code: {} ".format(response.status_code))
6691
6692
6693 def get_task_from_response(self, content):
6694 """
6695 content - API response content(response.content)
6696 return task object
6697 """
6698 xmlroot = XmlElementTree.fromstring(content)
6699 if xmlroot.tag.split('}')[1] == "Task":
6700 return xmlroot
6701 else:
6702 for ele in xmlroot:
6703 if ele.tag.split("}")[1] == "Tasks":
6704 task = ele[0]
6705 break
6706 return task
6707
6708
6709 def power_on_vapp(self,vapp_id, vapp_name):
6710 """
6711 vapp_id - vApp uuid
6712 vapp_name - vApp name
6713 return - Task object
6714 """
6715 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6716 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6717
6718 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
6719 vapp_id)
6720 response = self.perform_request(req_type='POST',
6721 url=poweron_href,
6722 headers=headers)
6723
6724 if response.status_code != 202:
6725 self.logger.error("REST call {} failed reason : {}"\
6726 "status code : {} ".format(poweron_href,
6727 response.content,
6728 response.status_code))
6729 raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
6730 "vApp {}".format(vapp_name))
6731 else:
6732 poweron_task = self.get_task_from_response(response.content)
6733 return poweron_task
6734
6735