| Jeremy Mordkoff | 6f07e6f | 2016-09-07 18:56:51 -0400 | [diff] [blame] | 1 | # |
| 2 | # Copyright 2016 RIFT.IO Inc |
| 3 | # |
| 4 | # Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | # you may not use this file except in compliance with the License. |
| 6 | # You may obtain a copy of the License at |
| 7 | # |
| 8 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | # |
| 10 | # Unless required by applicable law or agreed to in writing, software |
| 11 | # distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | # See the License for the specific language governing permissions and |
| 14 | # limitations under the License. |
| 15 | # |
| 16 | # |
| 17 | |
| 18 | import os,sys,platform |
| 19 | import socket |
| 20 | import time |
| 21 | import re |
| 22 | import logging |
| 23 | |
| 24 | from pprint import pprint |
| 25 | import argparse |
| 26 | |
| 27 | from gi.repository import RwcalYang |
| 28 | from gi.repository.RwTypes import RwStatus |
| 29 | import rw_peas |
| 30 | import rwlogger |
| 31 | import time |
| 32 | |
| 33 | global nova |
| 34 | nova = None |
| 35 | |
def wait_till_active(driver, account, vm_id_list, timeout):
    """Poll until every VM in *vm_id_list* reaches the ACTIVE state.

    Queries the CAL driver for each VM and repeats (sleeping 2 seconds
    between passes) until all report state == 'ACTIVE' or *timeout*
    seconds have elapsed.

    Returns:
        True if every VM became ACTIVE within the timeout, False on
        timeout.  (Previously returned None; callers ignoring the result
        are unaffected.)
    """
    deadline = time.time() + timeout
    done = False

    while time.time() < deadline and not done:
        done = True
        for vm_id in vm_id_list:
            rc, rs = driver.get_vm(account, vm_id)
            assert rc == RwStatus.SUCCESS
            if rs.state != 'ACTIVE':
                done = False
        if not done:
            # Sleep once per polling pass.  The original slept 2 seconds
            # for each not-yet-active VM, so a pass over N pending VMs
            # stalled 2*N seconds before re-checking.
            time.sleep(2)

    return done
| 54 | |
| 55 | |
def get_image_name(node):
    """Return the name of the image backing *node*, or None if not found.

    NOTE(review): relies on a module-level ``driver`` that is not defined
    in this chunk -- confirm it is bound before this is called.
    """
    wanted = node.extra['imageId']
    for image in driver.list_images():
        if image.id == wanted:
            return image.name
    return None
| 62 | |
def get_flavor_name(flavorid):
    """Look up a nova flavor name by id, connecting lazily as admin.

    Caches the nova connection in the module-level ``nova`` global.
    Returns None when no flavor has the given id.
    """
    global nova
    if nova is None:
        nova = ra_nova_connect(project='admin')
    matches = [flv.name for flv in nova.flavors.list(True) if flv.id == flavorid]
    return matches[0] if matches else None
| 71 | |
def hostname():
    """Return the short (unqualified) name of the local host."""
    fqdn = socket.gethostname()
    return fqdn.partition('.')[0]
| 74 | |
def vm_register(id, driver, account, cmdargs, header=True):
    """Register VM(s) with the reservation system.

    Args:
        id: a vm_id, or 'all' to register every VM.
        driver: CAL driver used to list VMs and flavors.
        account: cloud account passed through to the driver.
        cmdargs: parsed command-line args (reads reserve_new_vms).
        header: print the column header before the first row.

    Returns False (without registering) when the module-level ``testbed``
    is None; otherwise returns None.
    """
    if testbed is None:
        print("Cannot register VM without reservation system")
        return False

    # Only claim the VMs for the current user when explicitly requested.
    if cmdargs.reserve_new_vms:
        user = os.environ['USER']
    else:
        user = None
    fmt = "%-28s %-12s %-12s %-15s"
    if header:
        print('VM controller compute mgmt ip')
        print('---------------------------- ------------ ------------ ---------------')
    rc, nodes = driver.get_vm_list(account)
    assert rc == RwStatus.SUCCESS
    for node in nodes.vminfo_list:
        if id == 'all' or node.vm_id == id:
            # BUG fix: get_flavor returns (rc, flavor) -- the original bound
            # the whole tuple to ``flavor`` and asserted a stale rc.
            rc, flavor = driver.get_flavor(account, node.flavor_id)
            assert rc == RwStatus.SUCCESS
            ip = node.management_ip

            huge = 'DISABLED'
            if flavor.guest_epa.mempage_size == 'LARGE':
                huge = flavor.guest_epa.mempage_size
            #compute = utils.find_resource(nova.servers, node.id)
            #compute_name = compute._info['OS-EXT-SRV-ATTR:hypervisor_hostname'].split('.')[0]
            compute_name = hostname()
            try:
                testbed.add_resource(node.vm_name, hostname(), ip, flavor.vm_flavor.memory_mb, flavor.vm_flavor.vcpu_count, user, flavor.name, compute=compute_name, huge_pages=huge )
                print(fmt % ( node.vm_name, hostname(), compute_name, ip ))
            except Exception as e:
                # Best effort: a reservation failure must not abort the listing.
                print("WARNING: Error \"%s\" adding resource to reservation system" % e)
| 107 | |
class OFromDict(object):
    """Wrap a mapping so its keys become attributes of the object."""

    def __init__(self, d):
        # Bind the mapping directly as the instance namespace, so
        # obj.key reads d['key'].
        self.__dict__ = d
| 111 | |
| 112 | |
def vm_create_subcommand(driver, account, cmdargs):
    """Process the VM create subcommand.

    Resolves the requested flavor (exact name match) and image (substring
    match), derives unique VM names of the form rwopenstack_<host>_vm<N>,
    creates ``cmdargs.count`` VMs attached to ``cmdargs.networks``, and --
    when a reservation server is configured -- waits for them and
    registers each one.
    """
    if cmdargs.name and cmdargs.count != 1:
        sys.exit("Error: when VM name is specified, the count must be 1")

    rc, sizes = driver.get_flavor_list(account)
    assert rc == RwStatus.SUCCESS

    try:
        # Exact flavor-name match; IndexError from [0] means "not found".
        size = [s for s in sizes.flavorinfo_list if s.name == cmdargs.flavor][0]
    except IndexError:
        sys.exit("Error: Failed to create VM, couldn't find flavor %s" % \
                 cmdargs.flavor)
    print(size)
    rc, images = driver.get_image_list(account)
    assert rc == RwStatus.SUCCESS
    if images is None:
        sys.exit("Error: No images found")
    try:
        # Substring match: the first image whose name contains cmdargs.image.
        image = [i for i in images.imageinfo_list if cmdargs.image in i.name][0]
    except IndexError:
        sys.exit("Error: Failed to create VM, couldn't find image %s" % \
                 cmdargs.image)
    print(image)

    # VM name is not specified, so determine a unique VM name
    # VM name should have the following format:
    #    rwopenstack_<host>_vm<id>, e.g., rwopenstack_grunt16_vm1
    # The following code gets the list of existing VMs and determines
    # a unique id for the VM name construction.
    rc, nodes = driver.get_vm_list(account)
    assert rc == RwStatus.SUCCESS
    prefix = 'rwopenstack_%s_vm' % hostname()
    vmid = 0;
    for n in nodes.vminfo_list:
        if n.vm_name.startswith(prefix):
            temp_str = n.vm_name[len(prefix):]
            if temp_str == '':
                # A bare "rwopenstack_<host>_vm" counts as id 1.
                temp = 1
            else:
                temp = int(n.vm_name[len(prefix):])

            if (temp > vmid):
                vmid = temp

    nodelist = []
    for i in range(0, cmdargs.count):
        if cmdargs.name:
            vm_name = cmdargs.name
        else:
            # New ids continue after the highest existing one.
            vm_name = '%s%d' % (prefix, vmid+i+1)

        # NOTE(review): the network list is re-fetched and re-printed on
        # every iteration of this loop.
        rc, netlist = driver.get_network_list(account)
        assert rc == RwStatus.SUCCESS
        for network in netlist.networkinfo_list:
            print(network)

        vm = RwcalYang.YangData_RwProject_Project_VimResources_VminfoList()
        vm.vm_name = vm_name
        vm.flavor_id = size.id
        vm.image_id = image.id
        vm.cloud_init.userdata = ''

        # Build name -> network item for every non-public network.
        nets = dict()
        for network in netlist.networkinfo_list:
            if network.network_name != "public":
                nwitem = RwcalYang.YangData_RwProject_Project_VimResources_VminfoList_NetworkList()
                nwitem.network_id = network.network_id
                nets[network.network_name] = nwitem

        logger.debug('creating VM using nets %s' % cmdargs.networks )
        for net in cmdargs.networks.split(','):
            if not net in nets:
                print(("Invalid network name '%s'" % net))
                print(('available nets are %s' % ','.join(list(nets.keys())) ))
                sys.exit(1)
            # The management network is attached implicitly, so skip it here.
            if net != cmdargs.mgmt_network:
                vm.network_list.append(nets[net])

        print(vm.network_list)
        # NOTE(review): rc from create_vm is not checked here.
        rc, node_id = driver.create_vm(account, vm)

        # wait for 1 to be up before starting the rest
        # this is an attempt to make sure the image is cached
        nodelist.append(node_id)
        if i == 0 or cmdargs.wait_after_create is True:
            #wait_until_running([node], timeout=300)
            wait_till_active(driver, account, nodelist, timeout=300)
        print(node_id)
    if cmdargs.reservation_server_url is not None:
        # If we did not wait per-VM above, wait for the whole batch now
        # before registering them.
        if not cmdargs.wait_after_create:
            print("Waiting for VMs to start")
            wait_till_active(driver, account, nodelist, timeout=300)
            print("VMs are up")
        header=True
        for node in nodelist:
            vm_register(node, driver, account, cmdargs, header)
            header=False
| 211 | |
| 212 | |
def vm_destroy_subcommand(driver, account, cmdargs):
    """Process the VM destroy subcommand.

    With --destroy-all or --wait, every VM is deleted (and with --wait we
    then poll until the VM list is empty).  Otherwise, VMs whose names
    fully match the regular expression in ``cmdargs.vm_name`` are deleted.
    Exits with status 1 when a bulk delete fails.
    """
    rc, nodes = driver.get_vm_list(account)
    assert rc == RwStatus.SUCCESS
    ct = len(nodes.vminfo_list)
    if cmdargs.destroy_all or cmdargs.wait:
        failed = 0  # renamed from rc to stop shadowing the driver status above
        for n in nodes.vminfo_list:
            if testbed is not None:
                try:
                    testbed.remove_resource(n.vm_name)
                except Exception:
                    # Best effort: reservation cleanup must not block deletion.
                    print("WARNING: error deleting resource from reservation system")
            if RwStatus.SUCCESS != driver.delete_vm(account, n.vm_id):
                print('Error: failed to destroy node %s' % n.vm_name)
                failed = 1
        if failed:
            sys.exit(1)
        if cmdargs.wait:
            while ct > 0:
                sys.stderr.write("waiting for %d VMs to exit...\n" % ct)
                time.sleep(1)
                try:
                    rc, nodesnw = driver.get_vm_list(account)
                    assert rc == RwStatus.SUCCESS
                    ct = len(nodesnw.vminfo_list)
                except Exception:
                    # Transient listing errors while VMs tear down: retry.
                    pass

    else:
        # Full-string regex match against each VM name.
        vm_re = re.compile('^%s$' % cmdargs.vm_name)
        ct = 0
        for n in nodes.vminfo_list:
            if vm_re.match(n.vm_name):
                ct += 1
                if testbed is not None:
                    try:
                        testbed.remove_resource(n.vm_name)
                    except Exception:
                        print("WARNING: error deleting resource from reservation system")
                if RwStatus.SUCCESS != driver.delete_vm(account, n.vm_id):
                    print('Error: failed to destroy node %s' % n.vm_name)
                    return
                print('destroyed %s' % n.vm_name)
        if ct == 0:
            print("No VMs matching \"%s\" found" % ( cmdargs.vm_name ))
| 258 | |
| 259 | |
def vm_rebuild_subcommand(driver, account, cmdargs):
    """Process the VM rebuild subcommand.

    Rebuilds VMs from the image named by ``cmdargs.image_name``: all of
    them with --rebuild-all, or those fully matching the
    ``cmdargs.vm_name`` regex otherwise.

    NOTE(review): unlike the other vm_* subcommands this uses a
    libcloud-style driver surface (list_images/list_nodes/ex_rebuild)
    rather than the CAL (account, rc-tuple) API, and its dispatch in
    vm_subcommand is commented out -- confirm before re-enabling.
    """
    images = driver.list_images()
    found=0
    for i in images:
        if i.name == cmdargs.image_name:
            found=1
            break
    if found != 1:
        print('Error: Rebuild failed - image %s not found' % cmdargs.image_name)
        sys.exit(1)
    # ``i`` is the matching image from the loop above.
    image=i
    nodes = driver.list_nodes()
    if cmdargs.rebuild_all:
        rc=0
        for n in nodes:
            if not driver.ex_rebuild(n,image):
                print('Error: failed to rebuild node %s' % n.name)
                rc=1
        if rc:
            sys.exit(1)
        # Poll every 10s until the node's reported state matches the state
        # captured before the rebuild.
        # NOTE(review): only the last loop variable ``n`` is waited on
        # here, not every rebuilt node.
        rebuilt=0
        while rebuilt != 1:
            time.sleep(10)
            nw_nodes = driver.list_nodes()
            for nw in nw_nodes:
                if nw.name == n.name:
                    if nw.state == n.state:
                        rebuilt=1
                    break
    else:
        # Full-string regex match against each node name.
        vm_re = re.compile('^%s$' % cmdargs.vm_name)
        ct = 0
        for n in nodes:
            if vm_re.match(n.name):
                ct += 1
                if not driver.ex_rebuild(n,image):
                    print('Error: failed to rebuild node %s' % n.name)
                    return
                print('Rebuilt %s' % n.name)
                # Wait for this node to settle before moving to the next.
                rebuilt=0
                while rebuilt != 1:
                    time.sleep(10)
                    nw_nodes = driver.list_nodes()
                    for nw in nw_nodes:
                        if nw.name == n.name:
                            if nw.state == n.state:
                                rebuilt=1
                            break
        if ct == 0:
            print("No VMs matching \"%s\" found" % ( cmdargs.vm_name ))
| 310 | |
| 311 | |
| 312 | |
def vm_reboot_subcommand(driver, account, cmdargs):
    """Process the VM reboot subcommand.

    Reboots every VM with --reboot-all (sleeping ``cmdargs.sleep_time``
    seconds between reboots to throttle), or only the VM named by
    --vm-name otherwise.
    """
    rc, nodes = driver.get_vm_list(account)
    assert rc == RwStatus.SUCCESS
    if cmdargs.reboot_all:
        for n in nodes.vminfo_list:
            # BUG fix: the original left the reboot call commented out
            # (inside a triple-quoted string), so --reboot-all only slept.
            # Reboot through the CAL driver exactly as the single-VM path
            # below does.
            if RwStatus.SUCCESS != driver.reboot_vm(account, n.vm_id):
                print('Error: failed to reboot node %s' % n.vm_name)
            else:
                print("rebooted %s" % n.vm_name)
            time.sleep(cmdargs.sleep_time)
    else:
        for n in nodes.vminfo_list:
            if n.vm_name == cmdargs.vm_name:
                if RwStatus.SUCCESS != driver.reboot_vm(account,n.vm_id):
                    print('Error: failed to reboot node %s' % n.vm_name)
                else:
                    print("rebooted %s" % n.vm_name)
| 332 | |
| 333 | |
def vm_start_subcommand(driver, account, cmdargs):
    """Process the VM start subcommand.

    Starts every VM with --start-all, or only the VM named by --vm-name.
    Prints one line per attempted start.
    """
    rc, nodes = driver.get_vm_list(account)
    assert rc == RwStatus.SUCCESS
    for n in nodes.vminfo_list:
        # Without --start-all, only the named VM is started.
        if not cmdargs.start_all and n.vm_name != cmdargs.vm_name:
            continue
        # (Removed a leftover debug ``print(dir(n))`` from the --start-all
        # path; both branches were otherwise identical and are now merged.)
        if RwStatus.SUCCESS != driver.start_vm(account, n.vm_id):
            print('Error: failed to start node %s' % n.vm_name)
        else:
            print("started %s" % n.vm_name)
| 351 | |
def vm_subcommand(driver, account, cmdargs):
    """Dispatch the vm subcommand to its handler (list is inline)."""

    which = cmdargs.which
    if which == 'list':
        rc, nodes = driver.get_vm_list(account)
        assert rc == RwStatus.SUCCESS
        for node in nodes.vminfo_list:
            print(node)
            if node.state == 4:
                # State 4 is reported as shutoff.
                if not cmdargs.ipsonly:
                    print("%s is shutoff" % node.vm_name)
            elif cmdargs.ipsonly:
                addr = node.management_ip
                if addr is not None:
                    print(addr)
            else:
                if node.management_ip is None:
                    print("%s NO IP" % node.vm_name)
                elif len(node.private_ip_list) > 0:
                    private = ",".join([p.get_ip_address() for p in node.private_ip_list])
                    print("%s %s,%s" % (node.vm_name, node.management_ip, private))
                else:
                    print("%s %s" % (node.vm_name, node.management_ip))
    elif which == 'create':
        vm_create_subcommand(driver, account, cmdargs)
    elif which == 'reboot':
        vm_reboot_subcommand(driver, account, cmdargs)
    elif which == 'start':
        vm_start_subcommand(driver, account, cmdargs)
    elif which == 'destroy':
        vm_destroy_subcommand(driver, account, cmdargs)
    # 'rebuild' dispatch was disabled in the original source:
    #elif which == 'rebuild':
    #    vm_rebuild_subcommand(driver, account, cmdargs)
| 387 | |
def image_delete_subcommand(driver, account, cmdargs):
    """Delete one image by exact name, or every image with --delete-all."""
    rc, images = driver.get_image_list(account)
    assert rc == RwStatus.SUCCESS
    # Switch the account to admin credentials before deleting.
    account.openstack.key = 'admin'
    if cmdargs.delete_all:
        targets = list(images.imageinfo_list)
    else:
        targets = [img for img in images.imageinfo_list
                   if img.name == cmdargs.image_name]
    for img in targets:
        if RwStatus.SUCCESS != driver.delete_image(account, img.id):
            print('Error: failed to delete image %s' % img.name)
| 401 | |
def image_subcommand(driver, account, cmdargs):
    """Dispatch the image subcommand: list, delete, create, or getid."""
    which = cmdargs.which

    if which == 'list':
        rc, images = driver.get_image_list(account)
        assert rc == RwStatus.SUCCESS
        for img in images.imageinfo_list:
            print(img)

    elif which == 'delete':
        image_delete_subcommand(driver, account, cmdargs)

    elif which == 'create':
        # Creation needs admin credentials.
        account.openstack.key = 'admin'
        rc, images = driver.get_image_list(account)
        assert rc == RwStatus.SUCCESS
        # Refuse to create a duplicate name.
        for img in images.imageinfo_list:
            if img.name == cmdargs.image_name:
                print("FATAL: image \"%s\" already exists" % cmdargs.image_name)
                return 1

        print("creating image \"%s\" using %s ..." % \
              (cmdargs.image_name, cmdargs.file_name))
        newimg = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
        newimg.name = cmdargs.image_name
        newimg.location = cmdargs.file_name
        newimg.disk_format = "qcow2"
        newimg.container_format = "bare"
        rc, img_id = driver.create_image(account, newimg)
        print("... done. image_id is %s" % img_id)
        return img_id

    elif which == 'getid':
        rc, images = driver.get_image_list(account)
        assert rc == RwStatus.SUCCESS
        # Print every matching id; exit non-zero unless exactly one match.
        matches = [img.id for img in images.imageinfo_list
                   if img.name == cmdargs.image_name]
        for match in matches:
            print(match)
        if len(matches) != 1:
            sys.exit(1)
| 444 | |
def flavor_subcommand(driver, account, cmdargs):
    """Dispatch the flavor subcommand: list, create, or delete."""
    which = cmdargs.which

    if which == 'list':
        rc, sizes = driver.get_flavor_list(account)
        assert rc == RwStatus.SUCCESS
        # Fetch and print the full detail record for each flavor.
        for entry in sizes.flavorinfo_list:
            rc, details = driver.get_flavor(account, entry.id)
            print(details)

    elif which == 'create':
        # Creation needs admin credentials.
        account.openstack.key = 'admin'
        flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
        flavor.name = cmdargs.flavor_name
        flavor.vm_flavor.memory_mb = cmdargs.memory_size
        flavor.vm_flavor.vcpu_count = cmdargs.vcpu_count
        flavor.vm_flavor.storage_gb = cmdargs.disc_size
        # Optional EPA (enhanced platform awareness) attributes.
        if cmdargs.hugepages_kilo:
            flavor.guest_epa.mempage_size = cmdargs.hugepages_kilo
        if cmdargs.numa_nodes:
            flavor.guest_epa.numa_node_policy.node_cnt = cmdargs.numa_nodes
        if cmdargs.dedicated_cpu:
            flavor.guest_epa.cpu_pinning_policy = 'DEDICATED'
        if cmdargs.pci_count:
            dev = flavor.guest_epa.pcie_device.add()
            dev.device_id = 'PCI_%dG_ALIAS' % (cmdargs.pci_speed)
            dev.count = cmdargs.pci_count
        if cmdargs.colleto:
            dev = flavor.guest_epa.pcie_device.add()
            dev.device_id = 'COLETO_VF_ALIAS'
            dev.count = cmdargs.colleto
        if cmdargs.trusted_host:
            flavor.guest_epa.trusted_execution = True

        rc, flavor_id = driver.create_flavor(account, flavor)
        assert rc == RwStatus.SUCCESS
        print("created flavor %s id %s" % (cmdargs.flavor_name, flavor_id))

    elif which == 'delete':
        # Deletion needs admin credentials.
        account.openstack.key = 'admin'
        rc, sizes = driver.get_flavor_list(account)
        assert rc == RwStatus.SUCCESS
        for entry in sizes.flavorinfo_list:
            if entry.name == cmdargs.flavor_name:
                rc = driver.delete_flavor(account, entry.id)
                assert rc == RwStatus.SUCCESS
| 490 | |
def hostagg_subcommand(driver, account, cmdargs):
    """Dispatch the hostagg subcommand (nova host aggregates, as admin)."""
    which = cmdargs.which

    if which == 'list':
        conn = ra_nova_connect(project='admin')
        for agg in conn.aggregates.list():
            print("%-12s %-12s" % \
                  (agg.name, agg.availability_zone))

    elif which == 'create':
        conn = ra_nova_connect(project='admin')
        created = conn.aggregates.create(cmdargs.hostagg_name,
                                         cmdargs.avail_zone)
        print("created hostagg %s in %s" % (created.name, created.availability_zone))

    elif which == 'delete':
        conn = ra_nova_connect(project='admin')
        for agg in conn.aggregates.list():
            if agg.name == cmdargs.hostagg_name:
                # Optionally evict member hosts first so the delete succeeds.
                if cmdargs.force_delete_hosts:
                    for member in agg.hosts:
                        agg.remove_host(member)
                agg.delete()

    elif which == 'addhost':
        conn = ra_nova_connect(project='admin')
        for agg in conn.aggregates.list():
            if agg.name == cmdargs.hostagg_name:
                agg.add_host(cmdargs.host_name)

    elif which == 'delhost':
        conn = ra_nova_connect(project='admin')
        for agg in conn.aggregates.list():
            if agg.name == cmdargs.hostagg_name:
                agg.remove_host(cmdargs.host_name)

    elif which == 'setmetadata':
        conn = ra_nova_connect(project='admin')
        for agg in conn.aggregates.list():
            if agg.name == cmdargs.hostagg_name:
                # Single "key=value" string -> one-entry dict.
                meta = dict([cmdargs.extra_specs.split("="),])
                agg.set_metadata(meta)
| 533 | |
def quota_subcommand(driver, account, cmdargs):
    """Process the quota subcommand: get or set nova quotas for a project.

    NOTE(review): depends on ``ra_nova_connect``, ``get_openstack_file``,
    ``load_params`` and ``keystone_client``, none of which are defined or
    imported in this file chunk -- confirm they exist at runtime.
    """
    nova = ra_nova_connect(project='admin')
    # Load the OS_* credentials for the target project from its rc file.
    cfgfile = get_openstack_file(None, cmdargs.project)
    kwargs = load_params(cfgfile)

    # Authenticate against keystone to resolve the project's tenant id.
    keystone = keystone_client.Client(username=kwargs.get('OS_USERNAME'),
                                      password=kwargs.get('OS_PASSWORD'),
                                      tenant_name=kwargs.get('OS_TENANT_NAME'),
                                      auth_url=kwargs.get('OS_AUTH_URL'))
    if cmdargs.which == 'set':
        nova.quotas.update(keystone.tenant_id,
                           ram=cmdargs.memory,
                           floating_ips=cmdargs.ips,
                           instances=cmdargs.vms,
                           cores=cmdargs.vcpus)
    elif cmdargs.which == 'get':
        print("get quotas for tenant %s %s" % \
              (cmdargs.project, keystone.tenant_id))
        q = nova.quotas.get(keystone.tenant_id)
        for att in [ 'ram', 'floating_ips', 'instances', 'cores' ]:
            print("%12s: %6d" % ( att, getattr(q, att) ))
| 556 | |
def rules_subcommand(driver, account, cmdargs):
    """Set or list rules on the demo project's 'default' security group."""
    conn = ra_nova_connect(project='demo')
    group = conn.security_groups.find(name='default')

    if cmdargs.which == 'set':
        # Open all TCP ports and allow ICMP.  BadRequest is swallowed --
        # presumably raised when the rule already exists; confirm.
        try:
            conn.security_group_rules.create(group.id, ip_protocol='tcp', from_port=1, to_port=65535 )
        except BadRequest:
            pass
        try:
            conn.security_group_rules.create(group.id, ip_protocol='icmp', from_port=-1, to_port=-1 )
        except BadRequest:
            pass

    elif cmdargs.which == 'list':
        for rule in group.rules:
            if rule['from_port'] == -1:
                # ICMP-style rules carry no port range.
                print("rule %d proto %s from IP %s" % ( rule['id'], rule['ip_protocol'], rule['ip_range']['cidr'] ))
            else:
                print("rule %d proto %s from port %d to %d from IP %s" % ( rule['id'], rule['ip_protocol'], rule['from_port'], rule['to_port'], rule['ip_range']['cidr'] ))
| 576 | |
| 577 | |
def register_subcommand(driver, account, cmdargs):
    """Register every existing VM with the reservation system.

    Forces reserve_new_vms off so registration never claims the VMs for
    the current user.
    """
    cmdargs.reserve_new_vms = False
    vm_register('all', driver, account, cmdargs)
| 581 | |
##
# Command line argument specification
##
desc="""This tool is used to manage the VMs"""
# NOTE(review): presumably flags an OpenStack "kilo" host via distro
# version '21' -- confirm; ``kilo`` is not referenced in this chunk.
# Also, platform.dist() was removed in Python 3.8, where this line
# raises AttributeError -- confirm the target interpreter.
kilo=platform.dist()[1]=='21'
parser = argparse.ArgumentParser(description=desc)
subparsers = parser.add_subparsers()
ipaddr = socket.gethostbyname(socket.getfqdn())
# Reservation server can be overridden (or disabled) via the environment.
reservation_server_url = os.environ.get('RESERVATION_SERVER', 'http://reservation.eng.riftio.com:80')
# ipaddr = netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']
#default_auth_url = 'http://%s:5000/v3/' % ipaddr
default_auth_url = 'http://10.66.4.27:5000/v3/'

parser.add_argument('-t', '--provider-type', dest='provider_type',
                    type=str, default='OPENSTACK',
                    help='Cloud provider type (default: %(default)s)')
parser.add_argument('-u', '--user-name', dest='user',
                    type=str, default='demo',
                    help='User name (default: %(default)s)')
parser.add_argument('-p', '--password', dest='passwd',
                    type=str, default='mypasswd',
                    help='Password (default: %(default)s)')
parser.add_argument('-m', '--mgmt-nw', dest='mgmt_network',
                    type=str, default='private',
                    help='mgmt-network (default: %(default)s)')
# NOTE(review): help text says 'Password' but this option is the
# keystone auth URL -- copy/paste error in the help string.
parser.add_argument('-a', '--auth-url', dest='auth_url',
                    type=str, default=default_auth_url,
                    help='Password (default: %(default)s)')
parser.add_argument('-r', '--reservation_server_url', dest='reservation_server_url',
                    type=str, default=reservation_server_url,
                    help='reservation server url, use None to disable (default %(default)s)' )
parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='raise the logging level')
| 614 | |
##
# Subparser for VM
##
vm_parser = subparsers.add_parser('vm')
vm_subparsers = vm_parser.add_subparsers()

# Create VM subparser
vm_create_parser = vm_subparsers.add_parser('create')
vm_create_parser.add_argument('-c', '--count',
                              type=int, default=1,
                              help='The number of VMs to launch '
                              '(default: %(default)d)')
vm_create_parser.add_argument('-i', '--image',
                              default='rwopenstack_vm',
                              help='Specify the image for the VM (default: %(default)s)')
vm_create_parser.add_argument('-n', '--name',
                              help='Specify the name of the VM')
vm_create_parser.add_argument('-f', '--flavor',
                              help='Specify the flavor for the VM')
vm_create_parser.add_argument('-R', '--reserve', dest='reserve_new_vms',
                              action='store_true', help='reserve any newly created VMs')
vm_create_parser.add_argument('-s', '--single', dest='wait_after_create',
                              action='store_true', help='wait for each VM to start before creating the next')
vm_create_parser.add_argument('-N', '--networks', dest='networks', type=str,
                              default='private,private2,private3,private4',
                              help='comma separated list of networks to connect these VMs to (default: %(default)s)' )

# cmdargs.which selects the branch taken in vm_subcommand.
vm_create_parser.set_defaults(which='create')
# Reboot VM subparser
vm_reboot_parser = vm_subparsers.add_parser('reboot')
group = vm_reboot_parser.add_mutually_exclusive_group()
group.add_argument('-n', '--vm-name', dest='vm_name',
                   type=str,
                   help='Specify the name of the VM')
group.add_argument('-a', '--reboot-all',
                   dest='reboot_all', action='store_true',
                   help='Reboot all VMs')
vm_reboot_parser.add_argument('-s', '--sleep', dest='sleep_time', type=int, default=4, help='time in seconds to sleep between reboots')
vm_reboot_parser.set_defaults(which='reboot')


# NOTE(review): the 'start' parser is disabled (triple-quoted string
# below) although vm_subcommand still dispatches which == 'start' --
# confirm whether 'vm start' is meant to be reachable.
"""
# start VM subparser
vm_start_parser = vm_subparsers.add_parser('start')
group = vm_start_parser.add_mutually_exclusive_group()
group.add_argument('-n', '--vm-name', dest='vm_name',
                   type=str,
                   help='Specify the name of the VM')
group.add_argument('-a', '--start-all',
                   dest='start_all', action='store_true',
                   help='Start all VMs')
vm_start_parser.set_defaults(which='start')
"""

# Destroy VM subparser
vm_destroy_parser = vm_subparsers.add_parser('destroy')
group = vm_destroy_parser.add_mutually_exclusive_group()
group.add_argument('-n', '--vm-name', dest='vm_name',
                   type=str,
                   help='Specify the name of the VM (accepts regular expressions)')
group.add_argument('-a', '--destroy-all',
                   dest='destroy_all', action='store_true',
                   help='Delete all VMs')
group.add_argument('-w', '--wait',
                   dest='wait', action='store_true',
                   help='destroy all and wait until all VMs have exited')
vm_destroy_parser.set_defaults(which='destroy')

# Rebuild VM subparser
# NOTE(review): parsed but vm_subcommand's 'rebuild' dispatch is
# commented out, so this subcommand currently does nothing.
vm_rebuild_parser = vm_subparsers.add_parser('rebuild')
group = vm_rebuild_parser.add_mutually_exclusive_group()
group.add_argument('-n', '--vm-name', dest='vm_name',
                   type=str,
                   help='Specify the name of the VM (accepts regular expressions)')
group.add_argument('-a', '--rebuild-all',
                   dest='rebuild_all', action='store_true',
                   help='Rebuild all VMs')
vm_rebuild_parser.add_argument('-i', '--image-name', dest='image_name',
                               type=str,
                               help='Specify the name of the image')
vm_rebuild_parser.set_defaults(which='rebuild')

# List VM subparser
vm_list_parser = vm_subparsers.add_parser('list')
vm_list_parser.set_defaults(which='list')
vm_list_parser.add_argument('-i', '--ips_only', dest='ipsonly',
                            action='store_true',
                            help='only list IP addresses')

vm_parser.set_defaults(func=vm_subcommand)
| 705 | |
##
# Subparser for image
##
image_parser = subparsers.add_parser('image')
image_subparsers = image_parser.add_subparsers()

# List image subparser
image_list_parser = image_subparsers.add_parser('list')
image_list_parser.set_defaults(which='list')

# Delete image subparser
image_destroy_parser = image_subparsers.add_parser('delete')
group = image_destroy_parser.add_mutually_exclusive_group()
group.add_argument('-n', '--image-name', dest='image_name',
                   type=str,
                   help='Specify the name of the image')
group.add_argument('-a', '--delete-all',
                   dest='delete_all', action='store_true',
                   help='Delete all images')
image_destroy_parser.set_defaults(which='delete')

# create image
image_create_parser = image_subparsers.add_parser('create')
image_create_parser.set_defaults(which='create')
image_create_parser.add_argument('-n', '--image-name', dest='image_name',
                                 type=str,
                                 default="rwopenstack_vm",
                                 help='Specify the name of the image')
image_create_parser.add_argument('-f', '--filename', dest='file_name',
                                 type=str,
                                 default='/net/sharedfiles/home1/common/vm/rift-root-current.qcow2',
                                 help='name of the existing qcow2 image file')


# NOTE(review): this rebinds image_create_parser to the 'getid' parser;
# harmless, but a distinct name (image_getid_parser) would be clearer.
image_create_parser = image_subparsers.add_parser('getid')
image_create_parser.set_defaults(which='getid')
image_create_parser.add_argument('-n', '--image-name', dest='image_name',
                                 type=str,
                                 default="rwopenstack_vm",
                                 help='Specify the name of the image')
image_parser.set_defaults(func=image_subcommand)
| 747 | |
##
# Subparser for flavor
##
flavor_parser = subparsers.add_parser('flavor')
flavor_subparsers = flavor_parser.add_subparsers()

# List flavor subparser
flavor_list_parser = flavor_subparsers.add_parser('list')
flavor_list_parser.set_defaults(which='list')

# Create flavor subparser
flavor_create_parser = flavor_subparsers.add_parser('create')
flavor_create_parser.set_defaults(which='create')
flavor_create_parser.add_argument('-n', '--flavor-name', dest='flavor_name',
                                  type=str,
                                  help='Specify the name of the flavor')
flavor_create_parser.add_argument('-m', '--memory-size', dest='memory_size',
                                  type=int, default=1024,
                                  help='Specify the size of the memory in MB '
                                  '(default: %(default)d)')
flavor_create_parser.add_argument('-d', '--disc-size', dest='disc_size',
                                  type=int, default=16,
                                  help='Specify the size of the disc in GB '
                                  '(default: %(default)d)')
flavor_create_parser.add_argument('-v', '--vcpu-count', dest='vcpu_count',
                                  type=int, default=1,
                                  help='Specify the number of VCPUs '
                                  '(default: %(default)d)')
flavor_create_parser.add_argument('-p', '--pci-count', dest='pci_count',
                                  type=int, default=0,
                                  help='Specify the number of PCI devices '
                                  '(default: %(default)d)')
flavor_create_parser.add_argument('-s', '--pci-speed', dest='pci_speed',
                                  type=int, default=10,
                                  help='Specify the speed of the PCI devices in Gbps (default: %(default)d)')
# NOTE(review): extra_specs, enable_hugepages and cpu_threads are parsed
# here but not read by flavor_subcommand in this chunk -- confirm whether
# they are consumed elsewhere or are dead options.
flavor_create_parser.add_argument('-e', '--hostagg-extra-specs', dest='extra_specs',
                                  type=str,
                                  help='Specify the extra spec ')
flavor_create_parser.add_argument('-b', '--back-with-hugepages', dest='enable_hugepages',
                                  action='store_true',
                                  help='Enable memory backing with hugepages')
flavor_create_parser.add_argument('-B', '--back-with-hugepages-kilo', dest='hugepages_kilo',
                                  type=str,
                                  help='Enable memory backing with hugepages for kilo')
flavor_create_parser.add_argument('-D', '--dedicated_cpu', dest='dedicated_cpu',
                                  action='store_true',
                                  help='Dedicated CPU usage')
flavor_create_parser.add_argument('-T', '--cpu_threads', dest='cpu_threads',
                                  type=str,
                                  help='CPU threads usage')
flavor_create_parser.add_argument('-N', '--numa_nodes', dest='numa_nodes',
                                  type=int,
                                  help='Configure numa nodes')
flavor_create_parser.add_argument('-t', '--trusted-host', dest='trusted_host', action='store_true', help='restrict instances to trusted hosts')
flavor_create_parser.add_argument('-c', '--crypto-cards', dest='colleto', type=int, default=0, \
                                  help='how many colleto creek VFs should be passed thru to the VM')
| 804 | |
| 805 | # Delete flavor subparser |
| 806 | flavor_delete_parser = flavor_subparsers.add_parser('delete') |
| 807 | flavor_delete_parser.set_defaults(which='delete') |
| 808 | flavor_delete_parser.add_argument('-n', '--flavor-name', dest='flavor_name', |
| 809 | type=str, |
| 810 | help='Specify the name of the flavor') |
| 811 | |
| 812 | flavor_parser.set_defaults(func=flavor_subcommand) |
| 813 | |
| 814 | ## |
| 815 | # Subparser for host-aggregate |
| 816 | ## |
| 817 | hostagg_parser = subparsers.add_parser('hostagg') |
| 818 | hostagg_subparsers = hostagg_parser.add_subparsers() |
| 819 | |
| 820 | # List host-aggregate subparser |
| 821 | hostagg_list_parser = hostagg_subparsers.add_parser('list') |
| 822 | hostagg_list_parser.set_defaults(which='list') |
| 823 | |
| 824 | # Create hostagg subparser |
| 825 | hostagg_create_parser = hostagg_subparsers.add_parser('create') |
| 826 | hostagg_create_parser.set_defaults(which='create') |
| 827 | hostagg_create_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name', |
| 828 | type=str, |
| 829 | help='Specify the name of the hostagg') |
| 830 | hostagg_create_parser.add_argument('-a', '--avail-zone', dest='avail_zone', |
| 831 | type=str, |
| 832 | help='Specify the name of the availability zone') |
| 833 | # Delete hostagg subparser |
| 834 | hostagg_delete_parser = hostagg_subparsers.add_parser('delete') |
| 835 | hostagg_delete_parser.set_defaults(which='delete') |
| 836 | hostagg_delete_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name', |
| 837 | type=str, |
| 838 | help='Specify the name of the hostagg') |
| 839 | hostagg_delete_parser.add_argument('-f', '--force-delete-hosts', dest='force_delete_hosts', |
| 840 | action='store_true', |
| 841 | help='Delete the existing hosts') |
| 842 | |
| 843 | # Add host subparser |
| 844 | hostagg_addhost_parser = hostagg_subparsers.add_parser('addhost') |
| 845 | hostagg_addhost_parser.set_defaults(which='addhost') |
| 846 | hostagg_addhost_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name', |
| 847 | type=str, |
| 848 | help='Specify the name of the hostagg') |
| 849 | hostagg_addhost_parser.add_argument('-c', '--compute-host-name', dest='host_name', |
| 850 | type=str, |
| 851 | help='Specify the name of the host to be added') |
| 852 | |
| 853 | # Remove host subparser |
| 854 | hostagg_delhost_parser = hostagg_subparsers.add_parser('delhost') |
| 855 | hostagg_delhost_parser.set_defaults(which='delhost') |
| 856 | hostagg_delhost_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name', |
| 857 | type=str, |
| 858 | help='Specify the name of the hostagg') |
| 859 | hostagg_delhost_parser.add_argument('-c', '--compute-host-name', dest='host_name', |
| 860 | type=str, |
| 861 | help='Specify the name of the host to be removed') |
| 862 | |
| 863 | # Set meta-data subparser |
| 864 | hostagg_setdata_parser = hostagg_subparsers.add_parser('setmetadata') |
| 865 | hostagg_setdata_parser.set_defaults(which='setmetadata') |
| 866 | hostagg_setdata_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name', |
| 867 | type=str, |
| 868 | help='Specify the name of the hostagg') |
| 869 | hostagg_setdata_parser.add_argument('-d', '--meta-data', dest='extra_specs', |
| 870 | type=str, |
| 871 | help='Specify the meta-data to be associated to this host aggregate') |
| 872 | |
| 873 | hostagg_parser.set_defaults(func=hostagg_subcommand) |
| 874 | |
| 875 | ## |
| 876 | # Subparser for quota |
| 877 | ## |
| 878 | quota_parser = subparsers.add_parser('quota') |
| 879 | quota_subparser = quota_parser.add_subparsers() |
| 880 | quota_set_parser = quota_subparser.add_parser('set') |
| 881 | |
| 882 | # quota set subparser |
| 883 | quota_set_parser.set_defaults(which='set') |
| 884 | quota_set_parser.add_argument('-p', '--project', dest='project', |
| 885 | type=str, default='demo', |
| 886 | help='project name that you wish to set ' |
| 887 | 'the quotas for') |
| 888 | quota_set_parser.add_argument('-c', '--vcpus', dest='vcpus', |
| 889 | type=int, default=48, |
| 890 | help='Maximum number of virtual CPUs that can ' |
| 891 | 'be assigned to all VMs in aggregate') |
| 892 | quota_set_parser.add_argument('-v', '--vms', dest='vms', |
| 893 | type=int, default=24, |
| 894 | help='Maximum number of VMs that can be created ' |
| 895 | 'on this openstack instance ' |
| 896 | '(which may be more than 1 machine)') |
| 897 | quota_set_parser.add_argument('-i', '--ips', dest='ips', |
| 898 | type=int, default=250, |
| 899 | help='Maximum number of Floating IP Addresses ' |
| 900 | 'that can be assigned to all VMs ' |
| 901 | 'in aggregate') |
| 902 | quota_set_parser.add_argument('-m', '--memory', dest='memory', |
| 903 | type=int, default=122880, |
| 904 | help='Maximum amount of RAM in MB that can be ' |
| 905 | 'assigned to all VMs in aggregate') |
| 906 | |
| 907 | # quota get subparser |
| 908 | quota_get_parser = quota_subparser.add_parser('get') |
| 909 | quota_get_parser.add_argument('-p', '--project', dest='project', |
| 910 | type=str, default='demo', |
| 911 | help='project name that you wish to get ' |
| 912 | 'the quotas for') |
| 913 | quota_get_parser.set_defaults(which='get') |
| 914 | quota_parser.set_defaults(func=quota_subcommand) |
| 915 | |
| 916 | ## |
| 917 | # rules subparser |
| 918 | ## |
| 919 | rules_parser = subparsers.add_parser('rules') |
| 920 | rules_parser.set_defaults(func=rules_subcommand) |
| 921 | rules_subparser = rules_parser.add_subparsers() |
| 922 | rules_set_parser = rules_subparser.add_parser('set') |
| 923 | rules_set_parser.set_defaults(which='set') |
| 924 | rules_list_parser = rules_subparser.add_parser('list') |
| 925 | rules_list_parser.set_defaults(which='list') |
| 926 | |
| 927 | register_parser = subparsers.add_parser('register') |
| 928 | register_parser.set_defaults(func=register_subcommand) |
| 929 | |
| 930 | cmdargs = parser.parse_args() |
| 931 | |
| 932 | |
if __name__ == "__main__":
    logger = logging.getLogger(__name__)
    # One basicConfig call; DEBUG only when requested on the command line.
    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s',
                        level=logging.DEBUG if cmdargs.debug else logging.WARNING)

    if cmdargs.provider_type == 'OPENSTACK':
        # OpenStack is driven through the rwcal plugin below.
        #cls = get_driver(Provider.OPENSTACK)
        pass
    elif cmdargs.provider_type == 'VSPHERE':
        # NOTE(review): get_driver/Provider are not imported anywhere in this
        # file (the libcloud import appears to have been removed), so this
        # branch raises NameError if taken -- confirm before relying on it.
        cls = get_driver(Provider.VSPHERE)
    else:
        sys.exit("Cloud provider %s is NOT supported yet" % cmdargs.provider_type)

    # Treat the literal string "None" and the empty string as "no server".
    if cmdargs.reservation_server_url in ("None", ""):
        cmdargs.reservation_server_url = None
    if cmdargs.reservation_server_url is not None:
        sys.path.append('/usr/rift/lib')
        try:
            import ndl
        except Exception:
            # Best-effort: the tool still works without the reservation library.
            logger.warning("Error loading Reservation library")
            testbed = None
        else:
            testbed = ndl.Testbed()
            testbed.set_server(cmdargs.reservation_server_url)

    if cmdargs.provider_type == 'OPENSTACK':
        # Build the CAL account object from the command-line credentials.
        account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
        account.account_type = "openstack"
        account.openstack.key = cmdargs.user
        account.openstack.secret = cmdargs.passwd
        account.openstack.auth_url = cmdargs.auth_url
        account.openstack.tenant = cmdargs.user
        account.openstack.mgmt_network = cmdargs.mgmt_network

        plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
        engine, info, extension = plugin()
        driver = plugin.get_interface("Cloud")
        # Get the RwLogger context
        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
        try:
            rc = driver.init(rwloggerctx)
            assert rc == RwStatus.SUCCESS
        except Exception:
            # Was a bare "except:" that logged "Aborting tests" but then fell
            # through and invoked the subcommand anyway on a dead driver.
            # Now it really aborts.
            logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
            sys.exit(1)
        else:
            logger.info("Openstack Cal plugin successfully instantiated")

        cmdargs.func(driver, account, cmdargs)

    elif cmdargs.provider_type == 'VSPHERE':
        driver = cls(cmdargs.user, cmdargs.passwd, host='vcenter' )
        cmdargs.func(driver, cmdargs)