--- /dev/null
-__version__="0.5.8-r518"
-version_date="Jan 2017"
-database_version="0.19" #expected database schema version
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ '''
+ openmano server.
+ Main program that implements a reference NFVO (Network Functions Virtualisation Orchestrator).
+ It interfaces with an NFV VIM through its API and offers a northbound interface, based on REST (openmano API),
+ where NFV services are offered including the creation and deletion of VNF templates, VNF instances,
+ network service templates and network service instances.
+
+ It loads the configuration file and launches the http_server thread that will listen requests using openmano API.
+ '''
+ __author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+ __date__ ="$26-aug-2014 11:09:29$"
- default_tokens ={'http_port':9090,
- 'http_host':'localhost',
- 'http_console_proxy': True,
- 'http_console_host': None,
- 'log_level': 'DEBUG',
- 'log_socket_port': 9022,
- 'auto_push_VNF_to_VIMs': True
- }
++__version__="0.5.9-r519"
++version_date="Mar 2017"
++database_version="0.20" #expected database schema version
+
+ import time
+ import sys
+ import getopt
+ import yaml
+ from jsonschema import validate as js_v, exceptions as js_e
+ import logging
+ import logging.handlers as log_handlers
+ import socket
+ from osm_ro import httpserver, nfvo, nfvo_db
+ from osm_ro.openmano_schemas import config_schema
+ from osm_ro.db_base import db_base_Exception
+ import osm_ro
+
+ global global_config
+ global logger
+
+ class LoadConfigurationException(Exception):
+ pass
+
+ def load_configuration(configuration_file):
- for log_module in ("nfvo", "http", "vim", "db", "console"):
++ default_tokens = {'http_port':9090,
++ 'http_host':'localhost',
++ 'http_console_proxy': True,
++ 'http_console_host': None,
++ 'log_level': 'DEBUG',
++ 'log_socket_port': 9022,
++ 'auto_push_VNF_to_VIMs': True,
++ 'db_host': 'localhost',
++ 'db_ovim_host': 'localhost'
++ }
+ try:
+ #Check config file exists
+ with open(configuration_file, 'r') as f:
+ config_str = f.read()
+ #Parse configuration file
+ config = yaml.load(config_str)
+ #Validate configuration file with the config_schema
+ js_v(config, config_schema)
+
+ #Add default values tokens
+ for k,v in default_tokens.items():
+ if k not in config:
+ config[k]=v
+ return config
+
+ except yaml.YAMLError as e:
+ error_pos = ""
+ if hasattr(e, 'problem_mark'):
+ mark = e.problem_mark
+ error_pos = " at line:{} column:{}".format(mark.line+1, mark.column+1)
+ raise LoadConfigurationException("Bad YAML format at configuration file '{file}'{pos}".format(file=configuration_file, pos=error_pos) )
+ except js_e.ValidationError as e:
+ error_pos = ""
+ if e.path:
+ error_pos=" at '" + ":".join(map(str, e.path))+"'"
+ raise LoadConfigurationException("Invalid field at configuration file '{file}'{pos} {message}".format(file=configuration_file, pos=error_pos, message=str(e)) )
+ except Exception as e:
+ raise LoadConfigurationException("Cannot load configuration file '{file}' {message}".format(file=configuration_file, message=str(e) ) )
+
+
+ def console_port_iterator():
+ '''this iterator deals with the http_console_ports
+ returning the ports one by one
+ '''
+ index = 0
+ while index < len(global_config["http_console_ports"]):
+ port = global_config["http_console_ports"][index]
+ #print("ports -> ", port)
+ if type(port) is int:
+ yield port
+ else: #this is dictionary with from to keys
+ port2 = port["from"]
+ #print("ports -> ", port, port2)
+ while port2 <= port["to"]:
+ #print("ports -> ", port, port2)
+ yield port2
+ port2 += 1
+ index += 1
+
+
+ def usage():
+ print("Usage: ", sys.argv[0], "[options]")
+ print( " -v|--version: prints current version")
+ print( " -c|--config [configuration_file]: loads the configuration file (default: openmanod.cfg)")
+ print( " -h|--help: shows this help")
+ print( " -p|--port [port_number]: changes port number and overrides the port number in the configuration file (default: 9090)")
+ print( " -P|--adminport [port_number]: changes admin port number and overrides the port number in the configuration file (default: 9095)")
+ #print( " -V|--vnf-repository: changes the path of the vnf-repository and overrides the path in the configuration file")
+ print( " --log-socket-host HOST: send logs to this host")
+ print( " --log-socket-port PORT: send logs using this port (default: 9022)")
+ print( " --log-file FILE: send logs to this file")
+ return
+
+ if __name__=="__main__":
+ #Configure logging step 1
+ hostname = socket.gethostname()
+ #streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
+ # "%(asctime)s %(name)s %(levelname)s %(filename)s:%(lineno)d %(funcName)s %(process)d: %(message)s"
+ log_formatter_complete = logging.Formatter(
+ '%(asctime)s.%(msecs)03d00Z[{host}@openmanod] %(filename)s:%(lineno)s severity:%(levelname)s logger:%(name)s log:%(message)s'.format(host=hostname),
+ datefmt='%Y-%m-%dT%H:%M:%S',
+ )
+ log_format_simple = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s"
+ log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S')
+ logging.basicConfig(format=log_format_simple, level= logging.DEBUG)
+ logger = logging.getLogger('openmano')
+ logger.setLevel(logging.DEBUG)
+ socket_handler = None
+ file_handler = None
+ # Read parameters and configuration file
+ httpthread = None
+ try:
+ #load parameters and configuration
+ opts, args = getopt.getopt(sys.argv[1:], "hvc:V:p:P:", ["config=", "help", "version", "port=", "vnf-repository=", "adminport=", "log-socket-host=", "log-socket-port=", "log-file="])
+ port=None
+ port_admin = None
+ config_file = 'osm_ro/openmanod.cfg'
+ vnf_repository = None
+ log_file = None
+ log_socket_host = None
+ log_socket_port = None
+
+ for o, a in opts:
+ if o in ("-v", "--version"):
+ print ("openmanod version " + __version__ + ' ' + version_date)
+ print ("(c) Copyright Telefonica")
+ sys.exit()
+ elif o in ("-h", "--help"):
+ usage()
+ sys.exit()
+ elif o in ("-V", "--vnf-repository"):
+ vnf_repository = a
+ elif o in ("-c", "--config"):
+ config_file = a
+ elif o in ("-p", "--port"):
+ port = a
+ elif o in ("-P", "--adminport"):
+ port_admin = a
+ elif o == "--log-socket-port":
+ log_socket_port = a
+ elif o == "--log-socket-host":
+ log_socket_host = a
+ elif o == "--log-file":
+ log_file = a
+ else:
+ assert False, "Unhandled option"
+ global_config = load_configuration(config_file)
+ #print global_config
+ # Override parameters obtained by command line
+ if port:
+ global_config['http_port'] = port
+ if port_admin:
+ global_config['http_admin_port'] = port_admin
+ if log_socket_host:
+ global_config['log_socket_host'] = log_socket_host
+ if log_socket_port:
+ global_config['log_socket_port'] = log_socket_port
+ if log_file:
+ global_config['log_file'] = log_file
+ # if vnf_repository is not None:
+ # global_config['vnf_repository'] = vnf_repository
+ # else:
+ # if not 'vnf_repository' in global_config:
+ # logger.error( os.getcwd() )
+ # global_config['vnf_repository'] = os.getcwd()+'/vnfrepo'
+ # #print global_config
+ # if not os.path.exists(global_config['vnf_repository']):
+ # logger.error( "Creating folder vnf_repository folder: '%s'.", global_config['vnf_repository'])
+ # try:
+ # os.makedirs(global_config['vnf_repository'])
+ # except Exception as e:
+ # logger.error( "Error '%s'. Ensure the path 'vnf_repository' is properly set at %s",e.args[1], config_file)
+ # exit(-1)
+
+ global_config["console_port_iterator"] = console_port_iterator
+ global_config["console_thread"]={}
+ global_config["console_ports"]={}
+ if not global_config["http_console_host"]:
+ global_config["http_console_host"] = global_config["http_host"]
+ if global_config["http_host"]=="0.0.0.0":
+ global_config["http_console_host"] = socket.gethostname()
+
+ #Configure logging STEP 2
+ if "log_host" in global_config:
+ socket_handler= log_handlers.SocketHandler(global_config["log_socket_host"], global_config["log_socket_port"])
+ socket_handler.setFormatter(log_formatter_complete)
+ if global_config.get("log_socket_level") and global_config["log_socket_level"] != global_config["log_level"]:
+ socket_handler.setLevel(global_config["log_socket_level"])
+ logger.addHandler(socket_handler)
+ #logger.addHandler(log_handlers.SysLogHandler())
+ if "log_file" in global_config:
+ try:
+ file_handler= logging.handlers.RotatingFileHandler(global_config["log_file"], maxBytes=100e6, backupCount=9, delay=0)
+ file_handler.setFormatter(log_formatter_simple)
+ logger.addHandler(file_handler)
+ #logger.debug("moving logs to '%s'", global_config["log_file"])
+ #remove initial stream handler
+ logging.root.removeHandler(logging.root.handlers[0])
+ print ("logging on '{}'".format(global_config["log_file"]))
+ except IOError as e:
+ raise LoadConfigurationException("Cannot open logging file '{}': {}. Check folder exist and permissions".format(global_config["log_file"], str(e)) )
+ #logging.basicConfig(level = getattr(logging, global_config.get('log_level',"debug")))
+ logger.setLevel(getattr(logging, global_config['log_level']))
+ logger.critical("Starting openmano server version: '%s %s' command: '%s'",
+ __version__, version_date, " ".join(sys.argv))
+
- for thread in global_config["console_thread"]:
- thread.terminate = True
++ for log_module in ("nfvo", "http", "vim", "db", "console", "ovim"):
+ log_level_module = "log_level_" + log_module
+ log_file_module = "log_file_" + log_module
+ logger_module = logging.getLogger('openmano.' + log_module)
+ if log_level_module in global_config:
+ logger_module.setLevel(global_config[log_level_module])
+ if log_file_module in global_config:
+ try:
+ file_handler= logging.handlers.RotatingFileHandler(global_config[log_file_module], maxBytes=100e6, backupCount=9, delay=0)
+ file_handler.setFormatter(log_formatter_simple)
+ logger_module.addHandler(file_handler)
+ except IOError as e:
+ raise LoadConfigurationException("Cannot open logging file '{}': {}. Check folder exist and permissions".format(global_config[log_file_module], str(e)) )
+ global_config["logger_"+log_module] = logger_module
+ #httpserver.logger = global_config["logger_http"]
+ #nfvo.logger = global_config["logger_nfvo"]
+
+ # Initialize DB connection
+ mydb = nfvo_db.nfvo_db();
+ mydb.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])
+ try:
+ r = mydb.get_db_version()
+ if r[1] != database_version:
+ logger.critical("DATABASE wrong version '%s'. \
+ Try to upgrade/downgrade to version '%s' with '%s/database_utils/migrate_mano_db.sh'",
+ r[1], database_version, osm_ro.__path__[0])
+ exit(-1)
+ except db_base_Exception as e:
+ logger.critical("DATABASE is not a MANO one or it is a '0.0' version. Try to upgrade to version '%s' with \
+ './database_utils/migrate_mano_db.sh'", database_version)
+ exit(-1)
+
+ nfvo.global_config=global_config
+ nfvo.start_service(mydb)
+
+ httpthread = httpserver.httpserver(mydb, False, global_config['http_host'], global_config['http_port'])
+
+ httpthread.start()
+ if 'http_admin_port' in global_config:
+ httpthreadadmin = httpserver.httpserver(mydb, True, global_config['http_host'], global_config['http_admin_port'])
+ httpthreadadmin.start()
+ time.sleep(1)
+ logger.info('Waiting for http clients')
+ print('Waiting for http clients')
+ print('openmanod ready')
+ print('====================')
+ time.sleep(20)
+ sys.stdout.flush()
+
+ #TODO: Interactive console must be implemented here instead of join or sleep
+
+ #httpthread.join()
+ #if 'http_admin_port' in global_config:
+ # httpthreadadmin.join()
+ while True:
+ time.sleep(86400)
+
+ except KeyboardInterrupt as e:
+ logger.info(str(e))
+ except SystemExit:
+ pass
+ except getopt.GetoptError as e:
+ logger.critical(str(e)) # will print something like "option -a not recognized"
+ #usage()
+ exit(-1)
+ except LoadConfigurationException as e:
+ logger.critical(str(e))
+ exit(-1)
+ except db_base_Exception as e:
+ logger.critical(str(e))
+ exit(-1)
+ nfvo.stop_service()
+ if httpthread:
+ httpthread.join(1)
+
--- /dev/null
-
-# if type(data[1]) is tuple: #this can only happen in a WHERE_OR clause
-# text =[]
-# for d in data[1]:
-# if d==None:
-# text.append(str(data[0]) + " is Null")
-# continue
-# out=str(d)
-# if "'" not in out:
-# text.append( str(data[0]) + "='" + out + "'" )
-# elif '"' not in out:
-# text.append( str(data[0]) + '="' + out + '"' )
-# else:
-# text.append( str(data[0]) + '=' + json.dumps(out) )
-# return " OR ".join(text)
-
- out=str(data[1])
- return str(data[0]) + '=' + json.dumps(out)
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ '''
+ Base class for openmano database manipulation
+ '''
+ __author__="Alfonso Tierno"
+ __date__ ="$4-Apr-2016 10:05:01$"
+
+ import MySQLdb as mdb
+ import uuid as myUuid
+ import utils as af
+ import json
+ #import yaml
+ import time
+ import logging
+ import datetime
+ from jsonschema import validate as js_v, exceptions as js_e
+
+ HTTP_Bad_Request = 400
+ HTTP_Unauthorized = 401
+ HTTP_Not_Found = 404
+ HTTP_Method_Not_Allowed = 405
+ HTTP_Request_Timeout = 408
+ HTTP_Conflict = 409
+ HTTP_Service_Unavailable = 503
+ HTTP_Internal_Server_Error = 500
+
+ def _check_valid_uuid(uuid):
+ id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+ id_schema2 = {"type" : "string", "pattern": "^[a-fA-F0-9]{32}$"}
+ try:
+ js_v(uuid, id_schema)
+ return True
+ except js_e.ValidationError:
+ try:
+ js_v(uuid, id_schema2)
+ return True
+ except js_e.ValidationError:
+ return False
+ return False
+
+ def _convert_datetime2str(var):
+ '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%i:%s'
+ It enters recursively in the dict var finding this kind of variables
+ '''
+ if type(var) is dict:
+ for k,v in var.items():
+ if type(v) is datetime.datetime:
+ var[k]= v.strftime('%Y-%m-%dT%H:%M:%S')
+ elif type(v) is dict or type(v) is list or type(v) is tuple:
+ _convert_datetime2str(v)
+ if len(var) == 0: return True
+ elif type(var) is list or type(var) is tuple:
+ for v in var:
+ _convert_datetime2str(v)
+
+ def _convert_bandwidth(data, reverse=False, logger=None):
+ '''Check the field bandwidth recursively and when found, it removes units and convert to number
+ It assumes that bandwidth is well formed
+ Attributes:
+ 'data': dictionary bottle.FormsDict variable to be checked. None or empty is considered valid
+ 'reverse': by default convert form str to int (Mbps), if True it convert from number to units
+ Return:
+ None
+ '''
+ if type(data) is dict:
+ for k in data.keys():
+ if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
+ _convert_bandwidth(data[k], reverse, logger)
+ if "bandwidth" in data:
+ try:
+ value=str(data["bandwidth"])
+ if not reverse:
+ pos = value.find("bps")
+ if pos>0:
+ if value[pos-1]=="G": data["bandwidth"] = int(data["bandwidth"][:pos-1]) * 1000
+ elif value[pos-1]=="k": data["bandwidth"]= int(data["bandwidth"][:pos-1]) / 1000
+ else: data["bandwidth"]= int(data["bandwidth"][:pos-1])
+ else:
+ value = int(data["bandwidth"])
+ if value % 1000 == 0: data["bandwidth"]=str(value/1000) + " Gbps"
+ else: data["bandwidth"]=str(value) + " Mbps"
+ except:
+ if logger:
+ logger.error("convert_bandwidth exception for type '%s' data '%s'", type(data["bandwidth"]), data["bandwidth"])
+ return
+ if type(data) is tuple or type(data) is list:
+ for k in data:
+ if type(k) is dict or type(k) is tuple or type(k) is list:
+ _convert_bandwidth(k, reverse, logger)
+
+ def _convert_str2boolean(data, items):
+ '''Check recursively the content of data, and if there is an key contained in items, convert value from string to boolean
+ Done recursively
+ Attributes:
+ 'data': dictionary variable to be checked. None or empty is considered valid
+ 'items': tuple of keys to convert
+ Return:
+ None
+ '''
+ if type(data) is dict:
+ for k in data.keys():
+ if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
+ _convert_str2boolean(data[k], items)
+ if k in items:
+ if type(data[k]) is str:
+ if data[k]=="false" or data[k]=="False" or data[k]=="0": data[k]=False
+ elif data[k]=="true" or data[k]=="True" or data[k]=="1": data[k]=True
+ elif type(data[k]) is int:
+ if data[k]==0: data[k]=False
+ elif data[k]==1: data[k]=True
+ if type(data) is tuple or type(data) is list:
+ for k in data:
+ if type(k) is dict or type(k) is tuple or type(k) is list:
+ _convert_str2boolean(k, items)
+
+ class db_base_Exception(Exception):
+ '''Common Exception for all database exceptions'''
+
+ def __init__(self, message, http_code=HTTP_Bad_Request):
+ Exception.__init__(self, message)
+ self.http_code = http_code
+
+ class db_base():
+ tables_with_created_field=()
+
+ def __init__(self, host=None, user=None, passwd=None, database=None, log_name='db', log_level=None):
+ self.host = host
+ self.user = user
+ self.passwd = passwd
+ self.database = database
+ self.con = None
+ self.log_level=log_level
+ self.logger = logging.getLogger(log_name)
+ if self.log_level:
+ self.logger.setLevel( getattr(logging, log_level) )
+
+ def connect(self, host=None, user=None, passwd=None, database=None):
+ '''Connect to specific data base.
+ The first time a valid host, user, passwd and database must be provided,
+ Following calls can skip this parameters
+ '''
+ try:
+ if host: self.host = host
+ if user: self.user = user
+ if passwd: self.passwd = passwd
+ if database: self.database = database
+
+ self.con = mdb.connect(self.host, self.user, self.passwd, self.database)
+ self.logger.debug("DB: connected to '%s' at '%s@%s'", self.database, self.user, self.host)
+ except mdb.Error as e:
+ raise db_base_Exception("Cannot connect to DataBase '{}' at '{}@{}' Error {}: {}".format(
+ self.database, self.user, self.host, e.args[0], e.args[1]),
+ http_code = HTTP_Unauthorized )
+
+ def get_db_version(self):
+ ''' Obtain the database schema version.
+ Return: (negative, text) if error or version 0.0 where schema_version table is missing
+ (version_int, version_text) if ok
+ '''
+ cmd = "SELECT version_int,version FROM schema_version"
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor()
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ highest_version_int=0
+ highest_version=""
+ for row in rows: #look for the latest version
+ if row[0]>highest_version_int:
+ highest_version_int, highest_version = row[0:2]
+ return highest_version_int, highest_version
+ except (mdb.Error, AttributeError) as e:
+ #self.logger.error("get_db_version DB Exception %d: %s. Command %s",e.args[0], e.args[1], cmd)
+ self._format_error(e, tries)
+ tries -= 1
+
+ def disconnect(self):
+ '''disconnect from specific data base'''
+ try:
+ self.con.close()
+ self.con = None
+ except mdb.Error as e:
+ self.logger.error("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
+ return
+ except AttributeError as e: #self.con not defined
+ if e[0][-5:] == "'con'":
+ self.logger.warn("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
+ return
+ else:
+ raise
+
+ def _format_error(self, e, tries=1, command=None, extra=None):
+ '''Creates a text error base on the produced exception
+ Params:
+ e: mdb exception
+ retry: in case of timeout, if reconnecting to database and retry, or raise and exception
+ cmd: database command that produce the exception
+ command: if the intention is update or delete
+ extra: extra information to add to some commands
+ Return
+ HTTP error in negative, formatted error text
+ '''
+ if isinstance(e,AttributeError ):
+ raise db_base_Exception("DB Exception " + str(e), HTTP_Internal_Server_Error)
+ if e.args[0]==2006 or e.args[0]==2013 : #MySQL server has gone away (((or))) Exception 2013: Lost connection to MySQL server during query
+ if tries>1:
+ self.logger.warn("DB Exception '%s'. Retry", str(e))
+ #reconnect
+ self.connect()
+ return
+ else:
+ raise db_base_Exception("Database connection timeout Try Again", HTTP_Request_Timeout)
+
+ fk=e.args[1].find("foreign key constraint fails")
+ if fk>=0:
+ if command=="update":
+ raise db_base_Exception("tenant_id '{}' not found.".format(extra), HTTP_Not_Found)
+ elif command=="delete":
+ raise db_base_Exception("Resource is not free. There are {} that prevent deleting it.".format(extra), HTTP_Conflict)
+ de = e.args[1].find("Duplicate entry")
+ fk = e.args[1].find("for key")
+ uk = e.args[1].find("Unknown column")
+ wc = e.args[1].find("in 'where clause'")
+ fl = e.args[1].find("in 'field list'")
+ #print de, fk, uk, wc,fl
+ if de>=0:
+ if fk>=0: #error 1062
+ raise db_base_Exception("Value {} already in use for {}".format(e.args[1][de+15:fk], e.args[1][fk+7:]), HTTP_Conflict)
+ if uk>=0:
+ if wc>=0:
+ raise db_base_Exception("Field {} can not be used for filtering".format(e.args[1][uk+14:wc]), HTTP_Bad_Request)
+ if fl>=0:
+ raise db_base_Exception("Field {} does not exist".format(e.args[1][uk+14:wc]), HTTP_Bad_Request)
+ raise db_base_Exception("Database internal Error {}: {}".format(e.args[0], e.args[1]), HTTP_Internal_Server_Error)
+
+ def __str2db_format(self, data):
+ '''Convert string data to database format.
+ If data is None it returns the 'Null' text,
+ otherwise it returns the text surrounded by quotes ensuring internal quotes are escaped.
+ '''
+ if data==None:
+ return 'Null'
++ elif isinstance(data, str):
++ return json.dumps(data)
+ else:
+ return json.dumps(str(data))
+
+ def __tuple2db_format_set(self, data):
+ '''Compose the needed text for a SQL SET, parameter 'data' is a pair tuple (A,B),
+ and it returns the text 'A="B"', where A is a field of a table and B is the value
+ If B is None it returns the 'A=Null' text, without surrounding Null by quotes
+ If B is not None it returns the text "A='B'" or 'A="B"' where B is surrounded by quotes,
+ and it ensures internal quotes of B are escaped.
+ '''
+ if data[1]==None:
+ return str(data[0]) + "=Null"
++ elif isinstance(data[1], str):
++ return str(data[0]) + '=' + json.dumps(data[1])
+ else:
+ return str(data[0]) + '=' + json.dumps(str(data[1]))
+
+ def __tuple2db_format_where(self, data):
+ '''Compose the needed text for a SQL WHERE, parameter 'data' is a pair tuple (A,B),
+ and it returns the text 'A="B"', where A is a field of a table and B is the value
+ If B is None it returns the 'A is Null' text, without surrounding Null by quotes
+ If B is not None it returns the text "A='B'" or 'A="B"' where B is surrounded by quotes,
+ and it ensures internal quotes of B are escaped.
+ '''
+ if data[1]==None:
+ return str(data[0]) + " is Null"
- out=str(data[1])
- return str(data[0]) + '<>' + json.dumps(out)
++ elif isinstance(data[1], str):
++ return str(data[0]) + '=' + json.dumps(data[1])
++ else:
++ return str(data[0]) + '=' + json.dumps(str(data[1]))
+
+ def __tuple2db_format_where_not(self, data):
+ '''Compose the needed text for a SQL WHERE(not). parameter 'data' is a pair tuple (A,B),
+ and it returns the text 'A<>"B"', where A is a field of a table and B is the value
+ If B is None it returns the 'A is not Null' text, without surrounding Null by quotes
+ If B is not None it returns the text "A<>'B'" or 'A<>"B"' where B is surrounded by quotes,
+ and it ensures internal quotes of B are escaped.
+ '''
+ if data[1]==None:
+ return str(data[0]) + " is not Null"
++ elif isinstance(data[1], str):
++ return str(data[0]) + '<>' + json.dumps(data[1])
++ else:
++ return str(data[0]) + '<>' + json.dumps(str(data[1]))
+
+ def __remove_quotes(self, data):
+ '''remove single quotes ' of any string content of data dictionary'''
+ for k,v in data.items():
+ if type(v) == str:
+ if "'" in v:
+ data[k] = data[k].replace("'","_")
+
+ def _update_rows(self, table, UPDATE, WHERE, modified_time=0):
+ ''' Update one or several rows into a table.
+ Attributes
+ UPDATE: dictionary with the key: value to change
+ table: table where to update
+ WHERE: dictionary of elements to update
+ Return: the number of updated rows, exception if error
+ '''
+ #gettting uuid
+ values = ",".join(map(self.__tuple2db_format_set, UPDATE.iteritems() ))
+ if modified_time:
+ values += ",modified_at={:f}".format(modified_time)
+ cmd= "UPDATE " + table +" SET " + values +\
+ " WHERE " + " and ".join(map(self.__tuple2db_format_where, WHERE.iteritems() ))
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ return self.cur.rowcount
+
+ def _new_row_internal(self, table, INSERT, add_uuid=False, root_uuid=None, created_time=0):
+ ''' Add one row into a table. It DOES NOT begin or end the transaction, so self.con.cursor must be created
+ Attribute
+ INSERT: dictionary with the key:value to insert
+ table: table where to insert
+ add_uuid: if True, it will create an uuid key entry at INSERT if not provided
+ created_time: time to add to the created_time column
+ It checks presence of uuid and add one automatically otherwise
+ Return: uuid
+ '''
+
+ if add_uuid:
+ #create uuid if not provided
+ if 'uuid' not in INSERT:
+ uuid = INSERT['uuid'] = str(myUuid.uuid1()) # create_uuid
+ else:
+ uuid = str(INSERT['uuid'])
+ else:
+ uuid=None
+ if add_uuid:
+ #defining root_uuid if not provided
+ if root_uuid is None:
+ root_uuid = uuid
+ if created_time:
+ created_at = created_time
+ else:
+ created_at=time.time()
+ #inserting new uuid
+ cmd = "INSERT INTO uuids (uuid, root_uuid, used_at, created_at) VALUES ('{:s}','{:s}','{:s}', {:f})".format(uuid, root_uuid, table, created_at)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ #insertion
+ cmd= "INSERT INTO " + table +" SET " + \
+ ",".join(map(self.__tuple2db_format_set, INSERT.iteritems() ))
+ if created_time:
+ cmd += ",created_at=%f" % created_time
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ self.cur.rowcount
+ return uuid
+
+ def _get_rows(self,table,uuid):
+ cmd = "SELECT * FROM {} WHERE uuid='{}'".format(str(table), str(uuid))
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ return rows
+
+ def new_row(self, table, INSERT, add_uuid=False, created_time=0):
+ ''' Add one row into a table.
+ Attribute
+ INSERT: dictionary with the key: value to insert
+ table: table where to insert
+ tenant_id: only useful for logs. If provided, logs will use this tenant_id
+ add_uuid: if True, it will create an uuid key entry at INSERT if not provided
+ It checks presence of uuid and add one automatically otherwise
+ Return: (result, uuid) where result can be 0 if error, or 1 if ok
+ '''
+ if table in self.tables_with_created_field and created_time==0:
+ created_time=time.time()
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor()
+ return self._new_row_internal(table, INSERT, add_uuid, None, created_time)
+
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+ def update_rows(self, table, UPDATE, WHERE, modified_time=0):
+ ''' Update one or several rows into a table.
+ Attributes
+ UPDATE: dictionary with the key: value to change
+ table: table where to update
+ WHERE: dictionary of elements to update
+ Return: (result, descriptive text) where result indicates the number of updated files
+ '''
+ if table in self.tables_with_created_field and modified_time==0:
+ modified_time=time.time()
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor()
+ return self._update_rows(table, UPDATE, WHERE)
+
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+ def delete_row_by_id(self, table, uuid):
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ #delete host
+ self.cur = self.con.cursor()
+ cmd = "DELETE FROM {} WHERE uuid = '{}'".format(table, uuid)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ deleted = self.cur.rowcount
+ if deleted:
+ #delete uuid
+ self.cur = self.con.cursor()
+ cmd = "DELETE FROM uuids WHERE root_uuid = '{}'".format(uuid)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ return deleted
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries, "delete", "dependencies")
+ tries -= 1
+
+ def delete_row(self, **sql_dict):
+ ''' Deletes rows from a table.
+ Attribute sql_dir: dictionary with the following key: value
+ 'FROM': string of table name (Mandatory)
+ 'WHERE': dict of key:values, translated to key=value AND ... (Optional)
+ 'WHERE_NOT': dict of key:values, translated to key<>value AND ... (Optional)
+ if value is None, it is translated to key is not null
+ 'LIMIT': limit of number of rows (Optional)
+ Return: the number of deleted or exception if error
+ '''
+ #print sql_dict
+ from_ = "FROM " + str(sql_dict['FROM'])
+ #print 'from_', from_
+ if 'WHERE' in sql_dict and len(sql_dict['WHERE']) > 0:
+ w=sql_dict['WHERE']
+ where_ = "WHERE " + " AND ".join(map(self.__tuple2db_format_where, w.iteritems()))
+ else: where_ = ""
+ if 'WHERE_NOT' in sql_dict and len(sql_dict['WHERE_NOT']) > 0:
+ w=sql_dict['WHERE_NOT']
+ where_2 = " AND ".join(map(self.__tuple2db_format_where_not, w.iteritems()))
+ if len(where_)==0: where_ = "WHERE " + where_2
+ else: where_ = where_ + " AND " + where_2
+ #print 'where_', where_
+ limit_ = "LIMIT " + str(sql_dict['LIMIT']) if 'LIMIT' in sql_dict else ""
+ #print 'limit_', limit_
+ cmd = " ".join( ("DELETE", from_, where_, limit_) )
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor()
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ deleted = self.cur.rowcount
+ return deleted
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+ def get_rows_by_id(self, table, uuid):
+ '''get row from a table based on uuid'''
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor(mdb.cursors.DictCursor)
+ cmd="SELECT * FROM {} where uuid='{}'".format(str(table), str(uuid))
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ return rows
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+ def get_rows(self, **sql_dict):
+ ''' Obtain rows from a table.
+ Attribute sql_dict: dictionary with the following key: value
+ 'SELECT': list or tuple of fields to retrieve (by default all)
+ 'FROM': string of table name (Mandatory)
+ 'WHERE': dict of key:values, translated to key=value (key is null) AND ... (Optional)
+ 'WHERE_NOT': dict of key:values, translated to key<>value (key is not null) AND ... (Optional)
+ 'WHERE_OR': dict of key:values, translated to key=value OR ... (Optional)
+ 'WHERE_AND_OR: str 'AND' or 'OR'(by default) mark the priority to 'WHERE AND (WHERE_OR)' or (WHERE) OR WHERE_OR' (Optional)
+ 'LIMIT': limit of number of rows (Optional)
+ 'ORDER_BY': list or tuple of fields to order
+ Return: a list with dictionaries at each row
+ '''
+ select_= "SELECT " + ("*" if 'SELECT' not in sql_dict else ",".join(map(str,sql_dict['SELECT'])) )
+ from_ = "FROM " + str(sql_dict['FROM'])
+ where_and = ""
+ where_or = ""
+ w=sql_dict.get('WHERE')
+ if w:
+ where_and = " AND ".join(map(self.__tuple2db_format_where, w.iteritems() ))
+ w=sql_dict.get('WHERE_NOT')
+ if w:
+ if where_and: where_and += " AND "
+ where_and += " AND ".join(map(self.__tuple2db_format_where_not, w.iteritems() ) )
+ w=sql_dict.get('WHERE_OR')
+ if w:
+ where_or = " OR ".join(map(self.__tuple2db_format_where, w.iteritems() ))
+ # combine the AND and OR groups according to WHERE_AND_OR priority
+ if where_and and where_or:
+ if sql_dict.get("WHERE_AND_OR") == "AND":
+ where_ = "WHERE " + where_and + " AND (" + where_or + ")"
+ else:
+ where_ = "WHERE (" + where_and + ") OR " + where_or
+ elif where_and and not where_or:
+ where_ = "WHERE " + where_and
+ elif not where_and and where_or:
+ where_ = "WHERE " + where_or
+ else:
+ where_ = ""
+ limit_ = "LIMIT " + str(sql_dict['LIMIT']) if 'LIMIT' in sql_dict else ""
+ # BUGFIX: the ORDER BY clause must be built from 'ORDER_BY', not 'SELECT'
+ order_ = "ORDER BY " + ",".join(map(str,sql_dict['ORDER_BY'])) if 'ORDER_BY' in sql_dict else ""
+
+ # BUGFIX: in SQL, ORDER BY must precede LIMIT in the statement
+ cmd = " ".join( (select_, from_, where_, order_, limit_) )
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor(mdb.cursors.DictCursor)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ return rows
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+ def get_table_by_uuid_name(self, table, uuid_name, error_item_text=None, allow_serveral=False, WHERE_OR={}, WHERE_AND_OR="OR"):
+ ''' Obtain One row from a table based on name or uuid.
+ Attribute:
+ table: string of table name
+ uuid_name: name or uuid. If not uuid format is found, it is considered a name
+ allow_serveral: if False return ERROR if more than one row is found
+ error_item_text: in case of error it identifies the 'item' name for a proper output text
+ 'WHERE_OR': dict of key:values, translated to key=value OR ... (Optional)
+ 'WHERE_AND_OR: str 'AND' or 'OR'(by default) mark the priority to 'WHERE AND (WHERE_OR)' or (WHERE) OR WHERE_OR' (Optional
+ Return: if allow_serveral==False, a dictionary with this row, or (-HTTP_code, text) error tuple if zero or several rows are found
+ if allow_serveral==True, a list of dictionaries with the row or rows, error tuple if no item is found
+ '''
+ # NOTE(review): WHERE_OR={} is a mutable default argument; safe only while no
+ # caller mutates it. 'allow_serveral' misspelling kept for interface compatibility.
+
+ if error_item_text==None:
+ error_item_text = table
+ # uuid-shaped identifiers are matched on the 'uuid' column, others on 'name'
+ what = 'uuid' if af.check_valid_uuid(uuid_name) else 'name'
+ cmd = " SELECT * FROM {} WHERE {}='{}'".format(table, what, uuid_name)
+ if WHERE_OR:
+ where_or = " OR ".join(map(self.__tuple2db_format_where, WHERE_OR.iteritems() ))
+ if WHERE_AND_OR == "AND":
+ cmd += " AND (" + where_or + ")"
+ else:
+ cmd += " OR " + where_or
+
+
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor(mdb.cursors.DictCursor)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ number = self.cur.rowcount
+ if number==0:
+ return -HTTP_Not_Found, "No %s found with %s '%s'" %(error_item_text, what, uuid_name)
+ elif number>1 and not allow_serveral:
+ return -HTTP_Bad_Request, "More than one %s found with %s '%s'" %(error_item_text, what, uuid_name)
+ if allow_serveral:
+ rows = self.cur.fetchall()
+ else:
+ rows = self.cur.fetchone()
+ return rows
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+ def get_uuid(self, uuid):
+ '''Check in the database if this uuid is already present in table 'uuids'.
+ Return: (rowcount, rows) tuple; rowcount is 0 when the uuid is not present.
+ '''
+ for retry_ in range(0,2):
+ try:
+ with self.con:
+ self.cur = self.con.cursor(mdb.cursors.DictCursor)
+ self.cur.execute("SELECT * FROM uuids where uuid='" + str(uuid) + "'")
+ rows = self.cur.fetchall()
+ return self.cur.rowcount, rows
+ except (mdb.Error, AttributeError) as e:
+ # NOTE(review): Python 2 print statement; the sibling methods use
+ # self.logger instead — inconsistent error reporting style
+ print "nfvo_db.get_uuid DB Exception %d: %s" % (e.args[0], e.args[1])
+ r,c = self._format_error(e)
+ if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+
+ def get_uuid_from_name(self, table, name):
+ '''Searches in table the name and returns the uuid
+ Return: (rowcount, uuid) on a single match;
+ (0, error_text) when not found;
+ (rowcount, error_text) when the name is not unique.
+ '''
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor(mdb.cursors.DictCursor)
+ where_text = "name='" + name +"'"
+ self.cur.execute("SELECT * FROM " + table + " WHERE "+ where_text)
+ rows = self.cur.fetchall()
+ if self.cur.rowcount==0:
+ return 0, "Name %s not found in table %s" %(name, table)
+ elif self.cur.rowcount>1:
+ return self.cur.rowcount, "More than one VNF with name %s found in table %s" %(name, table)
+ return self.cur.rowcount, rows[0]["uuid"]
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
--- /dev/null
- object_schema, netmap_new_schema, netmap_edit_schema
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ '''
+ HTTP server implementing the openmano API. It will answer to POST, PUT, GET methods in the appropriate URLs
+ and will use the nfvo.py module to run the appropriate method.
+ Every YAML/JSON file is checked against a schema in openmano_schemas.py module.
+ '''
+ __author__="Alfonso Tierno, Gerardo Garcia"
+ __date__ ="$17-sep-2014 09:07:15$"
+
+ import bottle
+ import yaml
+ import json
+ import threading
+ import time
+ import logging
+
+ from jsonschema import validate as js_v, exceptions as js_e
+ from openmano_schemas import vnfd_schema_v01, vnfd_schema_v02, \
+ nsd_schema_v01, nsd_schema_v02, nsd_schema_v03, scenario_edit_schema, \
+ scenario_action_schema, instance_scenario_action_schema, instance_scenario_create_schema_v01, \
+ tenant_schema, tenant_edit_schema,\
+ datacenter_schema, datacenter_edit_schema, datacenter_action_schema, datacenter_associate_schema,\
- '''insert a tenant into the catalogue. '''
++ object_schema, netmap_new_schema, netmap_edit_schema, sdn_controller_schema, sdn_controller_edit_schema, \
++ sdn_port_mapping_schema
++
+ import nfvo
+ import utils
+ from db_base import db_base_Exception
+ from functools import wraps
+
+ # Module-level state shared by all bottle route handlers below.
+ # NOTE(review): 'global' at module scope is a no-op; kept as documentation of intent.
+ global mydb
+ global url_base
+ global logger
+ url_base="/openmano"
+ logger = None
+
+ # HTTP status codes used by the handlers when aborting a request
+ HTTP_Bad_Request = 400
+ HTTP_Unauthorized = 401
+ HTTP_Not_Found = 404
+ HTTP_Forbidden = 403
+ HTTP_Method_Not_Allowed = 405
+ HTTP_Not_Acceptable = 406
+ HTTP_Service_Unavailable = 503
+ HTTP_Internal_Server_Error= 500
+
+ def delete_nulls(var):
+ '''Recursively remove None values from the dict/list/tuple 'var', in place.
+ Return: True when 'var' ends up empty, so the caller can drop it as well.
+ '''
+ if type(var) is dict:
+ # Python 2 dict.keys() returns a list copy, so deleting while iterating is safe
+ for k in var.keys():
+ if var[k] is None: del var[k]
+ elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple:
+ if delete_nulls(var[k]): del var[k]
+ if len(var) == 0: return True
+ elif type(var) is list or type(var) is tuple:
+ for k in var:
+ if type(k) is dict: delete_nulls(k)
+ if len(var) == 0: return True
+ return False
+
+ def convert_datetime2str(var):
+ '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%i:%s'
+ It enters recursively in the dict var finding this kind of variables
+ '''
+ if type(var) is dict:
+ for k,v in var.items():
+ # timestamps are stored as float epoch seconds under these well-known keys
+ if type(v) is float and k in ("created_at", "modified_at"):
+ var[k] = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(v) )
+ elif type(v) is dict or type(v) is list or type(v) is tuple:
+ convert_datetime2str(v)
+ if len(var) == 0: return True
+ elif type(var) is list or type(var) is tuple:
+ for v in var:
+ convert_datetime2str(v)
+
+ def log_to_logger(fn):
+ '''
+ Wrap a Bottle request so that a log line is emitted after it's handled.
+ (This decorator can be extended to take the desired logger as a param.)
+ Installed globally via bottle.install() in httpserver.run().
+ '''
+ @wraps(fn)
+ def _log_to_logger(*args, **kwargs):
+ actual_response = fn(*args, **kwargs)
+ # modify this to log exactly what you need:
+ logger.info('FROM %s %s %s %s' % (bottle.request.remote_addr,
+ bottle.request.method,
+ bottle.request.url,
+ bottle.response.status))
+ return actual_response
+ return _log_to_logger
+
+ class httpserver(threading.Thread):
+ '''Daemon thread that serves the openmano REST API with bottle.'''
+ def __init__(self, db, admin=False, host='localhost', port=9090):
+ #global url_base
+ global mydb
+ global logger
+ #initialization
+ if not logger:
+ logger = logging.getLogger('openmano.http')
+ threading.Thread.__init__(self)
+ self.host = host
+ self.port = port #Port where the listen service must be started
+ if admin==True:
+ self.name = "http_admin"
+ else:
+ self.name = "http"
+ #self.url_preffix = 'http://' + host + ':' + str(port) + url_base
+ # the db handle is published as a module global used by every route handler
+ mydb = db
+ #self.first_usable_connection_index = 10
+ #self.next_connection_index = self.first_usable_connection_index #The next connection index to be used
+ #Ensure that when the main program exits the thread will also exit
+ self.daemon = True
+ self.setDaemon(True)
+
+ def run(self):
+ # install the request-logging plugin, then block serving requests
+ bottle.install(log_to_logger)
+ bottle.run(host=self.host, port=self.port, debug=False, quiet=True)
+
+ def run_bottle(db, host_='localhost', port_=9090):
+ '''used for launching in main thread, so that it can be debugged'''
+ global mydb
+ mydb = db
+ bottle.run(host=host_, port=port_, debug=True) #quiet=True
+
+
+ @bottle.route(url_base + '/', method='GET')
+ def http_get():
+ '''Liveness check for the API root path.'''
+ #print
+ return 'works' #TODO: to be completed
+
+ #
+ # Util functions
+ #
+
+ def change_keys_http2db(data, http_db, reverse=False):
+ '''Change keys of dictionary data according to the key_dict values
+ This allows changing from http interface names to database names.
+ When reverse is True, the change is the other way around
+ Attributes:
+ data: can be a dictionary or a list
+ http_db: is a dictionary with http names as keys and database names as values
+ reverse: by default change is done from http api to database. If True change is done the other way around
+ Return: None, but data is modified'''
+ if type(data) is tuple or type(data) is list:
+ for d in data:
+ change_keys_http2db(d, http_db, reverse)
+ elif type(data) is dict or type(data) is bottle.FormsDict:
+ if reverse:
+ for k,v in http_db.items():
+ if v in data: data[k]=data.pop(v)
+ else:
+ for k,v in http_db.items():
+ if k in data: data[v]=data.pop(k)
+
+ def format_out(data):
+ '''return string of dictionary data according to requested json, yaml, xml. By default json'''
+ logger.debug("OUT: " + yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) )
+ # content negotiation on the request's Accept header; json is the default
+ if 'application/yaml' in bottle.request.headers.get('Accept'):
+ bottle.response.content_type='application/yaml'
+ return yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) #, canonical=True, default_style='"'
+ else: #by default json
+ bottle.response.content_type='application/json'
+ #return data #json no style
+ return json.dumps(data, indent=4) + "\n"
+
+ def format_in(default_schema, version_fields=None, version_dict_schema=None):
+ ''' Parse the content of HTTP request against a json_schema
+ Parameters
+ default_schema: The schema to be parsed by default if no version field is found in the client data
+ version_fields: If provided it contains a tuple or list with the fields to iterate across the client data to obtain the version
+ version_dict_schema: It contains a dictionary with the version as key, and json schema to apply as value
+ It can contain a None as key, and this is apply if the client data version does not match any key
+ Return:
+ user_data, used_schema: if the data is successfully decoded and matches the schema
+ launch a bottle abort if fails
+ '''
+ #print "HEADERS :" + str(bottle.request.headers.items())
+ try:
+ error_text = "Invalid header format "
+ format_type = bottle.request.headers.get('Content-Type', 'application/json')
+ if 'application/json' in format_type:
+ error_text = "Invalid json format "
+ #Use the json decoder instead of bottle decoder because it informs about the location of error formats with a ValueError exception
+ client_data = json.load(bottle.request.body)
+ #client_data = bottle.request.json()
+ elif 'application/yaml' in format_type:
+ error_text = "Invalid yaml format "
+ # SECURITY(review): yaml.load on an untrusted request body can build
+ # arbitrary Python objects; yaml.safe_load is the safe choice here
+ client_data = yaml.load(bottle.request.body)
+ elif 'application/xml' in format_type:
+ bottle.abort(501, "Content-Type: application/xml not supported yet.")
+ else:
+ logger.warning('Content-Type ' + str(format_type) + ' not supported.')
+ bottle.abort(HTTP_Not_Acceptable, 'Content-Type ' + str(format_type) + ' not supported.')
+ return
+ #if client_data == None:
+ # bottle.abort(HTTP_Bad_Request, "Content error, empty")
+ # return
+
+ logger.debug('IN: %s', yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) )
+ #look for the client provider version
+ error_text = "Invalid content "
+ client_version = None
+ used_schema = None
+ if version_fields != None:
+ # walk the nested fields to extract the declared schema version
+ client_version = client_data
+ for field in version_fields:
+ if field in client_version:
+ client_version = client_version[field]
+ else:
+ client_version=None
+ break
+ if client_version==None:
+ used_schema=default_schema
+ elif version_dict_schema!=None:
+ if client_version in version_dict_schema:
+ used_schema = version_dict_schema[client_version]
+ elif None in version_dict_schema:
+ used_schema = version_dict_schema[None]
+ if used_schema==None:
+ bottle.abort(HTTP_Bad_Request, "Invalid schema version or missing version field")
+
+ js_v(client_data, used_schema)
+ return client_data, used_schema
+ except (ValueError, yaml.YAMLError) as exc:
+ error_text += str(exc)
+ logger.error(error_text)
+ bottle.abort(HTTP_Bad_Request, error_text)
+ except js_e.ValidationError as exc:
+ logger.error("validate_in error, jsonschema exception at '%s' '%s' ", str(exc.path), str(exc.message))
+ error_pos = ""
+ if len(exc.path)>0: error_pos=" at " + ":".join(map(json.dumps, exc.path))
+ bottle.abort(HTTP_Bad_Request, error_text + exc.message + error_pos)
+ #except:
+ # bottle.abort(HTTP_Bad_Request, "Content error: Failed to parse Content-Type", error_pos)
+ # raise
+
+ def filter_query_string(qs, http2db, allowed):
+ '''Process query string (qs) checking that contains only valid tokens for avoiding SQL injection
+ Attributes:
+ 'qs': bottle.FormsDict variable to be processed. None or empty is considered valid
+ 'http2db': dictionary with change from http API naming (dictionary key) to database naming(dictionary value)
+ 'allowed': list of allowed string tokens (API http naming). All the keys of 'qs' must be one of 'allowed'
+ Return: A tuple with the (select,where,limit) to be used in a database query. All of them transformed to the database naming
+ select: list of items to retrieve, filtered by query string 'field=token'. If no 'field' is present, allowed list is returned
+ where: dictionary with key, value, taken from the query string token=value. Empty if nothing is provided
+ limit: limit dictated by user with the query string 'limit'. 100 by default
+ abort if not permitted, using bottle.abort
+ '''
+ where={}
+ limit=100
+ select=[]
+ #if type(qs) is not bottle.FormsDict:
+ # bottle.abort(HTTP_Internal_Server_Error, '!!!!!!!!!!!!!!invalid query string not a dictionary')
+ # #bottle.abort(HTTP_Internal_Server_Error, "call programmer")
+ for k in qs:
+ if k=='field':
+ # 'field' may appear several times; every value must be in 'allowed'
+ select += qs.getall(k)
+ for v in select:
+ if v not in allowed:
+ bottle.abort(HTTP_Bad_Request, "Invalid query string at 'field="+v+"'")
+ elif k=='limit':
+ try:
+ limit=int(qs[k])
+ except:
+ bottle.abort(HTTP_Bad_Request, "Invalid query string at 'limit="+qs[k]+"'")
+ else:
+ if k not in allowed:
+ bottle.abort(HTTP_Bad_Request, "Invalid query string at '"+k+"="+qs[k]+"'")
+ # the literal string "null" selects rows where the column IS NULL
+ if qs[k]!="null": where[k]=qs[k]
+ else: where[k]=None
+ if len(select)==0: select += allowed
+ #change from http api to database naming
+ for i in range(0,len(select)):
+ k=select[i]
+ if http2db and k in http2db:
+ select[i] = http2db[k]
+ if http2db:
+ change_keys_http2db(where, http2db)
+ #print "filter_query_string", select,where,limit
+
+ return select,where,limit
+
+ @bottle.hook('after_request')
+ def enable_cors():
+ '''Don't know yet if really needed. Keep it just in case'''
+ # permissive CORS header added to every response
+ bottle.response.headers['Access-Control-Allow-Origin'] = '*'
+
+ #
+ # VNFs
+ #
+
+ @bottle.route(url_base + '/tenants', method='GET')
+ def http_get_tenants():
+ '''get the list of tenants, filtered/projected by the query string'''
+ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+ select_,where_,limit_ = filter_query_string(bottle.request.query, None,
+ ('uuid','name','description','created_at') )
+ try:
+ tenants = mydb.get_rows(FROM='nfvo_tenants', SELECT=select_,WHERE=where_,LIMIT=limit_)
+ #change_keys_http2db(content, http2db_tenant, reverse=True)
+ convert_datetime2str(tenants)
+ data={'tenants' : tenants}
+ return format_out(data)
+ except db_base_Exception as e:
+ logger.error("http_get_tenants error {}: {}".format(e.http_code, str(e)))
+ bottle.abort(e.http_code, str(e))
+ except Exception as e:
+ logger.error("Unexpected exception: ", exc_info=True)
+ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
+ @bottle.route(url_base + '/tenants/<tenant_id>', method='GET')
+ def http_get_tenant_id(tenant_id):
+ '''get tenant details, can use both uuid or name'''
+ #obtain data
+ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+ try:
+ tenant = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id, "tenant")
+ #change_keys_http2db(content, http2db_tenant, reverse=True)
+ convert_datetime2str(tenant)
+ data={'tenant' : tenant}
+ return format_out(data)
+ except db_base_Exception as e:
+ logger.error("http_get_tenant_id error {}: {}".format(e.http_code, str(e)))
+ bottle.abort(e.http_code, str(e))
+ except Exception as e:
+ logger.error("Unexpected exception: ", exc_info=True)
+ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
+ @bottle.route(url_base + '/tenants', method='POST')
+ def http_post_tenants():
+ '''insert a tenant into the catalogue. '''
+ #parse input data
+ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+ http_content,_ = format_in( tenant_schema )
+ r = utils.remove_extra_items(http_content, tenant_schema)
+ if r:
+ logger.debug("Remove received extra items %s", str(r))
+ try:
+ data = nfvo.new_tenant(mydb, http_content['tenant'])
+ # answer with the standard GET representation of the new tenant
+ return http_get_tenant_id(data)
+ except (nfvo.NfvoException, db_base_Exception) as e:
+ logger.error("http_post_tenants error {}: {}".format(e.http_code, str(e)))
+ bottle.abort(e.http_code, str(e))
+ except Exception as e:
+ logger.error("Unexpected exception: ", exc_info=True)
+ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
+ @bottle.route(url_base + '/tenants/<tenant_id>', method='PUT')
+ def http_edit_tenant_id(tenant_id):
+ '''edit tenant details, can use both uuid or name'''
+ #parse input data
+ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+ http_content,_ = format_in( tenant_edit_schema )
+ r = utils.remove_extra_items(http_content, tenant_edit_schema)
+ if r:
+ logger.debug("Remove received extra items %s", str(r))
+
+ #obtain data, check that only one exist
+ try:
+ tenant = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
+ #edit data
+ tenant_id = tenant['uuid']
+ where={'uuid': tenant['uuid']}
+ mydb.update_rows('nfvo_tenants', http_content['tenant'], where)
+ return http_get_tenant_id(tenant_id)
+ except db_base_Exception as e:
+ logger.error("http_edit_tenant_id error {}: {}".format(e.http_code, str(e)))
+ bottle.abort(e.http_code, str(e))
+ except Exception as e:
+ logger.error("Unexpected exception: ", exc_info=True)
+ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
+ @bottle.route(url_base + '/tenants/<tenant_id>', method='DELETE')
+ def http_delete_tenant_id(tenant_id):
+ '''delete a tenant from database, can use both uuid or name'''
+ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+ try:
+ data = nfvo.delete_tenant(mydb, tenant_id)
+ return format_out({"result":"tenant " + data + " deleted"})
+ except db_base_Exception as e:
+ logger.error("http_delete_tenant_id error {}: {}".format(e.http_code, str(e)))
+ bottle.abort(e.http_code, str(e))
+ except Exception as e:
+ logger.error("Unexpected exception: ", exc_info=True)
+ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
+ @bottle.route(url_base + '/<tenant_id>/datacenters', method='GET')
+ def http_get_datacenters(tenant_id):
+ '''get the list of datacenters; tenant_id may be 'any' to list all of them'''
+ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+ try:
+ if tenant_id != 'any':
+ #check valid tenant_id
+ nfvo.check_tenant(mydb, tenant_id)
+ select_,where_,limit_ = filter_query_string(bottle.request.query, None,
+ ('uuid','name','vim_url','type','created_at') )
+ if tenant_id != 'any':
+ where_['nfvo_tenant_id'] = tenant_id
+ # disambiguate created_at: both joined tables have that column
+ if 'created_at' in select_:
+ select_[ select_.index('created_at') ] = 'd.created_at as created_at'
+ if 'created_at' in where_:
+ where_['d.created_at'] = where_.pop('created_at')
+ datacenters = mydb.get_rows(FROM='datacenters as d join tenants_datacenters as td on d.uuid=td.datacenter_id',
+ SELECT=select_,WHERE=where_,LIMIT=limit_)
+ else:
+ datacenters = mydb.get_rows(FROM='datacenters',
+ SELECT=select_,WHERE=where_,LIMIT=limit_)
+ #change_keys_http2db(content, http2db_tenant, reverse=True)
+ convert_datetime2str(datacenters)
+ data={'datacenters' : datacenters}
+ return format_out(data)
+ except (nfvo.NfvoException, db_base_Exception) as e:
+ logger.error("http_get_datacenters error {}: {}".format(e.http_code, str(e)))
+ bottle.abort(e.http_code, str(e))
+ except Exception as e:
+ logger.error("Unexpected exception: ", exc_info=True)
+ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
+ @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='GET')
+ def http_get_datacenter_id(tenant_id, datacenter_id):
+ '''get datacenter details, can use both uuid or name'''
+ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+ try:
+ if tenant_id != 'any':
+ #check valid tenant_id
+ nfvo.check_tenant(mydb, tenant_id)
+ #obtain data
+ what = 'uuid' if utils.check_valid_uuid(datacenter_id) else 'name'
+ where_={}
+ where_[what] = datacenter_id
+ select_=['uuid', 'name','vim_url', 'vim_url_admin', 'type', 'd.config as config', 'description', 'd.created_at as created_at']
+ if tenant_id != 'any':
+ select_.append("datacenter_tenant_id")
+ where_['td.nfvo_tenant_id']= tenant_id
+ from_='datacenters as d join tenants_datacenters as td on d.uuid=td.datacenter_id'
+ else:
+ from_='datacenters as d'
+ datacenters = mydb.get_rows(
+ SELECT=select_,
+ FROM=from_,
+ WHERE=where_)
+
+ if len(datacenters)==0:
+ bottle.abort( HTTP_Not_Found, "No datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
+ elif len(datacenters)>1:
+ bottle.abort( HTTP_Bad_Request, "More than one datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
+ datacenter = datacenters[0]
+ if tenant_id != 'any':
+ #get vim tenant info
+ vim_tenants = mydb.get_rows(
+ SELECT=("vim_tenant_name", "vim_tenant_id", "user", "passwd", "config"),
+ FROM="datacenter_tenants",
+ WHERE={"uuid": datacenters[0]["datacenter_tenant_id"]},
+ ORDER_BY=("created", ) )
+ del datacenter["datacenter_tenant_id"]
+ datacenter["vim_tenants"] = vim_tenants
+ for vim_tenant in vim_tenants:
+ # never expose stored credentials in the API answer
+ if vim_tenant["passwd"]:
+ vim_tenant["passwd"] = "******"
+ if vim_tenant['config'] != None:
+ try:
+ # config is stored as a YAML/JSON text column; presumably written
+ # by this application (trusted) — best effort to decode it
+ config_dict = yaml.load(vim_tenant['config'])
+ vim_tenant['config'] = config_dict
+ except Exception as e:
+ logger.error("Exception '%s' while trying to load config information", str(e))
+
+ if datacenter['config'] != None:
+ try:
+ config_dict = yaml.load(datacenter['config'])
+ datacenter['config'] = config_dict
+ except Exception as e:
+ logger.error("Exception '%s' while trying to load config information", str(e))
+ #change_keys_http2db(content, http2db_datacenter, reverse=True)
+ convert_datetime2str(datacenter)
+ data={'datacenter' : datacenter}
+ return format_out(data)
+ except (nfvo.NfvoException, db_base_Exception) as e:
+ logger.error("http_get_datacenter_id error {}: {}".format(e.http_code, str(e)))
+ bottle.abort(e.http_code, str(e))
+ except Exception as e:
+ logger.error("Unexpected exception: ", exc_info=True)
+ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
+ @bottle.route(url_base + '/datacenters', method='POST')
+ def http_post_datacenters():
++ '''insert a datacenter into the catalogue. '''
+ #parse input data
+ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+ http_content,_ = format_in( datacenter_schema )
+ r = utils.remove_extra_items(http_content, datacenter_schema)
+ if r:
+ logger.debug("Remove received extra items %s", str(r))
+ try:
+ data = nfvo.new_datacenter(mydb, http_content['datacenter'])
+ # answer with the standard GET representation of the new datacenter
+ return http_get_datacenter_id('any', data)
+ except (nfvo.NfvoException, db_base_Exception) as e:
+ logger.error("http_post_datacenters error {}: {}".format(e.http_code, str(e)))
+ bottle.abort(e.http_code, str(e))
+ except Exception as e:
+ logger.error("Unexpected exception: ", exc_info=True)
+ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
+ @bottle.route(url_base + '/datacenters/<datacenter_id_name>', method='PUT')
+ def http_edit_datacenter_id(datacenter_id_name):
+ '''edit datacenter details, can use both uuid or name'''
+ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+ #parse input data
+ http_content,_ = format_in( datacenter_edit_schema )
+ r = utils.remove_extra_items(http_content, datacenter_edit_schema)
+ if r:
+ logger.debug("Remove received extra items %s", str(r))
+
+ try:
+ datacenter_id = nfvo.edit_datacenter(mydb, datacenter_id_name, http_content['datacenter'])
+ return http_get_datacenter_id('any', datacenter_id)
+ except (nfvo.NfvoException, db_base_Exception) as e:
+ logger.error("http_edit_datacenter_id error {}: {}".format(e.http_code, str(e)))
+ bottle.abort(e.http_code, str(e))
+ except Exception as e:
+ logger.error("Unexpected exception: ", exc_info=True)
+ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
++@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='POST')
++def http_post_sdn_controller(tenant_id):
++ '''insert a sdn controller into the catalogue. '''
++ #parse input data
++ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
++ http_content,_ = format_in( sdn_controller_schema )
++ try:
++ logger.debug("tenant_id: "+tenant_id)
++ #logger.debug("content: {}".format(http_content['sdn_controller']))
++
++ data = nfvo.sdn_controller_create(mydb, tenant_id, http_content['sdn_controller'])
++ # answer with the stored description of the new controller
++ return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, data)})
++ except (nfvo.NfvoException, db_base_Exception) as e:
++ logger.error("http_post_sdn_controller error {}: {}".format(e.http_code, str(e)))
++ bottle.abort(e.http_code, str(e))
++ except Exception as e:
++ logger.error("Unexpected exception: ", exc_info=True)
++ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
++
++@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='PUT')
++def http_put_sdn_controller_update(tenant_id, controller_id):
++ '''Update sdn controller and answer with its refreshed description.'''
++ #parse input data
++ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
++ http_content,_ = format_in( sdn_controller_edit_schema )
++ try:
++ logger.debug("content: {}".format(http_content['sdn_controller']))
++
++ nfvo.sdn_controller_update(mydb, tenant_id, controller_id, http_content['sdn_controller'])
++ return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, controller_id)})
++
++ except (nfvo.NfvoException, db_base_Exception) as e:
++ # BUGFIX: log line previously reported 'http_post_sdn_controller'
++ logger.error("http_put_sdn_controller_update error {}: {}".format(e.http_code, str(e)))
++ bottle.abort(e.http_code, str(e))
++ except Exception as e:
++ logger.error("Unexpected exception: ", exc_info=True)
++ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
++
++@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='GET')
++def http_get_sdn_controller(tenant_id):
++ '''get sdn controllers list, can use both uuid or name'''
++ try:
++ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
++
++ data = {'sdn_controllers': nfvo.sdn_controller_list(mydb, tenant_id)}
++ return format_out(data)
++ except (nfvo.NfvoException, db_base_Exception) as e:
++ logger.error("http_get_sdn_controller error {}: {}".format(e.http_code, str(e)))
++ bottle.abort(e.http_code, str(e))
++ except Exception as e:
++ logger.error("Unexpected exception: ", exc_info=True)
++ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
++
++@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='GET')
++def http_get_sdn_controller_id(tenant_id, controller_id):
++ '''get sdn controller details, can use both uuid or name'''
++ try:
++ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
++ data = nfvo.sdn_controller_list(mydb, tenant_id, controller_id)
++ return format_out({"sdn_controllers": data})
++ except (nfvo.NfvoException, db_base_Exception) as e:
++ logger.error("http_get_sdn_controller_id error {}: {}".format(e.http_code, str(e)))
++ bottle.abort(e.http_code, str(e))
++ except Exception as e:
++ logger.error("Unexpected exception: ", exc_info=True)
++ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
++
++@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='DELETE')
++def http_delete_sdn_controller_id(tenant_id, controller_id):
++ '''delete sdn controller, can use both uuid or name'''
++ try:
++ logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
++ data = nfvo.sdn_controller_delete(mydb, tenant_id, controller_id)
++ return format_out(data)
++ except (nfvo.NfvoException, db_base_Exception) as e:
++ logger.error("http_delete_sdn_controller_id error {}: {}".format(e.http_code, str(e)))
++ bottle.abort(e.http_code, str(e))
++ except Exception as e:
++ logger.error("Unexpected exception: ", exc_info=True)
++ bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
++
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='POST')
def http_post_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Store the SDN port mapping for a datacenter."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate the request body against the schema before touching the database
    http_content, _ = format_in(sdn_port_mapping_schema)
    try:
        mapping = nfvo.datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id,
                                                       http_content['sdn_port_mapping'])
        return format_out({"sdn_port_mapping": mapping})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_datacenter_sdn_port_mapping error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
++
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='GET')
def http_get_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Return the SDN port mapping of a datacenter (uuid or name accepted)."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        mapping = nfvo.datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id)
        return format_out({"sdn_port_mapping": mapping})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_datacenter_sdn_port_mapping error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
++
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='DELETE')
def http_delete_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Remove the SDN port mapping of a datacenter (uuid or name accepted)."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        result = nfvo.datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id)
        return format_out({"result": result})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_datacenter_sdn_port_mapping error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/networks', method='GET')  #deprecated
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='GET')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='GET')
def http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
    """List the netmaps of a datacenter, or one netmap when netmap_id is given.

    Both datacenter_id and netmap_id may be a uuid or a name.
    Raises 404 when a specific netmap is requested but not found.
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
        where_ = {"datacenter_id": datacenter_dict['uuid']}
        if netmap_id:
            # a valid uuid filters by uuid, anything else is treated as a name
            if utils.check_valid_uuid(netmap_id):
                where_["uuid"] = netmap_id
            else:
                where_["name"] = netmap_id
        netmaps = mydb.get_rows(FROM='datacenter_nets',
                                SELECT=('name', 'vim_net_id as vim_id', 'uuid', 'type', 'multipoint', 'shared', 'description', 'created_at'),
                                WHERE=where_)
        convert_datetime2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        if netmap_id and len(netmaps) == 1:
            data = {'netmap': netmaps[0]}
        elif netmap_id and len(netmaps) == 0:
            # bottle.abort raises HTTPError, so no statement is needed after it
            bottle.abort(HTTP_Not_Found, "No netmap found with " + " and ".join(
                map(lambda x: str(x[0]) + ": " + str(x[1]), where_.iteritems())))
        else:
            data = {'netmaps': netmaps}
        return format_out(data)
    except (nfvo.NfvoException, db_base_Exception) as e:
        # FIX: log label said "http_getnetwork_datacenter_id", which does not exist
        logger.error("http_getnetmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='DELETE')
def http_delnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
    """Delete one netmap (uuid or name) or every netmap of a datacenter."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
        where_ = {"datacenter_id": datacenter_dict['uuid']}
        if netmap_id:
            # narrow the filter to a single netmap, by uuid when possible
            if utils.check_valid_uuid(netmap_id):
                where_["uuid"] = netmap_id
            else:
                where_["name"] = netmap_id
        deleted = mydb.delete_row(FROM='datacenter_nets', WHERE=where_)
        if deleted == 0 and netmap_id:
            bottle.abort(HTTP_Not_Found, "No netmap found with " + " and ".join(
                map(lambda x: str(x[0]) + ": " + str(x[1]), where_.iteritems())))
        if netmap_id:
            return format_out({"result": "netmap %s deleted" % netmap_id})
        return format_out({"result": "%d netmap deleted" % deleted})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delnetmap_datacenter_id error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/upload', method='POST')
def http_uploadnetmap_datacenter_id(tenant_id, datacenter_id):
    """Import every network of the datacenter VIM as netmaps."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # passing None makes nfvo discover the networks instead of using a payload
        netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, None)
        convert_datetime2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        return format_out({'netmaps': netmaps})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_uploadnetmap_datacenter_id error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='POST')
def http_postnetmap_datacenter_id(tenant_id, datacenter_id):
    """Create a new netmap on a datacenter from the request body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate and sanitize the request body
    http_content, _ = format_in(netmap_new_schema)
    extra = utils.remove_extra_items(http_content, netmap_new_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, http_content)
        convert_datetime2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        return format_out({'netmaps': netmaps})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_postnetmap_datacenter_id error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='PUT')
def http_putnettmap_datacenter_id(tenant_id, datacenter_id, netmap_id):
    """Edit an existing netmap and return its updated representation."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate and sanitize the request body
    http_content, _ = format_in(netmap_edit_schema)
    extra = utils.remove_extra_items(http_content, netmap_edit_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        nfvo.datacenter_edit_netmap(mydb, tenant_id, datacenter_id, netmap_id, http_content)
        # reply with the same payload a GET of this netmap would produce
        return http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_putnettmap_datacenter_id error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/action', method='POST')
def http_action_datacenter_id(tenant_id, datacenter_id):
    """Perform an action over a datacenter; both ids accept uuid or name.

    For a 'net-update' action the refreshed netmap list is returned,
    otherwise the raw action result.
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate and sanitize the request body
    http_content, _ = format_in(datacenter_action_schema)
    r = utils.remove_extra_items(http_content, datacenter_action_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        result = nfvo.datacenter_action(mydb, tenant_id, datacenter_id, http_content)
        if 'net-update' in http_content:
            # FIX: was called with only datacenter_id, raising TypeError because
            # http_getnetmap_datacenter_id requires (tenant_id, datacenter_id)
            return http_getnetmap_datacenter_id(tenant_id, datacenter_id)
        else:
            return format_out(result)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_action_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/datacenters/<datacenter_id>', method='DELETE')
def http_delete_datacenter_id(datacenter_id):
    """Delete a datacenter from the database; uuid or name accepted."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        deleted_name = nfvo.delete_datacenter(mydb, datacenter_id)
        return format_out({"result": "datacenter '" + deleted_name + "' deleted"})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_datacenter_id error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='POST')
def http_associate_datacenters(tenant_id, datacenter_id):
    """Attach an existing datacenter to this tenant."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate and sanitize the request body
    http_content, _ = format_in(datacenter_associate_schema)
    extra = utils.remove_extra_items(http_content, datacenter_associate_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        dc = http_content['datacenter']
        id_ = nfvo.associate_datacenter_to_tenant(mydb, tenant_id, datacenter_id,
                                                  dc.get('vim_tenant'),
                                                  dc.get('vim_tenant_name'),
                                                  dc.get('vim_username'),
                                                  dc.get('vim_password'),
                                                  dc.get('config'))
        # respond with the datacenter details as a GET would
        return http_get_datacenter_id(tenant_id, id_)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_associate_datacenters error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='PUT')
def http_associate_datacenters_edit(tenant_id, datacenter_id):
    """Edit the association between this tenant and a datacenter."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate and sanitize the request body
    http_content, _ = format_in(datacenter_associate_schema)
    extra = utils.remove_extra_items(http_content, datacenter_associate_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        dc = http_content['datacenter']
        id_ = nfvo.edit_datacenter_to_tenant(mydb, tenant_id, datacenter_id,
                                             dc.get('vim_tenant'),
                                             dc.get('vim_tenant_name'),
                                             dc.get('vim_username'),
                                             dc.get('vim_password'),
                                             dc.get('config'))
        # respond with the datacenter details as a GET would
        return http_get_datacenter_id(tenant_id, id_)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_associate_datacenters_edit error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='DELETE')
def http_deassociate_datacenters(tenant_id, datacenter_id):
    """Detach a datacenter from this tenant."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        result = nfvo.deassociate_datacenter_to_tenant(mydb, tenant_id, datacenter_id)
        return format_out({"result": result})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_deassociate_datacenters error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='GET')
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='GET')
def http_get_vim_items(tenant_id, datacenter_id, item, name=None):
    """List VIM items of a given kind, or one item when a name is supplied."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        items = nfvo.vim_action_get(mydb, tenant_id, datacenter_id, item, name)
        return format_out(items)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_vim_items error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='DELETE')
def http_del_vim_items(tenant_id, datacenter_id, item, name):
    """Delete one VIM item of a given kind identified by name."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        result = nfvo.vim_action_delete(mydb, tenant_id, datacenter_id, item, name)
        return format_out({"result": result})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_del_vim_items error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='POST')
def http_post_vim_items(tenant_id, datacenter_id, item):
    """Create a VIM item of a given kind from the request body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # free-form payload: only generic object validation is applied
    http_content, _ = format_in(object_schema)
    try:
        created = nfvo.vim_action_create(mydb, tenant_id, datacenter_id, item, http_content)
        return format_out(created)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_vim_items error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vnfs', method='GET')
def http_get_vnfs(tenant_id):
    """List VNFs visible to a tenant; 'any' lists all without a tenant check."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != 'any':
            # make sure the tenant exists before querying
            nfvo.check_tenant(mydb, tenant_id)
        select_, where_, limit_ = filter_query_string(bottle.request.query, None,
                ('uuid', 'name', 'description', 'public', "tenant_id", "created_at"))
        # a regular tenant sees its own VNFs plus the public ones
        where_or = {}
        if tenant_id != "any":
            where_or["tenant_id"] = tenant_id
            where_or["public"] = True
        vnfs = mydb.get_rows(FROM='vnfs', SELECT=select_, WHERE=where_, WHERE_OR=where_or,
                             WHERE_AND_OR="AND", LIMIT=limit_)
        utils.convert_str2boolean(vnfs, ('public',))
        convert_datetime2str(vnfs)
        return format_out({'vnfs': vnfs})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_vnfs error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='GET')
def http_get_vnf_id(tenant_id, vnf_id):
    """Return the details of one VNF; the id may be a uuid or a name."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        vnf = nfvo.get_vnf_id(mydb, tenant_id, vnf_id)
        utils.convert_str2boolean(vnf, ('public',))
        convert_datetime2str(vnf)
        return format_out(vnf)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_vnf_id error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vnfs', method='POST')
def http_post_vnfs(tenant_id):
    """Insert a VNF into the catalogue.

    Creates the flavor and images in the VIM and stores the VNF and its
    internal structure in the openmano database.
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # pick the descriptor schema from the declared schema_version
    http_content, used_schema = format_in(vnfd_schema_v01, ("schema_version",), {"0.2": vnfd_schema_v02})
    extra = utils.remove_extra_items(http_content, used_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        if used_schema == vnfd_schema_v01:
            vnf_id = nfvo.new_vnf(mydb, tenant_id, http_content)
        elif used_schema == vnfd_schema_v02:
            vnf_id = nfvo.new_vnf_v02(mydb, tenant_id, http_content)
        else:
            logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
            bottle.abort(HTTP_Bad_Request, "Invalid schema version")
        # respond with the VNF details as a GET would
        return http_get_vnf_id(tenant_id, vnf_id)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_vnfs error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='DELETE')
def http_delete_vnf_id(tenant_id, vnf_id):
    """Delete a VNF from the database and its images/flavors in the VIM when appropriate."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        deleted_name = nfvo.delete_vnf(mydb, tenant_id, vnf_id)
        return format_out({"result": "VNF " + deleted_name + " deleted"})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_vnf_id error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/physicalview/<datacenter>', method='GET')
def http_get_hosts(tenant_id, datacenter):
    """Return the host topology obtained from the VIM.

    When <datacenter> is the literal 'treeview' a tree-shaped summary is
    returned; any other value yields the full host info (the GUI sends a
    hardcoded datacenter value, which is ignored).
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if datacenter == 'treeview':
            data = nfvo.get_hosts(mydb, tenant_id)
        else:
            #openmano-gui is using a hardcoded value for the datacenter
            result, data = nfvo.get_hosts_info(mydb, tenant_id)  #, datacenter)
            # FIX: the result check belongs inside this branch; 'result' is not
            # assigned on the treeview path, so checking it there raised
            # UnboundLocalError and returned a spurious 500
            if result < 0:
                bottle.abort(-result, data)
        convert_datetime2str(data)
        return format_out(data)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_hosts error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<path:path>', method='OPTIONS')
def http_options_deploy(path):
    """Answer CORS preflight OPTIONS requests issued by the GUI."""
    #TODO: check correct path, and correct headers request
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    for header, value in (('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE, OPTIONS'),
                          ('Accept', 'application/yaml,application/json'),
                          ('Content-Type', 'application/yaml,application/json'),
                          ('Access-Control-Allow-Headers', 'content-type'),
                          ('Access-Control-Allow-Origin', '*')):
        bottle.response.set_header(header, value)
    return
+
@bottle.route(url_base + '/<tenant_id>/topology/deploy', method='POST')
def http_post_deploy(tenant_id):
    """Create a scenario from the posted topology and start it immediately."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # pick the descriptor schema from the declared schema_version
    http_content, used_schema = format_in(nsd_schema_v01, ("schema_version",), {2: nsd_schema_v02})
    try:
        scenario_id = nfvo.new_scenario(mydb, tenant_id, http_content)
        # the instance reuses the topology name for both name and description
        instance = nfvo.start_scenario(mydb, tenant_id, scenario_id,
                                       http_content['name'], http_content['name'])
        return format_out(instance)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_deploy error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/topology/verify', method='POST')
def http_post_verify(tenant_id):
    """Placeholder endpoint: topology verification is not implemented yet."""
    #TODO: implement topology verification
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    return
+
+ #
+ # SCENARIOS
+ #
+
@bottle.route(url_base + '/<tenant_id>/scenarios', method='POST')
def http_post_scenarios(tenant_id):
    """Add a scenario to the catalogue, creating its internal structure in the database."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # pick the descriptor schema from the declared schema_version
    http_content, used_schema = format_in(nsd_schema_v01, ("schema_version",), {2: nsd_schema_v02, "0.3": nsd_schema_v03})
    try:
        if used_schema == nsd_schema_v01:
            scenario_id = nfvo.new_scenario(mydb, tenant_id, http_content)
        else:
            # v0.2 and v0.3 descriptors share one entry point with a version tag
            if used_schema == nsd_schema_v02:
                version = "0.2"
            elif used_schema == nsd_schema_v03:
                version = "0.3"
            else:
                logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
                bottle.abort(HTTP_Bad_Request, "Invalid schema version")
            scenario_id = nfvo.new_scenario_v02(mydb, tenant_id, http_content, version)
        # respond with the scenario details as a GET would
        return http_get_scenario_id(tenant_id, scenario_id)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_scenarios error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>/action', method='POST')
def http_post_scenario_action(tenant_id, scenario_id):
    """Execute an action (start/deploy/reserve/verify) over a scenario."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate and sanitize the request body
    http_content, _ = format_in(scenario_action_schema)
    extra = utils.remove_extra_items(http_content, scenario_action_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        nfvo.check_tenant(mydb, tenant_id)  # ensure the tenant exists
        if "start" in http_content:
            payload = http_content['start']
            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, payload['instance_name'],
                                       payload.get('description', payload['instance_name']),
                                       payload.get('datacenter'))
            return format_out(data)
        elif "deploy" in http_content:
            # 'deploy' behaves exactly like 'start'
            payload = http_content['deploy']
            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, payload['instance_name'],
                                       payload.get('description', payload['instance_name']),
                                       payload.get('datacenter'))
            return format_out(data)
        elif "reserve" in http_content:
            # allocate resources without booting the VMs
            payload = http_content['reserve']
            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, payload['instance_name'],
                                       payload.get('description', payload['instance_name']),
                                       payload.get('datacenter'), startvms=False)
            return format_out(data)
        elif "verify" in http_content:
            # start without VMs and tear down again: a dry-run check
            payload = http_content['verify']
            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, payload['instance_name'],
                                       payload.get('description', payload['instance_name']),
                                       payload.get('datacenter'), startvms=False)
            nfvo.delete_instance(mydb, tenant_id, data['uuid'])
            return format_out({"result": "Verify OK"})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_scenario_action error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios', method='GET')
def http_get_scenarios(tenant_id):
    """List scenarios visible to a tenant; 'any' lists all without a tenant check."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)  # ensure the tenant exists
        select_, where_, limit_ = filter_query_string(bottle.request.query, None,
                ('uuid', 'name', 'description', 'tenant_id', 'created_at', 'public'))
        # a regular tenant sees its own scenarios plus the public ones
        where_or = {}
        if tenant_id != "any":
            where_or["tenant_id"] = tenant_id
            where_or["public"] = True
        scenarios = mydb.get_rows(SELECT=select_, WHERE=where_, WHERE_OR=where_or,
                                  WHERE_AND_OR="AND", LIMIT=limit_, FROM='scenarios')
        convert_datetime2str(scenarios)
        utils.convert_str2boolean(scenarios, ('public',))
        return format_out({'scenarios': scenarios})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_scenarios error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='GET')
def http_get_scenario_id(tenant_id, scenario_id):
    """Return the details of one scenario; the id may be a uuid or a name."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        scenario = mydb.get_scenario(scenario_id, tenant_id)
        convert_datetime2str(scenario)
        data = {'scenario': scenario}
        return format_out(data)
    except (nfvo.NfvoException, db_base_Exception) as e:
        # FIX: log label said "http_get_scenarios", the list handler's name
        logger.error("http_get_scenario_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='DELETE')
def http_delete_scenario_id(tenant_id, scenario_id):
    """Delete a scenario from the database; the id may be a uuid or a name."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)  # ensure the tenant exists
        deleted_name = mydb.delete_scenario(scenario_id, tenant_id)
        return format_out({"result": "scenario " + deleted_name + " deleted"})
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_scenario_id error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='PUT')
def http_put_scenario_id(tenant_id, scenario_id):
    """Edit an existing scenario and return its updated representation."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate the request body against the edit schema
    http_content, _ = format_in(scenario_edit_schema)
    try:
        nfvo.edit_scenario(mydb, tenant_id, scenario_id, http_content)
        # reply with the same payload a GET of this scenario would produce
        return http_get_scenario_id(tenant_id, scenario_id)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_put_scenario_id error {}: {}".format(e.http_code, e))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/instances', method='POST')
def http_post_instances(tenant_id):
    """Create an instance-scenario from the posted descriptor."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data
    http_content, used_schema = format_in(instance_scenario_create_schema_v01)
    r = utils.remove_extra_items(http_content, used_schema)
    # FIX: every other handler tests truthiness and logs at debug level; the old
    # 'is not None' test emitted a warning even for an empty removal list
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        data = nfvo.create_instance(mydb, tenant_id, http_content["instance"])
        return format_out(data)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_instances error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+ #
+ # INSTANCES
+ #
@bottle.route(url_base + '/<tenant_id>/instances', method='GET')
def http_get_instances(tenant_id):
    '''get instance list'''
    # FIX: every sibling handler logs the incoming request; this one was missing it
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        s,w,l=filter_query_string(bottle.request.query, None, ('uuid', 'name', 'scenario_id', 'tenant_id', 'description', 'created_at'))
        if tenant_id != "any":
            w['tenant_id'] = tenant_id  # restrict the listing to the requesting tenant
        instances = mydb.get_rows(SELECT=s, WHERE=w, LIMIT=l, FROM='instance_scenarios')
        convert_datetime2str(instances)
        utils.convert_str2boolean(instances, ('public',) )
        data={'instances':instances}
        return format_out(data)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instances error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='GET')
def http_get_instance_id(tenant_id, instance_id):
    '''get instances details, can use both uuid or name.
    Refreshes the instance status from the VIM (best effort) before returning it.
    '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        if tenant_id == "any":
            tenant_id = None  # None disables tenant filtering in the DB queries below
        #obtain data (first time is only to check that the instance exists)
        instance_dict = mydb.get_instance_scenario(instance_id, tenant_id, verbose=True)
        # best-effort refresh: a failure here must not make the GET fail,
        # the (possibly stale) DB content is returned instead
        try:
            nfvo.refresh_instance(mydb, tenant_id, instance_dict)
        except (nfvo.NfvoException, db_base_Exception) as e:
            logger.warn("nfvo.refresh_instance couldn't refresh the status of the instance: %s" % str(e))
        #obtain data with results updated (re-read after the refresh above)
        instance = mydb.get_instance_scenario(instance_id, tenant_id)
        convert_datetime2str(instance)
        return format_out(instance)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instance_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='DELETE')
def http_delete_instance_id(tenant_id, instance_id):
    '''delete instance from VIM and from database, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != "any":
            # validate the tenant before performing any destructive action
            nfvo.check_tenant(mydb, tenant_id)
        else:
            # "any" means: do not filter by tenant
            tenant_id = None
        message = nfvo.delete_instance(mydb, tenant_id, instance_id)
        return format_out({"result": message})
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_delete_instance_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='POST')
def http_post_instance_scenario_action(tenant_id, instance_id):
    '''take an action over a scenario instance.
    The body must match instance_scenario_action_schema; instance_id may be a uuid or a name.
    '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data
    http_content, _ = format_in(instance_scenario_action_schema)
    r = utils.remove_extra_items(http_content, instance_scenario_action_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)

        #obtain data; also resolves a name passed as instance_id into its uuid
        instance = mydb.get_instance_scenario(instance_id, tenant_id)
        instance_id = instance["uuid"]

        data = nfvo.instance_action(mydb, tenant_id, instance_id, http_content)
        return format_out(data)
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_instance_scenario_action error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.error(400)
@bottle.error(401)
@bottle.error(404)
@bottle.error(403)
@bottle.error(405)
@bottle.error(406)
@bottle.error(409)
@bottle.error(503)
@bottle.error(500)
def error400(error):
    '''Render any HTTP error as an openmano-style error document.'''
    payload = {"error": {"code": error.status_code, "type": error.status, "description": error.body}}
    # let browser clients on other origins read the error body
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
    return format_out(payload)
+
--- /dev/null
-
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ '''
+ NFVO engine, implementing all the methods for the creation, deletion and management of vnfs, scenarios and instances
+ '''
+ __author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+ __date__ ="$16-sep-2014 22:05:01$"
+
+ import imp
+ #import json
+ import yaml
+ import utils
+ import vim_thread
+ from db_base import HTTP_Unauthorized, HTTP_Bad_Request, HTTP_Internal_Server_Error, HTTP_Not_Found,\
+ HTTP_Conflict, HTTP_Method_Not_Allowed
+ import console_proxy_thread as cli
+ import vimconn
+ import logging
+ import collections
+ from db_base import db_base_Exception
++
+ import nfvo_db
+ from threading import Lock
+ from time import time
++import ovim as ovim_module
+
# ---- module-level state shared by the nfvo functions below ----
# NOTE(review): `global` statements at module scope are no-ops; kept for
# documentation of which names functions in this module rebind.
global global_config
global vimconn_imported
global logger
global default_volume_size
default_volume_size = '5' #size in GB
global ovim
ovim = None           # ovim instance for SDN control, created in start_service()
global_config = None  # configuration dict, set by the caller before start_service()

vimconn_imported = {} # dictionary with VIM type as key, loaded module as value
vim_threads = {"running":{}, "deleting": {}, "names": []} # threads running for attached-VIMs
vim_persistent_info = {}  # per-thread persistent info passed to each vimconnector
logger = logging.getLogger('openmano.nfvo')
task_lock = Lock()    # serializes access to the VIM task queues
global_instance_tasks = {}
last_task_id = 0.0    # monotonic time-based counter used by get_task_id()
db=None               # nfvo_db connection, created in start_service()
db_lock=Lock()
+
class NfvoException(Exception):
    """NFVO domain error carrying the HTTP status code to report to API clients."""

    def __init__(self, message, http_code):
        # keep the status code so the REST layer can forward it via bottle.abort
        self.http_code = http_code
        super(NfvoException, self).__init__(message)
+
+
def get_task_id():
    """Return a new unique task id of the form 'TASK.<unix-time>'.

    Uses the wall clock, bumping by one microsecond when two calls fall in
    the same instant so ids stay strictly increasing.
    """
    global last_task_id
    stamp = time()
    if stamp <= last_task_id:
        stamp = last_task_id + 0.000001
    last_task_id = stamp
    return "TASK.{:.6f}".format(stamp)
+
+
def new_task(name, params, depends=None):
    """Build an enqueued task dictionary for a vim_thread queue.

    :param name: task action name (e.g. "exit")
    :param params: opaque parameters forwarded to the worker
    :param depends: optional dependency info, stored only when truthy
    :return: the task dict with a fresh unique id
    """
    task = {"status": "enqueued", "id": get_task_id(), "name": name, "params": params}
    if depends:
        task["depends"] = depends
    return task
+
+
def is_task_id(id):
    """Return True when *id* is a task identifier (as built by get_task_id).

    Parameter keeps its historical name `id` (shadowing the builtin) for
    interface compatibility with existing callers.
    """
    # startswith replaces the slice-and-compare and the redundant
    # `True if ... else False` ternary
    return id.startswith("TASK.")
+
+
def get_non_used_vim_name(datacenter_name, datacenter_id, tenant_name, tenant_id):
    """Pick a human-readable thread name not yet used by any VIM thread.

    Tries, in order: the datacenter name, then datacenter.tenant (both
    truncated to 16 chars); falls back to 'datacenter_id-tenant_id'.
    The chosen name is recorded in vim_threads["names"].
    """
    for candidate in (datacenter_name[:16],
                      datacenter_name[:16] + "." + tenant_name[:16]):
        if candidate not in vim_threads["names"]:
            vim_threads["names"].append(candidate)
            return candidate
    candidate = datacenter_id + "-" + tenant_id
    vim_threads["names"].append(candidate)
    return candidate
+
+
def start_service(mydb):
    """Start the NFVO service: connect the DB, start ovim (SDN) and spawn one
    vim_thread per attached datacenter-tenant pair.

    :param mydb: nfvo_db instance used to read the attached VIMs
    :raises NfvoException: on unknown VIM type, vimconnector failure or DB error
    """
    global db, global_config
    db = nfvo_db.nfvo_db()
    db.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])
    global ovim

    # Initialize openvim for SDN control
    # TODO: Avoid static configuration by adding new parameters to openmanod.cfg
    # TODO: review ovim.py to delete not needed configuration
    ovim_configuration = {
        'logger_name': 'openmano.ovim',
        'network_vlan_range_start': 1000,
        'network_vlan_range_end': 4096,
        'db_name': global_config["db_ovim_name"],
        'db_host': global_config["db_ovim_host"],
        'db_user': global_config["db_ovim_user"],
        'db_passwd': global_config["db_ovim_passwd"],
        'bridge_ifaces': {},
        'mode': 'normal',
        'network_type': 'bridge',
        #TODO: log_level_of should not be needed. To be modified in ovim
        'log_level_of': 'DEBUG'
    }
    ovim = ovim_module.ovim(ovim_configuration)
    ovim.start_service()

    # every (tenant, datacenter) attachment gets its own vim_thread
    from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
    select_ = ('type','d.config as config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name',
               'dt.uuid as datacenter_tenant_id','dt.vim_tenant_name as vim_tenant_name','dt.vim_tenant_id as vim_tenant_id',
               'user','passwd', 'dt.config as dt_config', 'nfvo_tenant_id')
    try:
        vims = mydb.get_rows(FROM=from_, SELECT=select_)
        for vim in vims:
            # extra config: datacenter config overridden by datacenter-tenant config
            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
                   'datacenter_id': vim.get('datacenter_id')}
            if vim["config"]:
                # NOTE(review): yaml.load on DB-stored config (trusted input assumed)
                extra.update(yaml.load(vim["config"]))
            if vim.get('dt_config'):
                extra.update(yaml.load(vim["dt_config"]))
            # lazily import the vimconn_<type> plugin module, cached per type
            if vim["type"] not in vimconn_imported:
                module_info=None
                try:
                    module = "vimconn_" + vim["type"]
                    module_info = imp.find_module(module)
                    vim_conn = imp.load_module(vim["type"], *module_info)
                    vimconn_imported[vim["type"]] = vim_conn
                except (IOError, ImportError) as e:
                    if module_info and module_info[0]:
                        file.close(module_info[0])
                    raise NfvoException("Unknown vim type '{}'. Can not open file '{}.py'; {}: {}".format(
                        vim["type"], module, type(e).__name__, str(e)), HTTP_Bad_Request)

            # threads are keyed by datacenter_tenant_id
            thread_id = vim['datacenter_tenant_id']
            vim_persistent_info[thread_id] = {}
            try:
                #if not tenant:
                #    return -HTTP_Bad_Request, "You must provide a valid tenant name or uuid for VIM %s" % ( vim["type"])
                myvim = vimconn_imported[ vim["type"] ].vimconnector(
                    uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                    tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
                    url=vim['vim_url'], url_admin=vim['vim_url_admin'],
                    user=vim['user'], passwd=vim['passwd'],
                    config=extra, persistent_info=vim_persistent_info[thread_id]
                )
            except Exception as e:
                raise NfvoException("Error at VIM {}; {}: {}".format(vim["type"], type(e).__name__, str(e)), HTTP_Internal_Server_Error)
            # NOTE(review): vim_tenant_id is passed as both datacenter_id and
            # tenant_id arguments here — looks intentional but confirm
            thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['vim_tenant_id'], vim['vim_tenant_name'], vim['vim_tenant_id'])
            new_thread = vim_thread.vim_thread(myvim, task_lock, thread_name, vim['datacenter_name'],
                                               vim['datacenter_tenant_id'], db=db, db_lock=db_lock, ovim=ovim)
            new_thread.start()
            vim_threads["running"][thread_id] = new_thread
    except db_base_Exception as e:
        raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
+
+
def stop_service():
    """Shut the service down: stop ovim, ask every running VIM thread to exit
    (moving it to the 'deleting' set) and flag console proxy threads to terminate."""
    global ovim, global_config
    if ovim:
        ovim.stop_service()
    for thread_id, thread in vim_threads["running"].items():
        # an "exit" task makes the worker thread finish its loop
        thread.insert_task(new_task("exit", None))
        vim_threads["deleting"][thread_id] = thread
    vim_threads["running"] = {}
    console_threads = global_config.get("console_thread") if global_config else None
    if console_threads:
        for console_thread in console_threads:
            console_thread.terminate = True
+
+
def get_flavorlist(mydb, vnf_id, nfvo_tenant=None):
    """Return the list of flavor uuids used by the VMs of a VNF.

    :param mydb: database accessor providing get_rows
    :param vnf_id: uuid of the VNF whose VM flavors are listed
    :param nfvo_tenant: optional tenant filter
    :return: list of flavor_id strings (possibly empty)
    """
    where = {'vnf_id': vnf_id}
    if nfvo_tenant is not None:
        where['nfvo_tenant_id'] = nfvo_tenant
    rows = mydb.get_rows(FROM='vms join flavors on vms.flavor_id=flavors.uuid',
                         SELECT=('flavor_id',), WHERE=where)
    return [row['flavor_id'] for row in rows]
+
+
def get_imagelist(mydb, vnf_id, nfvo_tenant=None):
    """Return the list of image uuids used by the VMs of a VNF.

    :param mydb: database accessor providing get_rows
    :param vnf_id: uuid of the VNF whose VM images are listed
    :param nfvo_tenant: optional tenant filter
    :return: list of image_id strings (possibly empty)
    """
    where = {'vnf_id': vnf_id}
    if nfvo_tenant is not None:
        where['nfvo_tenant_id'] = nfvo_tenant
    rows = mydb.get_rows(FROM='vms join images on vms.image_id=images.uuid',
                         SELECT=('image_id',), WHERE=where)
    return [row['image_id'] for row in rows]
+
+
def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, datacenter_tenant_id=None,
            vim_tenant=None, vim_tenant_name=None, vim_user=None, vim_passwd=None):
    '''Obtain a dictionary of VIM (datacenter) classes with some of the input parameters
    return dictionary with {datacenter_id: vim_class, ... }. vim_class contain:
        'nfvo_tenant_id','datacenter_id','vim_tenant_id','vim_url','vim_url_admin','datacenter_name','type','user','passwd'
    raise exception upon error
    '''
    # build the WHERE filter only from the parameters actually supplied
    WHERE_dict={}
    if nfvo_tenant is not None: WHERE_dict['nfvo_tenant_id'] = nfvo_tenant
    if datacenter_id is not None: WHERE_dict['d.uuid'] = datacenter_id
    if datacenter_tenant_id is not None: WHERE_dict['datacenter_tenant_id'] = datacenter_tenant_id
    if datacenter_name is not None: WHERE_dict['d.name'] = datacenter_name
    if vim_tenant is not None: WHERE_dict['dt.vim_tenant_id'] = vim_tenant
    if vim_tenant_name is not None: WHERE_dict['vim_tenant_name'] = vim_tenant_name
    # tenant-scoped queries need the join through tenants_datacenters;
    # otherwise the datacenters table alone is enough
    if nfvo_tenant or vim_tenant or vim_tenant_name or datacenter_tenant_id:
        from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
        select_ = ('type','d.config as config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name',
                   'dt.uuid as datacenter_tenant_id','dt.vim_tenant_name as vim_tenant_name','dt.vim_tenant_id as vim_tenant_id',
                   'user','passwd', 'dt.config as dt_config')
    else:
        from_ = 'datacenters as d'
        select_ = ('type','config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name')
    try:
        vims = mydb.get_rows(FROM=from_, SELECT=select_, WHERE=WHERE_dict )
        vim_dict={}
        for vim in vims:
            # extra config: datacenter config overridden by datacenter-tenant config
            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
                   'datacenter_id': vim.get('datacenter_id')}
            if vim["config"]:
                # NOTE(review): yaml.load on DB-stored config (trusted input assumed)
                extra.update(yaml.load(vim["config"]))
            if vim.get('dt_config'):
                extra.update(yaml.load(vim["dt_config"]))
            # lazily import the vimconn_<type> plugin module, cached per type
            if vim["type"] not in vimconn_imported:
                module_info=None
                try:
                    module = "vimconn_" + vim["type"]
                    module_info = imp.find_module(module)
                    vim_conn = imp.load_module(vim["type"], *module_info)
                    vimconn_imported[vim["type"]] = vim_conn
                except (IOError, ImportError) as e:
                    if module_info and module_info[0]:
                        file.close(module_info[0])
                    raise NfvoException("Unknown vim type '{}'. Can not open file '{}.py'; {}: {}".format(
                        vim["type"], module, type(e).__name__, str(e)), HTTP_Bad_Request)

            try:
                # reuse the per-thread persistent info when the row carries a
                # datacenter_tenant_id (i.e. the join query was used)
                if 'datacenter_tenant_id' in vim:
                    thread_id = vim["datacenter_tenant_id"]
                    if thread_id not in vim_persistent_info:
                        vim_persistent_info[thread_id] = {}
                    persistent_info = vim_persistent_info[thread_id]
                else:
                    persistent_info = {}
                #if not tenant:
                #    return -HTTP_Bad_Request, "You must provide a valid tenant name or uuid for VIM %s" % ( vim["type"])
                vim_dict[ vim['datacenter_id'] ] = vimconn_imported[ vim["type"] ].vimconnector(
                    uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                    tenant_id=vim.get('vim_tenant_id',vim_tenant),
                    tenant_name=vim.get('vim_tenant_name',vim_tenant_name),
                    url=vim['vim_url'], url_admin=vim['vim_url_admin'],
                    user=vim.get('user',vim_user), passwd=vim.get('passwd',vim_passwd),
                    config=extra, persistent_info=persistent_info
                )
            except Exception as e:
                raise NfvoException("Error at VIM {}; {}: {}".format(vim["type"], type(e).__name__, str(e)), HTTP_Internal_Server_Error)
        return vim_dict
    except db_base_Exception as e:
        raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
+
+
def rollback(mydb, vims, rollback_list):
    """Undo previously created items after a failed operation.

    :param mydb: database accessor
    :param vims: dict of {vim_id: vimconnector} where VIM-side items were created
    :param rollback_list: list of dicts with keys 'where' ("vim"/"mano"),
        'what' ("image"/"flavor"/"network"/"vm"), 'uuid' and (for vim) 'vim_id'
    :return: (True, message) when everything was deleted, else (False, message)
    """
    undeleted_items=[]
    #delete things by reverse order of creation
    for i in range(len(rollback_list)-1, -1, -1):
        item = rollback_list[i]
        if item["where"]=="vim":
            if item["vim_id"] not in vims:
                continue  # the VIM is gone/not loaded: nothing to undo there
            vim=vims[ item["vim_id"] ]
            try:
                if item["what"]=="image":
                    vim.delete_image(item["uuid"])
                    # NOTE(review): vim["id"]/vim["name"] subscript a vimconnector;
                    # assumes the connector supports item access — confirm
                    mydb.delete_row(FROM="datacenters_images", WHERE={"datacenter_id": vim["id"], "vim_id":item["uuid"]})
                elif item["what"]=="flavor":
                    vim.delete_flavor(item["uuid"])
                    mydb.delete_row(FROM="datacenters_flavors", WHERE={"datacenter_id": vim["id"], "vim_id":item["uuid"]})
                elif item["what"]=="network":
                    vim.delete_network(item["uuid"])
                elif item["what"]=="vm":
                    vim.delete_vminstance(item["uuid"])
            except vimconn.vimconnException as e:
                # best effort: log and remember what could not be deleted
                logger.error("Error in rollback. Not possible to delete VIM %s '%s'. Message: %s", item['what'], item["uuid"], str(e))
                undeleted_items.append("{} {} from VIM {}".format(item['what'], item["uuid"], vim["name"]))
            except db_base_Exception as e:
                logger.error("Error in rollback. Not possible to delete %s '%s' from DB.datacenters Message: %s", item['what'], item["uuid"], str(e))

        else: # where==mano
            try:
                if item["what"]=="image":
                    mydb.delete_row(FROM="images", WHERE={"uuid": item["uuid"]})
                elif item["what"]=="flavor":
                    mydb.delete_row(FROM="flavors", WHERE={"uuid": item["uuid"]})
            except db_base_Exception as e:
                logger.error("Error in rollback. Not possible to delete %s '%s' from DB. Message: %s", item['what'], item["uuid"], str(e))
                undeleted_items.append("{} '{}'".format(item['what'], item["uuid"]))
    if len(undeleted_items)==0:
        return True," Rollback successful."
    else:
        return False," Rollback fails to delete: " + str(undeleted_items)
+
+
def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
    """Validate the internal coherence of a VNF descriptor.

    Checks that interface names are unique per VNFC, that external/internal
    connections reference existing VNFCs and interfaces, and fills in the
    default 'type' (v1) or 'implementation' (v2) of internal connections.

    :param vnf_descriptor: parsed descriptor dict with a top-level "vnf" key
    :param vnf_descriptor_version: 1 or 2; selects naming and defaulting rules
    :raises NfvoException: with HTTP_Bad_Request upon any inconsistency
    """
    global global_config
    #create a dictionary with vnfc-name: vnfc:interface-list key:values pairs
    vnfc_interfaces={}
    for vnfc in vnf_descriptor["vnf"]["VNFC"]:
        name_dict = {}
        #dataplane (underlay) interfaces
        for numa in vnfc.get("numas",() ):
            for interface in numa.get("interfaces",()):
                if interface["name"] in name_dict:
                    raise NfvoException(
                        "Error at vnf:VNFC[name:'{}']:numas:interfaces:name, interface name '{}' already used in this VNFC".format(
                            vnfc["name"], interface["name"]),
                        HTTP_Bad_Request)
                name_dict[ interface["name"] ] = "underlay"
        #bridge (overlay) interfaces
        for interface in vnfc.get("bridge-ifaces",() ):
            if interface["name"] in name_dict:
                raise NfvoException(
                    "Error at vnf:VNFC[name:'{}']:bridge-ifaces:name, interface name '{}' already used in this VNFC".format(
                        vnfc["name"], interface["name"]),
                    HTTP_Bad_Request)
            name_dict[ interface["name"] ] = "overlay"
        vnfc_interfaces[ vnfc["name"] ] = name_dict
        # check boot-data info
        if "boot-data" in vnfc:
            # user-data is incompatible with users and config-files
            if (vnfc["boot-data"].get("users") or vnfc["boot-data"].get("config-files")) and vnfc["boot-data"].get("user-data"):
                raise NfvoException(
                    "Error at vnf:VNFC:boot-data, fields 'users' and 'config-files' are not compatible with 'user-data'",
                    HTTP_Bad_Request)

    #check if the info in external_connections matches with the one in the vnfcs
    name_list=[]
    for external_connection in vnf_descriptor["vnf"].get("external-connections",() ):
        if external_connection["name"] in name_list:
            raise NfvoException(
                "Error at vnf:external-connections:name, value '{}' already used as an external-connection".format(
                    external_connection["name"]),
                HTTP_Bad_Request)
        name_list.append(external_connection["name"])
        if external_connection["VNFC"] not in vnfc_interfaces:
            raise NfvoException(
                "Error at vnf:external-connections[name:'{}']:VNFC, value '{}' does not match any VNFC".format(
                    external_connection["name"], external_connection["VNFC"]),
                HTTP_Bad_Request)

        if external_connection["local_iface_name"] not in vnfc_interfaces[ external_connection["VNFC"] ]:
            raise NfvoException(
                "Error at vnf:external-connections[name:'{}']:local_iface_name, value '{}' does not match any interface of this VNFC".format(
                    external_connection["name"],
                    external_connection["local_iface_name"]),
                HTTP_Bad_Request )

    #check if the info in internal_connections matches with the one in the vnfcs
    name_list=[]
    for internal_connection in vnf_descriptor["vnf"].get("internal-connections",() ):
        if internal_connection["name"] in name_list:
            # BUGFIX: the placeholder was old-style '%s' while the message is
            # built with str.format, so the duplicated name never appeared
            raise NfvoException(
                "Error at vnf:internal-connections:name, value '{}' already used as an internal-connection".format(
                    internal_connection["name"]),
                HTTP_Bad_Request)
        name_list.append(internal_connection["name"])
        #internal-connections of type "ptp"/"e-line" must not have more than 2 elements
        if len(internal_connection["elements"])>2 and (internal_connection.get("type") == "ptp" or internal_connection.get("type") == "e-line"):
            raise NfvoException(
                "Error at 'vnf:internal-connections[name:'{}']:elements', size must be 2 for a '{}' type. Consider change it to '{}' type".format(
                    internal_connection["name"],
                    'ptp' if vnf_descriptor_version==1 else 'e-line',
                    'data' if vnf_descriptor_version==1 else "e-lan"),
                HTTP_Bad_Request)
        for port in internal_connection["elements"]:
            vnf = port["VNFC"]
            iface = port["local_iface_name"]
            if vnf not in vnfc_interfaces:
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:VNFC, value '{}' does not match any VNFC".format(
                        internal_connection["name"], vnf),
                    HTTP_Bad_Request)
            if iface not in vnfc_interfaces[ vnf ]:
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:local_iface_name, value '{}' does not match any interface of this VNFC".format(
                        internal_connection["name"], iface),
                    HTTP_Bad_Request)
            # BUGFIX: removed a stray 'return -HTTP_Bad_Request,' left over from
            # the old error-code API; it made the function return after the first
            # valid element, skipping type defaulting and all remaining checks.
            # default the connection type/implementation from the first interfaces seen
            if vnf_descriptor_version==1 and "type" not in internal_connection:
                if vnfc_interfaces[vnf][iface] == "overlay":
                    internal_connection["type"] = "bridge"
                else:
                    internal_connection["type"] = "data"
            if vnf_descriptor_version==2 and "implementation" not in internal_connection:
                if vnfc_interfaces[vnf][iface] == "overlay":
                    internal_connection["implementation"] = "overlay"
                else:
                    internal_connection["implementation"] = "underlay"
            # an underlay (data/ptp) connection cannot attach overlay interfaces and vice versa
            if (internal_connection.get("type") == "data" or internal_connection.get("type") == "ptp" or \
                internal_connection.get("implementation") == "underlay") and vnfc_interfaces[vnf][iface] == "overlay":
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:{}, interface of type {} connected to an {} network".format(
                        internal_connection["name"],
                        iface, 'bridge' if vnf_descriptor_version==1 else 'overlay',
                        'data' if vnf_descriptor_version==1 else 'underlay'),
                    HTTP_Bad_Request)
            if (internal_connection.get("type") == "bridge" or internal_connection.get("implementation") == "overlay") and \
                vnfc_interfaces[vnf][iface] == "underlay":
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:{}, interface of type {} connected to an {} network".format(
                        internal_connection["name"], iface,
                        'data' if vnf_descriptor_version==1 else 'underlay',
                        'bridge' if vnf_descriptor_version==1 else 'overlay'),
                    HTTP_Bad_Request)
+
+
+ def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error = None):
+ #look if image exist
+ if only_create_at_vim:
+ image_mano_id = image_dict['uuid']
+ if return_on_error == None:
+ return_on_error = True
+ else:
+ if image_dict['location']:
+ images = mydb.get_rows(FROM="images", WHERE={'location':image_dict['location'], 'metadata':image_dict['metadata']})
+ else:
+ images = mydb.get_rows(FROM="images", WHERE={'universal_name':image_dict['universal_name'], 'checksum':image_dict['checksum']})
+ if len(images)>=1:
+ image_mano_id = images[0]['uuid']
+ else:
+ #create image in MANO DB
+ temp_image_dict={'name':image_dict['name'], 'description':image_dict.get('description',None),
+ 'location':image_dict['location'], 'metadata':image_dict.get('metadata',None),
+ 'universal_name':image_dict['universal_name'] , 'checksum':image_dict['checksum']
+ }
+ #temp_image_dict['location'] = image_dict.get('new_location') if image_dict['location'] is None
+ image_mano_id = mydb.new_row('images', temp_image_dict, add_uuid=True)
+ rollback_list.append({"where":"mano", "what":"image","uuid":image_mano_id})
+ #create image at every vim
+ for vim_id,vim in vims.iteritems():
+ image_created="false"
+ #look at database
+ image_db = mydb.get_rows(FROM="datacenters_images", WHERE={'datacenter_id':vim_id, 'image_id':image_mano_id})
+ #look at VIM if this image exist
+ try:
+ if image_dict['location'] is not None:
+ image_vim_id = vim.get_image_id_from_path(image_dict['location'])
+ else:
+ filter_dict = {}
+ filter_dict['name'] = image_dict['universal_name']
+ if image_dict.get('checksum') != None:
+ filter_dict['checksum'] = image_dict['checksum']
+ #logger.debug('>>>>>>>> Filter dict: %s', str(filter_dict))
+ vim_images = vim.get_image_list(filter_dict)
+ #logger.debug('>>>>>>>> VIM images: %s', str(vim_images))
+ if len(vim_images) > 1:
+ raise vimconn.vimconnException("More than one candidate VIM image found for filter: {}".format(str(filter_dict)), HTTP_Conflict)
+ elif len(vim_images) == 0:
+ raise vimconn.vimconnNotFoundException("Image not found at VIM with filter: '{}'".format(str(filter_dict)))
+ else:
+ #logger.debug('>>>>>>>> VIM image 0: %s', str(vim_images[0]))
+ image_vim_id = vim_images[0]['id']
+
+ except vimconn.vimconnNotFoundException as e:
+ #Create the image in VIM only if image_dict['location'] or image_dict['new_location'] is not None
+ try:
+ #image_dict['location']=image_dict.get('new_location') if image_dict['location'] is None
+ if image_dict['location']:
+ image_vim_id = vim.new_image(image_dict)
+ rollback_list.append({"where":"vim", "vim_id": vim_id, "what":"image","uuid":image_vim_id})
+ image_created="true"
+ else:
+ #If we reach this point, then the image has image name, and optionally checksum, and could not be found
+ raise vimconn.vimconnException(str(e))
+ except vimconn.vimconnException as e:
+ if return_on_error:
+ logger.error("Error creating image at VIM '%s': %s", vim["name"], str(e))
+ raise
+ image_vim_id = None
+ logger.warn("Error creating image at VIM '%s': %s", vim["name"], str(e))
+ continue
+ except vimconn.vimconnException as e:
+ if return_on_error:
+ logger.error("Error contacting VIM to know if the image exists at VIM: %s", str(e))
+ raise
+ logger.warn("Error contacting VIM to know if the image exists at VIM: %s", str(e))
+ image_vim_id = None
+ continue
+ #if we reach here, the image has been created or existed
+ if len(image_db)==0:
+ #add new vim_id at datacenters_images
+ mydb.new_row('datacenters_images', {'datacenter_id':vim_id, 'image_id':image_mano_id, 'vim_id': image_vim_id, 'created':image_created})
+ elif image_db[0]["vim_id"]!=image_vim_id:
+ #modify existing vim_id at datacenters_images
+ mydb.update_rows('datacenters_images', UPDATE={'vim_id':image_vim_id}, WHERE={'datacenter_id':vim_id, 'image_id':image_mano_id})
+
+ return image_vim_id if only_create_at_vim else image_mano_id
+
+
+ def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_vim=False, return_on_error = None):
+ temp_flavor_dict= {'disk':flavor_dict.get('disk',1),
+ 'ram':flavor_dict.get('ram'),
+ 'vcpus':flavor_dict.get('vcpus'),
+ }
+ if 'extended' in flavor_dict and flavor_dict['extended']==None:
+ del flavor_dict['extended']
+ if 'extended' in flavor_dict:
+ temp_flavor_dict['extended']=yaml.safe_dump(flavor_dict['extended'],default_flow_style=True,width=256)
+
+ #look if flavor exist
+ if only_create_at_vim:
+ flavor_mano_id = flavor_dict['uuid']
+ if return_on_error == None:
+ return_on_error = True
+ else:
+ flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
+ if len(flavors)>=1:
+ flavor_mano_id = flavors[0]['uuid']
+ else:
+ #create flavor
+ #create one by one the images of aditional disks
+ dev_image_list=[] #list of images
+ if 'extended' in flavor_dict and flavor_dict['extended']!=None:
+ dev_nb=0
+ for device in flavor_dict['extended'].get('devices',[]):
+ if "image" not in device and "image name" not in device:
+ continue
+ image_dict={}
+ image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
+ image_dict['universal_name']=device.get('image name')
+ image_dict['description']=flavor_dict['name']+str(dev_nb)+"-img"
+ image_dict['location']=device.get('image')
+ #image_dict['new_location']=vnfc.get('image location')
+ image_dict['checksum']=device.get('image checksum')
+ image_metadata_dict = device.get('image metadata', None)
+ image_metadata_str = None
+ if image_metadata_dict != None:
+ image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
+ image_dict['metadata']=image_metadata_str
+ image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
+ #print "Additional disk image id for VNFC %s: %s" % (flavor_dict['name']+str(dev_nb)+"-img", image_id)
+ dev_image_list.append(image_id)
+ dev_nb += 1
+ temp_flavor_dict['name'] = flavor_dict['name']
+ temp_flavor_dict['description'] = flavor_dict.get('description',None)
+ content = mydb.new_row('flavors', temp_flavor_dict, add_uuid=True)
+ flavor_mano_id= content
+ rollback_list.append({"where":"mano", "what":"flavor","uuid":flavor_mano_id})
+ #create flavor at every vim
+ if 'uuid' in flavor_dict:
+ del flavor_dict['uuid']
+ flavor_vim_id=None
+ for vim_id,vim in vims.items():
+ flavor_created="false"
+ #look at database
+ flavor_db = mydb.get_rows(FROM="datacenters_flavors", WHERE={'datacenter_id':vim_id, 'flavor_id':flavor_mano_id})
+ #look at VIM if this flavor exist SKIPPED
+ #res_vim, flavor_vim_id = vim.get_flavor_id_from_path(flavor_dict['location'])
+ #if res_vim < 0:
+ # print "Error contacting VIM to know if the flavor %s existed previously." %flavor_vim_id
+ # continue
+ #elif res_vim==0:
+
+ #Create the flavor in VIM
+ #Translate images at devices from MANO id to VIM id
+ disk_list = []
+ if 'extended' in flavor_dict and flavor_dict['extended']!=None and "devices" in flavor_dict['extended']:
+ #make a copy of original devices
+ devices_original=[]
+
+ for device in flavor_dict["extended"].get("devices",[]):
+ dev={}
+ dev.update(device)
+ devices_original.append(dev)
+ if 'image' in device:
+ del device['image']
+ if 'image metadata' in device:
+ del device['image metadata']
+ dev_nb=0
+ for index in range(0,len(devices_original)) :
+ device=devices_original[index]
+ if "image" not in device and "image name" not in device:
+ if 'size' in device:
+ disk_list.append({'size': device.get('size', default_volume_size)})
+ continue
+ image_dict={}
+ image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
+ image_dict['universal_name']=device.get('image name')
+ image_dict['description']=flavor_dict['name']+str(dev_nb)+"-img"
+ image_dict['location']=device.get('image')
+ #image_dict['new_location']=device.get('image location')
+ image_dict['checksum']=device.get('image checksum')
+ image_metadata_dict = device.get('image metadata', None)
+ image_metadata_str = None
+ if image_metadata_dict != None:
+ image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
+ image_dict['metadata']=image_metadata_str
+ image_mano_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=return_on_error )
+ image_dict["uuid"]=image_mano_id
+ image_vim_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=True, return_on_error=return_on_error)
+
+ #save disk information (image must be based on and size
+ disk_list.append({'image_id': image_vim_id, 'size': device.get('size', default_volume_size)})
+
+ flavor_dict["extended"]["devices"][index]['imageRef']=image_vim_id
+ dev_nb += 1
+ if len(flavor_db)>0:
+ #check that this vim_id exist in VIM, if not create
+ flavor_vim_id=flavor_db[0]["vim_id"]
+ try:
+ vim.get_flavor(flavor_vim_id)
+ continue #flavor exist
+ except vimconn.vimconnException:
+ pass
+ #create flavor at vim
+ logger.debug("nfvo.create_or_use_flavor() adding flavor to VIM %s", vim["name"])
+ try:
+ flavor_vim_id = None
+ flavor_vim_id=vim.get_flavor_id_from_data(flavor_dict)
+ flavor_create="false"
+ except vimconn.vimconnException as e:
+ pass
+ try:
+ if not flavor_vim_id:
+ flavor_vim_id = vim.new_flavor(flavor_dict)
+ rollback_list.append({"where":"vim", "vim_id": vim_id, "what":"flavor","uuid":flavor_vim_id})
+ flavor_created="true"
+ except vimconn.vimconnException as e:
+ if return_on_error:
+ logger.error("Error creating flavor at VIM %s: %s.", vim["name"], str(e))
+ raise
+ logger.warn("Error creating flavor at VIM %s: %s.", vim["name"], str(e))
+ flavor_vim_id = None
+ continue
+ #if reach here the flavor has been create or exist
+ if len(flavor_db)==0:
+ #add new vim_id at datacenters_flavors
+ extended_devices_yaml = None
+ if len(disk_list) > 0:
+ extended_devices = dict()
+ extended_devices['disks'] = disk_list
+ extended_devices_yaml = yaml.safe_dump(extended_devices,default_flow_style=True,width=256)
+ mydb.new_row('datacenters_flavors',
+ {'datacenter_id':vim_id, 'flavor_id':flavor_mano_id, 'vim_id': flavor_vim_id,
+ 'created':flavor_created,'extended': extended_devices_yaml})
+ elif flavor_db[0]["vim_id"]!=flavor_vim_id:
+ #modify existing vim_id at datacenters_flavors
+ mydb.update_rows('datacenters_flavors', UPDATE={'vim_id':flavor_vim_id}, WHERE={'datacenter_id':vim_id, 'flavor_id':flavor_mano_id})
+
+ return flavor_vim_id if only_create_at_vim else flavor_mano_id
+
+
def _new_vnf_common(mydb, tenant_id, vnf_descriptor, descriptor_version):
    """Create a VNF: images/flavors at the VIMs plus the VNF entry at the NFVO database.

    Common implementation shared by new_vnf (descriptor version 1) and
    new_vnf_v02 (descriptor version 2); the two versions differ only in the
    schema validated and in the database method used to insert the VNF.

    Params:
        mydb: NFVO database connector
        tenant_id: owner tenant uuid, or "any" to skip tenant checks
        vnf_descriptor: dict with the VNF descriptor (modified in place with
            defaults: description, tenant_id, serialized descriptor)
        descriptor_version: 1 or 2, selects schema check and DB insert method
    Returns the uuid of the new VNF.
    Raises NfvoException on any error, after rolling back the images/flavors
    already created at mano or at the VIMs.
    """
    global global_config

    # Step 1. Check the VNF descriptor
    check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=descriptor_version)
    # Step 2. Check tenant exist
    vims = {}
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        if "tenant_id" in vnf_descriptor["vnf"]:
            if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(vnf_descriptor["vnf"]["tenant_id"], tenant_id),
                                    HTTP_Unauthorized)
        else:
            vnf_descriptor['vnf']['tenant_id'] = tenant_id
        # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
        if global_config["auto_push_VNF_to_VIMs"]:
            vims = get_vim(mydb, tenant_id)

    # Step 4. Review the descriptor and add missing fields
    vnf_name = vnf_descriptor['vnf']['name']
    vnf_descriptor['vnf']['description'] = vnf_descriptor['vnf'].get("description", vnf_name)
    if "physical" in vnf_descriptor['vnf']:
        del vnf_descriptor['vnf']['physical']

    # Step 6. For each VNFC in the descriptor, flavors and images are created in the VIM
    logger.debug('BEGIN creation of VNF "%s"' % vnf_name)
    logger.debug("VNF %s: consisting of %d VNFC(s)" % (vnf_name, len(vnf_descriptor['vnf']['VNFC'])))

    # Dictionary, key: VNFC name, value: dict with the relevant information to create the VNF and VMs in the MANO database
    VNFCDict = {}
    # It will contain the new images created in mano. It is used for rollback
    rollback_list = []
    try:
        logger.debug("Creating additional disk images and new flavors in the VIM for each VNFC")
        for vnfc in vnf_descriptor['vnf']['VNFC']:
            VNFCitem = {}
            VNFCitem["name"] = vnfc['name']
            VNFCitem["description"] = vnfc.get("description", 'VM %s of the VNF %s' % (vnfc['name'], vnf_name))

            # Build the flavor requested by this VNFC
            myflavorDict = {}
            myflavorDict["name"] = vnfc['name'] + "-flv"  #Maybe we could rename the flavor by using the field "image name" if exists
            myflavorDict["description"] = VNFCitem["description"]
            myflavorDict["ram"] = vnfc.get("ram", 0)
            myflavorDict["vcpus"] = vnfc.get("vcpus", 0)
            myflavorDict["disk"] = vnfc.get("disk", 1)
            myflavorDict["extended"] = {}

            devices = vnfc.get("devices")
            if devices != None:
                myflavorDict["extended"]["devices"] = devices

            # TODO: mapping from processor models to rankings should be available somehow
            # in the NFVO (taken from the VIM or from a new database table); alternatively
            # the descriptor could specify the ranking of the host directly.
            myflavorDict['extended']['processor_ranking'] = 100  #Hardcoded value, while we decide when the mapping is done

            if 'numas' in vnfc and len(vnfc['numas']) > 0:
                myflavorDict['extended']['numas'] = vnfc['numas']

            # Step 6.2 New flavors are created in the VIM
            flavor_id = create_or_use_flavor(mydb, vims, myflavorDict, rollback_list)
            VNFCitem["flavor_id"] = flavor_id
            VNFCDict[vnfc['name']] = VNFCitem

        logger.debug("Creating new images in the VIM for each VNFC")
        # Step 6.3 New images are created in the VIM
        # For each VNFC, we must create the appropriate image.
        # This "for" loop might be integrated with the previous one
        # In case this integration is made, the VNFCDict might become a VNFClist.
        for vnfc in vnf_descriptor['vnf']['VNFC']:
            image_dict = {}
            image_dict['name'] = vnfc.get('image name', vnf_name + "-" + vnfc['name'] + "-img")
            image_dict['universal_name'] = vnfc.get('image name')
            image_dict['description'] = vnfc.get('image name', VNFCDict[vnfc['name']]['description'])
            image_dict['location'] = vnfc.get('VNFC image')
            image_dict['checksum'] = vnfc.get('image checksum')
            image_metadata_dict = vnfc.get('image metadata', None)
            image_metadata_str = None
            if image_metadata_dict is not None:
                image_metadata_str = yaml.safe_dump(image_metadata_dict, default_flow_style=True, width=256)
            image_dict['metadata'] = image_metadata_str
            image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
            VNFCDict[vnfc['name']]["image_id"] = image_id
            VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
            if vnfc.get("boot-data"):
                VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"], default_flow_style=True, width=256)

        # Step 7. Storing the VNF descriptor in the repository
        if "descriptor" not in vnf_descriptor["vnf"]:
            vnf_descriptor["vnf"]["descriptor"] = yaml.safe_dump(vnf_descriptor, indent=4, explicit_start=True, default_flow_style=False)

        # Step 8. Adding the VNF to the NFVO DB
        if descriptor_version == 1:
            vnf_id = mydb.new_vnf_as_a_whole(tenant_id, vnf_name, vnf_descriptor, VNFCDict)
        else:
            vnf_id = mydb.new_vnf_as_a_whole2(tenant_id, vnf_name, vnf_descriptor, VNFCDict)
        return vnf_id
    except (db_base_Exception, vimconn.vimconnException, KeyError) as e:
        # undo every image/flavor created so far, then re-raise as NfvoException
        _, message = rollback(mydb, vims, rollback_list)
        if isinstance(e, db_base_Exception):
            error_text = "Exception at database"
        elif isinstance(e, KeyError):
            error_text = "KeyError exception "
            e.http_code = HTTP_Internal_Server_Error
        else:
            error_text = "Exception at VIM"
        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
        raise NfvoException(error_text, e.http_code)


def new_vnf(mydb, tenant_id, vnf_descriptor):
    """Create a VNF from a version-1 descriptor. Returns the uuid of the new VNF."""
    return _new_vnf_common(mydb, tenant_id, vnf_descriptor, 1)


def new_vnf_v02(mydb, tenant_id, vnf_descriptor):
    """Create a VNF from a version-2 descriptor. Returns the uuid of the new VNF."""
    return _new_vnf_common(mydb, tenant_id, vnf_descriptor, 2)
+
+
def get_vnf_id(mydb, tenant_id, vnf_id):
    """Return the stored description of a VNF: its public fields, VNFCs,
    internal nets (with their ip-profiles) and external connections."""
    # the tenant must exist
    check_tenant(mydb, tenant_id)
    # locate the VNF by uuid or name, restricted to own or public VNFs
    where_or = {}
    if tenant_id != "any":
        where_or["tenant_id"] = tenant_id
        where_or["public"] = True
    vnf = mydb.get_table_by_uuid_name('vnfs', vnf_id, "VNF", WHERE_OR=where_or, WHERE_AND_OR="AND")
    vnf_id = vnf["uuid"]

    # expose only the public VNF fields
    filter_keys = ('uuid','name','description','public', "tenant_id", "created_at")
    data = {'vnf': {key: value for key, value in vnf.iteritems() if key in filter_keys}}

    # VNFCs (stored as VMs)
    vm_rows = mydb.get_rows(FROM='vnfs join vms on vnfs.uuid=vms.vnf_id',
                            SELECT=('vms.uuid as uuid','vms.name as name', 'vms.description as description', 'boot_data'),
                            WHERE={'vnfs.uuid': vnf_id} )
    if not vm_rows:
        raise NfvoException("vnf '{}' not found".format(vnf_id), HTTP_Not_Found)
    for vm in vm_rows:
        # database column is 'boot_data'; the API exposes it as 'boot-data'
        if vm.get("boot_data"):
            vm["boot-data"] = yaml.safe_load(vm["boot_data"])
            del vm["boot_data"]
    data['vnf']['VNFC'] = vm_rows
    #TODO: GET all the information from a VNFC and include it in the output.

    # internal networks
    net_rows = mydb.get_rows(FROM='vnfs join nets on vnfs.uuid=nets.vnf_id',
                             SELECT=('nets.uuid as uuid','nets.name as name','nets.description as description', 'nets.type as type', 'nets.multipoint as multipoint'),
                             WHERE={'vnfs.uuid': vnf_id} )
    data['vnf']['nets'] = net_rows

    # attach the ip-profile (at most one) of every net
    for net in data['vnf']['nets']:
        profile_rows = mydb.get_rows(FROM='ip_profiles',
                                     SELECT=('ip_version','subnet_address','gateway_address','dns_address','dhcp_enabled','dhcp_start_address','dhcp_count'),
                                     WHERE={'net_id': net["uuid"]} )
        if len(profile_rows) > 1:
            raise NfvoException("More than one ip-profile found with this criteria: net_id='{}'".format(net['uuid']), HTTP_Bad_Request)
        if len(profile_rows) == 1:
            net["ip_profile"] = profile_rows[0]

    #TODO: For each net, GET its elements and relevant info per element (VNFC, iface, ip_address) and include them in the output.

    # external interfaces (those with an external_name set)
    iface_rows = mydb.get_rows(FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces on vms.uuid=interfaces.vm_id',
                               SELECT=('interfaces.uuid as uuid','interfaces.external_name as external_name', 'vms.name as vm_name', 'interfaces.vm_id as vm_id',
                                       'interfaces.internal_name as internal_name', 'interfaces.type as type', 'interfaces.vpci as vpci','interfaces.bw as bw'),
                               WHERE={'vnfs.uuid': vnf_id},
                               WHERE_NOT={'interfaces.external_name': None} )
    data['vnf']['external-connections'] = iface_rows

    return data
+
+
def delete_vnf(mydb,tenant_id,vnf_id,datacenter=None,vim_tenant=None):
    """Delete a VNF from the NFVO database and, best-effort, its flavors and
    images from the VIMs.

    vnf_id may be a uuid or a name; tenant_id may be "any" to skip tenant
    checks (in that case no VIM clean-up is attempted, since vims stays empty).
    Flavors/images still used by another VNF, or not created by openmano, are
    kept. VIM deletion failures are logged and recorded but do not abort the
    operation (the DB rows are removed anyway).
    Returns "<vnf uuid> <vnf name>".
    Raises NfvoException (HTTP_Not_Found) if the VNF does not exist.
    NOTE: datacenter and vim_tenant parameters are currently unused.
    """
    # Check tenant exist
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        # Get the URL of the VIM from the nfvo_tenant and the datacenter
        vims = get_vim(mydb, tenant_id)
    else:
        # no tenant restriction: do not touch any VIM
        vims={}

    # Checking if it is a valid uuid and, if not, getting the uuid assuming that the name was provided"
    where_or = {}
    if tenant_id != "any":
        where_or["tenant_id"] = tenant_id
        where_or["public"] = True
    vnf = mydb.get_table_by_uuid_name('vnfs', vnf_id, "VNF", WHERE_OR=where_or, WHERE_AND_OR="AND")
    vnf_id = vnf["uuid"]

    # Get the list of flavors and images of the VNF before deleting its DB row
    flavorList = get_flavorlist(mydb, vnf_id)
    if len(flavorList)==0:
        logger.warn("delete_vnf error. No flavors found for the VNF id '%s'", vnf_id)

    imageList = get_imagelist(mydb, vnf_id)
    if len(imageList)==0:
        logger.warn( "delete_vnf error. No images found for the VNF id '%s'", vnf_id)

    deleted = mydb.delete_row_by_id('vnfs', vnf_id)
    if deleted == 0:
        raise NfvoException("vnf '{}' not found".format(vnf_id), HTTP_Not_Found)

    # items whose VIM deletion failed; collected but currently not reported back
    undeletedItems = []
    for flavor in flavorList:
        #check if flavor is used by other vnf
        try:
            c = mydb.get_rows(FROM='vms', WHERE={'flavor_id':flavor} )
            if len(c) > 0:
                logger.debug("Flavor '%s' not deleted because it is being used by another VNF", flavor)
                continue
            #flavor not used, must be deleted
            #delete at VIM
            c = mydb.get_rows(FROM='datacenters_flavors', WHERE={'flavor_id':flavor})
            for flavor_vim in c:
                if flavor_vim["datacenter_id"] not in vims:
                    continue
                if flavor_vim['created']=='false': #skip this flavor because not created by openmano
                    continue
                myvim=vims[ flavor_vim["datacenter_id"] ]
                try:
                    myvim.delete_flavor(flavor_vim["vim_id"])
                except vimconn.vimconnNotFoundException as e:
                    # already gone at the VIM: nothing else to do
                    logger.warn("VIM flavor %s not exist at datacenter %s", flavor_vim["vim_id"], flavor_vim["datacenter_id"] )
                except vimconn.vimconnException as e:
                    logger.error("Not possible to delete VIM flavor %s from datacenter %s: %s %s",
                            flavor_vim["vim_id"], flavor_vim["datacenter_id"], type(e).__name__, str(e))
                    undeletedItems.append("flavor {} from VIM {}".format(flavor_vim["vim_id"], flavor_vim["datacenter_id"] ))
            #delete flavor from Database, using table flavors and with cascade foreign key also at datacenters_flavors
            mydb.delete_row_by_id('flavors', flavor)
        except db_base_Exception as e:
            logger.error("delete_vnf_error. Not possible to get flavor details and delete '%s'. %s", flavor, str(e))
            undeletedItems.append("flavor %s" % flavor)


    for image in imageList:
        try:
            #check if image is used by other vnf
            c = mydb.get_rows(FROM='vms', WHERE={'image_id':image} )
            if len(c) > 0:
                logger.debug("Image '%s' not deleted because it is being used by another VNF", image)
                continue
            #image not used, must be deleted
            #delete at VIM
            c = mydb.get_rows(FROM='datacenters_images', WHERE={'image_id':image})
            for image_vim in c:
                if image_vim["datacenter_id"] not in vims:
                    continue
                if image_vim['created']=='false': #skip this image because not created by openmano
                    continue
                myvim=vims[ image_vim["datacenter_id"] ]
                try:
                    myvim.delete_image(image_vim["vim_id"])
                except vimconn.vimconnNotFoundException as e:
                    # already gone at the VIM: nothing else to do
                    logger.warn("VIM image %s not exist at datacenter %s", image_vim["vim_id"], image_vim["datacenter_id"] )
                except vimconn.vimconnException as e:
                    logger.error("Not possible to delete VIM image %s from datacenter %s: %s %s",
                            image_vim["vim_id"], image_vim["datacenter_id"], type(e).__name__, str(e))
                    undeletedItems.append("image {} from VIM {}".format(image_vim["vim_id"], image_vim["datacenter_id"] ))
            #delete image from Database, using table images and with cascade foreign key also at datacenters_images
            mydb.delete_row_by_id('images', image)
        except db_base_Exception as e:
            logger.error("delete_vnf_error. Not possible to get image details and delete '%s'. %s", image, str(e))
            undeletedItems.append("image %s" % image)

    return vnf_id + " " + vnf["name"]
    #if undeletedItems:
    #    return "delete_vnf. Undeleted: %s" %(undeletedItems)
+
+
def get_hosts_info(mydb, nfvo_tenant_id, datacenter_name=None):
    """Return the hosts topology of the tenant datacenter.

    Result: {'name': <datacenter name>, 'servers': <host list from the VIM>}
    Raises NfvoException if the datacenter is not found or the VIM call fails.
    """
    # get_vim() returns a dict {datacenter_id: vim_connector} and signals
    # errors by raising (see its other uses in this module); the previous
    # "result, vims = get_vim(...)" tuple unpacking no longer matched that
    # contract and could never work.
    vims = get_vim(mydb, nfvo_tenant_id, None, datacenter_name)
    if len(vims) == 0:
        raise NfvoException("datacenter '{}' not found".format(str(datacenter_name)), HTTP_Not_Found)
    elif len(vims) > 1:
        raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
    myvim = vims.values()[0]
    try:
        servers = myvim.get_hosts_info()
    except vimconn.vimconnException as e:
        raise NfvoException("Not possible to get_hosts_info from VIM: {}".format(str(e)), e.http_code)
    return {'name': myvim['name'], 'servers': servers}
+
+
def get_hosts(mydb, nfvo_tenant_id):
    """Return the datacenter hosts and their VMs as seen by the (single) VIM
    of the tenant, mapping every VIM VM back to its openmano VM model name."""
    vims = get_vim(mydb, nfvo_tenant_id)
    if not vims:
        raise NfvoException("No datacenter found for tenant '{}'".format(str(nfvo_tenant_id)), HTTP_Not_Found)
    if len(vims) > 1:
        # ambiguous: the tenant is attached to several datacenters
        raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
    myvim = vims.values()[0]
    try:
        host_list = myvim.get_hosts()
        logger.debug('VIM hosts response: '+ yaml.safe_dump(host_list, indent=4, default_flow_style=False))

        server_list = []
        datacenter = {'Datacenters': [{'name': myvim['name'], 'servers': server_list}]}
        for host in host_list:
            server = {'name': host['name'], 'vms': []}
            for vm in host['instances']:
                # translate the VIM vm id into the openmano VM model name
                try:
                    rows = mydb.get_rows(SELECT=('name',), FROM='instance_vms as iv join vms on iv.vm_id=vms.uuid',
                                         WHERE={'vim_vm_id': vm['id']})
                    if not rows:
                        logger.warn("nfvo.get_hosts virtual machine at VIM '{}' not found at tidnfvo".format(vm['id']))
                        continue
                    server['vms'].append({'name': vm['name'], 'model': rows[0]['name']})
                except db_base_Exception as e:
                    # tolerate database errors for individual VMs
                    logger.warn("nfvo.get_hosts virtual machine at VIM '{}' error {}".format(vm['id'], str(e)))
            server_list.append(server)
        return datacenter
    except vimconn.vimconnException as e:
        raise NfvoException("Not possible to get_host_list from VIM: {}".format(str(e)), e.http_code)
+
+
+ def new_scenario(mydb, tenant_id, topo):
+
+ # result, vims = get_vim(mydb, tenant_id)
+ # if result < 0:
+ # return result, vims
+ #1: parse input
+ if tenant_id != "any":
+ check_tenant(mydb, tenant_id)
+ if "tenant_id" in topo:
+ if topo["tenant_id"] != tenant_id:
+ raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(topo["tenant_id"], tenant_id),
+ HTTP_Unauthorized)
+ else:
+ tenant_id=None
+
+ #1.1: get VNFs and external_networks (other_nets).
+ vnfs={}
+ other_nets={} #external_networks, bridge_networks and data_networkds
+ nodes = topo['topology']['nodes']
+ for k in nodes.keys():
+ if nodes[k]['type'] == 'VNF':
+ vnfs[k] = nodes[k]
+ vnfs[k]['ifaces'] = {}
+ elif nodes[k]['type'] == 'other_network' or nodes[k]['type'] == 'external_network':
+ other_nets[k] = nodes[k]
+ other_nets[k]['external']=True
+ elif nodes[k]['type'] == 'network':
+ other_nets[k] = nodes[k]
+ other_nets[k]['external']=False
+
+
+ #1.2: Check that VNF are present at database table vnfs. Insert uuid, description and external interfaces
+ for name,vnf in vnfs.items():
+ where={}
+ where_or={"tenant_id": tenant_id, 'public': "true"}
+ error_text = ""
+ error_pos = "'topology':'nodes':'" + name + "'"
+ if 'vnf_id' in vnf:
+ error_text += " 'vnf_id' " + vnf['vnf_id']
+ where['uuid'] = vnf['vnf_id']
+ if 'VNF model' in vnf:
+ error_text += " 'VNF model' " + vnf['VNF model']
+ where['name'] = vnf['VNF model']
+ if len(where) == 0:
+ raise NfvoException("Descriptor need a 'vnf_id' or 'VNF model' field at " + error_pos, HTTP_Bad_Request)
+
+ vnf_db = mydb.get_rows(SELECT=('uuid','name','description'),
+ FROM='vnfs',
+ WHERE=where,
+ WHERE_OR=where_or,
+ WHERE_AND_OR="AND")
+ if len(vnf_db)==0:
+ raise NfvoException("unknown" + error_text + " at " + error_pos, HTTP_Not_Found)
+ elif len(vnf_db)>1:
+ raise NfvoException("more than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", HTTP_Conflict)
+ vnf['uuid']=vnf_db[0]['uuid']
+ vnf['description']=vnf_db[0]['description']
+ #get external interfaces
+ ext_ifaces = mydb.get_rows(SELECT=('external_name as name','i.uuid as iface_uuid', 'i.type as type'),
+ FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
+ WHERE={'vnfs.uuid':vnf['uuid']}, WHERE_NOT={'external_name':None} )
+ for ext_iface in ext_ifaces:
+ vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type':ext_iface['type']}
+
+ #1.4 get list of connections
+ conections = topo['topology']['connections']
+ conections_list = []
+ conections_list_name = []
+ for k in conections.keys():
+ if type(conections[k]['nodes'])==dict: #dict with node:iface pairs
+ ifaces_list = conections[k]['nodes'].items()
+ elif type(conections[k]['nodes'])==list: #list with dictionary
+ ifaces_list=[]
+ conection_pair_list = map(lambda x: x.items(), conections[k]['nodes'] )
+ for k2 in conection_pair_list:
+ ifaces_list += k2
+
+ con_type = conections[k].get("type", "link")
+ if con_type != "link":
+ if k in other_nets:
+ raise NfvoException("Format error. Reapeted network name at 'topology':'connections':'{}'".format(str(k)), HTTP_Bad_Request)
+ other_nets[k] = {'external': False}
+ if conections[k].get("graph"):
+ other_nets[k]["graph"] = conections[k]["graph"]
+ ifaces_list.append( (k, None) )
+
+
+ if con_type == "external_network":
+ other_nets[k]['external'] = True
+ if conections[k].get("model"):
+ other_nets[k]["model"] = conections[k]["model"]
+ else:
+ other_nets[k]["model"] = k
+ if con_type == "dataplane_net" or con_type == "bridge_net":
+ other_nets[k]["model"] = con_type
+
+ conections_list_name.append(k)
+ conections_list.append(set(ifaces_list)) #from list to set to operate as a set (this conversion removes elements that are repeated in a list)
+ #print set(ifaces_list)
+ #check valid VNF and iface names
+ for iface in ifaces_list:
+ if iface[0] not in vnfs and iface[0] not in other_nets :
+ raise NfvoException("format error. Invalid VNF name at 'topology':'connections':'{}':'nodes':'{}'".format(
+ str(k), iface[0]), HTTP_Not_Found)
+ if iface[0] in vnfs and iface[1] not in vnfs[ iface[0] ]['ifaces']:
+ raise NfvoException("format error. Invalid interface name at 'topology':'connections':'{}':'nodes':'{}':'{}'".format(
+ str(k), iface[0], iface[1]), HTTP_Not_Found)
+
+ #1.5 unify connections from the pair list to a consolidated list
+ index=0
+ while index < len(conections_list):
+ index2 = index+1
+ while index2 < len(conections_list):
+ if len(conections_list[index] & conections_list[index2])>0: #common interface, join nets
+ conections_list[index] |= conections_list[index2]
+ del conections_list[index2]
+ del conections_list_name[index2]
+ else:
+ index2 += 1
+ conections_list[index] = list(conections_list[index]) # from set to list again
+ index += 1
+ #for k in conections_list:
+ # print k
+
+
+
+ #1.6 Delete non external nets
+ # for k in other_nets.keys():
+ # if other_nets[k]['model']=='bridge' or other_nets[k]['model']=='dataplane_net' or other_nets[k]['model']=='bridge_net':
+ # for con in conections_list:
+ # delete_indexes=[]
+ # for index in range(0,len(con)):
+ # if con[index][0] == k: delete_indexes.insert(0,index) #order from higher to lower
+ # for index in delete_indexes:
+ # del con[index]
+ # del other_nets[k]
+ #1.7: Check external_ports are present at database table datacenter_nets
+ for k,net in other_nets.items():
+ error_pos = "'topology':'nodes':'" + k + "'"
+ if net['external']==False:
+ if 'name' not in net:
+ net['name']=k
+ if 'model' not in net:
+ raise NfvoException("needed a 'model' at " + error_pos, HTTP_Bad_Request)
+ if net['model']=='bridge_net':
+ net['type']='bridge';
+ elif net['model']=='dataplane_net':
+ net['type']='data';
+ else:
+ raise NfvoException("unknown 'model' '"+ net['model'] +"' at " + error_pos, HTTP_Not_Found)
+ else: #external
+ #IF we do not want to check that external network exist at datacenter
+ pass
+ #ELSE
+ # error_text = ""
+ # WHERE_={}
+ # if 'net_id' in net:
+ # error_text += " 'net_id' " + net['net_id']
+ # WHERE_['uuid'] = net['net_id']
+ # if 'model' in net:
+ # error_text += " 'model' " + net['model']
+ # WHERE_['name'] = net['model']
+ # if len(WHERE_) == 0:
+ # return -HTTP_Bad_Request, "needed a 'net_id' or 'model' at " + error_pos
+ # r,net_db = mydb.get_table(SELECT=('uuid','name','description','type','shared'),
+ # FROM='datacenter_nets', WHERE=WHERE_ )
+ # if r<0:
+ # print "nfvo.new_scenario Error getting datacenter_nets",r,net_db
+ # elif r==0:
+ # print "nfvo.new_scenario Error" +error_text+ " is not present at database"
+ # return -HTTP_Bad_Request, "unknown " +error_text+ " at " + error_pos
+ # elif r>1:
+ # print "nfvo.new_scenario Error more than one external_network for " +error_text+ " is present at database"
+ # return -HTTP_Bad_Request, "more than one external_network for " +error_text+ "at "+ error_pos + " Concrete with 'net_id'"
+ # other_nets[k].update(net_db[0])
+ #ENDIF
+ net_list={}
+ net_nb=0 #Number of nets
+ for con in conections_list:
+ #check if this is connected to a external net
+ other_net_index=-1
+ #print
+ #print "con", con
+ for index in range(0,len(con)):
+ #check if this is connected to a external net
+ for net_key in other_nets.keys():
+ if con[index][0]==net_key:
+ if other_net_index>=0:
+ error_text="There is some interface connected both to net '%s' and net '%s'" % (con[other_net_index][0], net_key)
+ #print "nfvo.new_scenario " + error_text
+ raise NfvoException(error_text, HTTP_Bad_Request)
+ else:
+ other_net_index = index
+ net_target = net_key
+ break
+ #print "other_net_index", other_net_index
+ try:
+ if other_net_index>=0:
+ del con[other_net_index]
+ #IF we do not want to check that external network exist at datacenter
+ if other_nets[net_target]['external'] :
+ if "name" not in other_nets[net_target]:
+ other_nets[net_target]['name'] = other_nets[net_target]['model']
+ if other_nets[net_target]["type"] == "external_network":
+ if vnfs[ con[0][0] ]['ifaces'][ con[0][1] ]["type"] == "data":
+ other_nets[net_target]["type"] = "data"
+ else:
+ other_nets[net_target]["type"] = "bridge"
+ #ELSE
+ # if other_nets[net_target]['external'] :
+ # type_='data' if len(con)>1 else 'ptp' #an external net is connected to a external port, so it is ptp if only one connection is done to this net
+ # if type_=='data' and other_nets[net_target]['type']=="ptp":
+ # error_text = "Error connecting %d nodes on a not multipoint net %s" % (len(con), net_target)
+ # print "nfvo.new_scenario " + error_text
+ # return -HTTP_Bad_Request, error_text
+ #ENDIF
+ for iface in con:
+ vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
+ else:
+ #create a net
+ net_type_bridge=False
+ net_type_data=False
+ net_target = "__-__net"+str(net_nb)
+ net_list[net_target] = {'name': conections_list_name[net_nb], #"net-"+str(net_nb),
+ 'description':"net-%s in scenario %s" %(net_nb,topo['name']),
+ 'external':False}
+ for iface in con:
+ vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
+ iface_type = vnfs[ iface[0] ]['ifaces'][ iface[1] ]['type']
+ if iface_type=='mgmt' or iface_type=='bridge':
+ net_type_bridge = True
+ else:
+ net_type_data = True
+ if net_type_bridge and net_type_data:
+ error_text = "Error connection interfaces of bridge type with data type. Firs node %s, iface %s" % (iface[0], iface[1])
+ #print "nfvo.new_scenario " + error_text
+ raise NfvoException(error_text, HTTP_Bad_Request)
+ elif net_type_bridge:
+ type_='bridge'
+ else:
+ type_='data' if len(con)>2 else 'ptp'
+ net_list[net_target]['type'] = type_
+ net_nb+=1
+ except Exception:
+ error_text = "Error connection node %s : %s does not match any VNF or interface" % (iface[0], iface[1])
+ #print "nfvo.new_scenario " + error_text
+ #raise e
+ raise NfvoException(error_text, HTTP_Bad_Request)
+
+ #1.8: Connect to management net all not already connected interfaces of type 'mgmt'
+ #1.8.1 obtain management net
+ mgmt_net = mydb.get_rows(SELECT=('uuid','name','description','type','shared'),
+ FROM='datacenter_nets', WHERE={'name':'mgmt'} )
+ #1.8.2 check all interfaces from all vnfs
+ if len(mgmt_net)>0:
+ add_mgmt_net = False
+ for vnf in vnfs.values():
+ for iface in vnf['ifaces'].values():
+ if iface['type']=='mgmt' and 'net_key' not in iface:
+ #iface not connected
+ iface['net_key'] = 'mgmt'
+ add_mgmt_net = True
+ if add_mgmt_net and 'mgmt' not in net_list:
+ net_list['mgmt']=mgmt_net[0]
+ net_list['mgmt']['external']=True
+ net_list['mgmt']['graph']={'visible':False}
+
+ net_list.update(other_nets)
+ #print
+ #print 'net_list', net_list
+ #print
+ #print 'vnfs', vnfs
+ #print
+
+ #2: insert scenario. filling tables scenarios,sce_vnfs,sce_interfaces,sce_nets
+ c = mydb.new_scenario( { 'vnfs':vnfs, 'nets':net_list,
+ 'tenant_id':tenant_id, 'name':topo['name'],
+ 'description':topo.get('description',topo['name']),
+ 'public': topo.get('public', False)
+ })
+
+ return c
+
+
def new_scenario_v02(mydb, tenant_id, scenario_dict, version):
    """Create a new scenario from a v0.2/v0.3 descriptor and store it in the database.

    mydb:          database access object (uses get_rows() and new_scenario())
    tenant_id:     owner tenant uuid, or "any" to skip the tenant check
    scenario_dict: parsed descriptor; must contain a top-level "scenario" key
    version:       descriptor schema version, "0.2" or "0.3"
    Returns the uuid of the created scenario.
    Raises NfvoException on unknown/ambiguous VNFs, bad interface connections
    or inconsistent network types.
    """
    scenario = scenario_dict["scenario"]
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        if "tenant_id" in scenario:
            if scenario["tenant_id"] != tenant_id:
                # the descriptor may not claim a different owner than the caller
                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(
                    scenario["tenant_id"], tenant_id), HTTP_Unauthorized)
    else:
        tenant_id=None

    # 1: Check that VNF are present at database table vnfs and update content into scenario dict
    for name,vnf in scenario["vnfs"].iteritems():
        where={}
        # a VNF is usable if owned by this tenant OR marked public
        where_or={"tenant_id": tenant_id, 'public': "true"}
        error_text = ""
        error_pos = "'scenario':'vnfs':'" + name + "'"
        if 'vnf_id' in vnf:
            error_text += " 'vnf_id' " + vnf['vnf_id']
            where['uuid'] = vnf['vnf_id']
        if 'vnf_name' in vnf:
            error_text += " 'vnf_name' " + vnf['vnf_name']
            where['name'] = vnf['vnf_name']
        if len(where) == 0:
            raise NfvoException("Needed a 'vnf_id' or 'vnf_name' at " + error_pos, HTTP_Bad_Request)
        vnf_db = mydb.get_rows(SELECT=('uuid', 'name', 'description'),
                               FROM='vnfs',
                               WHERE=where,
                               WHERE_OR=where_or,
                               WHERE_AND_OR="AND")
        if len(vnf_db) == 0:
            raise NfvoException("Unknown" + error_text + " at " + error_pos, HTTP_Not_Found)
        elif len(vnf_db) > 1:
            raise NfvoException("More than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", HTTP_Conflict)
        vnf['uuid'] = vnf_db[0]['uuid']
        vnf['description'] = vnf_db[0]['description']
        vnf['ifaces'] = {}
        # get external interfaces (those with a non-NULL external_name)
        ext_ifaces = mydb.get_rows(SELECT=('external_name as name', 'i.uuid as iface_uuid', 'i.type as type'),
                                   FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
                                   WHERE={'vnfs.uuid':vnf['uuid']}, WHERE_NOT={'external_name': None} )
        for ext_iface in ext_ifaces:
            vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type': ext_iface['type']}
        # TODO? get internal-connections from db.nets and their profiles, and update scenario[vnfs][internal-connections] accordingly

    # 2: Insert net_key and ip_address at every vnf interface
    for net_name, net in scenario["networks"].items():
        net_type_bridge = False
        net_type_data = False
        for iface_dict in net["interfaces"]:
            # v0.2 lists interfaces as {vnf: vnf_interface} pairs directly;
            # v0.3 uses explicit "vnf"/"vnf_interface" keys plus optional ip_address
            # NOTE(review): any other version value leaves temp_dict/ip_address
            # undefined and would raise NameError below — confirm callers only
            # pass "0.2" or "0.3"
            if version == "0.2":
                temp_dict = iface_dict
                ip_address = None
            elif version == "0.3":
                temp_dict = {iface_dict["vnf"] : iface_dict["vnf_interface"]}
                ip_address = iface_dict.get('ip_address', None)
            for vnf, iface in temp_dict.items():
                if vnf not in scenario["vnfs"]:
                    error_text = "Error at 'networks':'{}':'interfaces' VNF '{}' not match any VNF at 'vnfs'".format(
                        net_name, vnf)
                    raise NfvoException(error_text, HTTP_Not_Found)
                if iface not in scenario["vnfs"][vnf]['ifaces']:
                    error_text = "Error at 'networks':'{}':'interfaces':'{}' interface not match any VNF interface"\
                        .format(net_name, iface)
                    raise NfvoException(error_text, HTTP_Bad_Request)
                if "net_key" in scenario["vnfs"][vnf]['ifaces'][iface]:
                    # an interface may only be attached to one network
                    error_text = "Error at 'networks':'{}':'interfaces':'{}' interface already connected at network"\
                                 "'{}'".format(net_name, iface,scenario["vnfs"][vnf]['ifaces'][iface]['net_key'])
                    raise NfvoException(error_text, HTTP_Bad_Request)
                scenario["vnfs"][vnf]['ifaces'][ iface ]['net_key'] = net_name
                scenario["vnfs"][vnf]['ifaces'][iface]['ip_address'] = ip_address
                iface_type = scenario["vnfs"][vnf]['ifaces'][iface]['type']
                if iface_type == 'mgmt' or iface_type == 'bridge':
                    net_type_bridge = True
                else:
                    net_type_data = True

        # derive the network type from its attached interfaces:
        # mixing bridge-type and data-type interfaces on one net is rejected
        if net_type_bridge and net_type_data:
            error_text = "Error connection interfaces of 'bridge' type and 'data' type at 'networks':'{}':'interfaces'"\
                .format(net_name)
            raise NfvoException(error_text, HTTP_Bad_Request)
        elif net_type_bridge:
            type_ = 'bridge'
        else:
            # point-to-point when only 2 data interfaces, multipoint otherwise
            type_ = 'data' if len(net["interfaces"]) > 2 else 'ptp'

        if net.get("implementation"):  # for v0.3
            if type_ == "bridge" and net["implementation"] == "underlay":
                error_text = "Error connecting interfaces of data type to a network declared as 'underlay' at "\
                             "'network':'{}'".format(net_name)
                raise NfvoException(error_text, HTTP_Bad_Request)
            elif type_ != "bridge" and net["implementation"] == "overlay":
                error_text = "Error connecting interfaces of data type to a network declared as 'overlay' at "\
                             "'network':'{}'".format(net_name)
                raise NfvoException(error_text, HTTP_Bad_Request)
            net.pop("implementation")
        if "type" in net and version == "0.3":  # for v0.3
            if type_ == "data" and net["type"] == "e-line":
                error_text = "Error connecting more than 2 interfaces of data type to a network declared as type "\
                             "'e-line' at 'network':'{}'".format(net_name)
                raise NfvoException(error_text, HTTP_Bad_Request)
            elif type_ == "ptp" and net["type"] == "e-lan":
                # an e-lan declared net stays multipoint even with only 2 attachments
                type_ = "data"

        net['type'] = type_
        net['name'] = net_name
        net['external'] = net.get('external', False)

    # 3: insert at database
    scenario["nets"] = scenario["networks"]
    scenario['tenant_id'] = tenant_id
    scenario_id = mydb.new_scenario(scenario)
    return scenario_id
+
+
def edit_scenario(mydb, tenant_id, scenario_id, data):
    """Modify an existing scenario.

    Tags the payload with the scenario uuid and owner tenant, then delegates
    the update to the database layer and returns its result.
    """
    data["uuid"] = scenario_id
    data["tenant_id"] = tenant_id
    return mydb.edit_scenario(data)
+
+
def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instance_scenario_description, datacenter=None,vim_tenant=None, startvms=True):
    """Instantiate a stored scenario on a VIM.

    Creates every scenario-level network (sce_nets) and every VNF-internal
    network at the VIM, boots one VM per VNF VM (creating or reusing images
    and flavors), and finally persists the whole deployment as an
    instance-scenario in the database. On any database or VIM error, every
    VIM object created so far is rolled back before re-raising.

    datacenter/vim_tenant: select the target datacenter by name or uuid.
    startvms: when False, VMs are created with 'start'='no'.
    Returns the instance-scenario dictionary read back from the database.
    Raises NfvoException wrapping database or VIM errors.
    """
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter, vim_tenant=vim_tenant)
    vims = {datacenter_id: myvim}
    myvim_tenant = myvim['tenant_id']
    datacenter_name = myvim['name']

    rollbackList=[]  # VIM objects created so far; undone on failure
    try:
        #print "Checking that the scenario_id exists and getting the scenario dictionary"
        scenarioDict = mydb.get_scenario(scenario_id, tenant_id, datacenter_id)
        scenarioDict['datacenter2tenant'] = { datacenter_id: myvim['config']['datacenter_tenant_id'] }
        scenarioDict['datacenter_id'] = datacenter_id

        logger.debug("start_scenario Scenario %s: consisting of %d VNF(s)", scenarioDict['name'],len(scenarioDict['vnfs']))

        # Auxiliary dictionary. First key: 'scenario' or sce_vnf uuid.
        # Second key: uuid of the net/sce_net. Value: vim_net_id
        auxNetDict = {}
        auxNetDict['scenario'] = {}

        logger.debug("start_scenario 1. Creating new nets (sce_nets) in the VIM")
        for sce_net in scenarioDict['nets']:
            myNetName = "%s.%s" % (instance_scenario_name, sce_net['name'])
            myNetName = myNetName[0:255] #limit length
            myNetType = sce_net['type']
            myNetDict = {}
            myNetDict["name"] = myNetName
            myNetDict["type"] = myNetType
            myNetDict["tenant_id"] = myvim_tenant
            myNetIPProfile = sce_net.get('ip_profile', None)
            #TODO: We should use the dictionary as input parameter for new_network
            if not sce_net["external"]:
                network_id = myvim.new_network(myNetName, myNetType, myNetIPProfile)
                sce_net['vim_id'] = network_id
                auxNetDict['scenario'][sce_net['uuid']] = network_id
                rollbackList.append({'what':'network','where':'vim','vim_id':datacenter_id,'uuid':network_id})
                sce_net["created"] = True
            else:
                # external nets must already exist at the datacenter
                if sce_net['vim_id'] == None:
                    error_text = "Error, datacenter '%s' does not have external network '%s'." % (datacenter_name, sce_net['name'])
                    _, message = rollback(mydb, vims, rollbackList)
                    logger.error("nfvo.start_scenario: %s", error_text)
                    raise NfvoException(error_text, HTTP_Bad_Request)
                logger.debug("Using existent VIM network for scenario %s. Network id %s", scenarioDict['name'],sce_net['vim_id'])
                auxNetDict['scenario'][sce_net['uuid']] = sce_net['vim_id']

        logger.debug("start_scenario 2. Creating new nets (vnf internal nets) in the VIM")
        #For each vnf net, we create it and we add it to instanceNetlist.
        for sce_vnf in scenarioDict['vnfs']:
            for net in sce_vnf['nets']:
                myNetName = "%s.%s" % (instance_scenario_name,net['name'])
                myNetName = myNetName[0:255] #limit length
                myNetType = net['type']
                myNetDict = {}
                myNetDict["name"] = myNetName
                myNetDict["type"] = myNetType
                myNetDict["tenant_id"] = myvim_tenant
                myNetIPProfile = net.get('ip_profile', None)
                #TODO: We should use the dictionary as input parameter for new_network
                network_id = myvim.new_network(myNetName, myNetType, myNetIPProfile)
                net['vim_id'] = network_id
                if sce_vnf['uuid'] not in auxNetDict:
                    auxNetDict[sce_vnf['uuid']] = {}
                auxNetDict[sce_vnf['uuid']][net['uuid']] = network_id
                rollbackList.append({'what':'network','where':'vim','vim_id':datacenter_id,'uuid':network_id})
                net["created"] = True

        logger.debug("start_scenario 3. Creating new vm instances in the VIM")
        #myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
        i = 0
        for sce_vnf in scenarioDict['vnfs']:
            for vm in sce_vnf['vms']:
                i += 1
                myVMDict = {}
                # chr(96+i) appends 'a','b',... as a per-VM suffix.
                # NOTE(review): i keeps growing across VNFs; past 26 VMs this
                # leaves the a-z ASCII range — confirm intended
                myVMDict['name'] = "{}.{}.{}".format(instance_scenario_name,sce_vnf['name'],chr(96+i))
                myVMDict['description'] = myVMDict['name'][0:99]
                if not startvms:
                    myVMDict['start'] = "no"
                myVMDict['name'] = myVMDict['name'][0:255] #limit name length

                #create image at vim in case it not exist
                image_dict = mydb.get_table_by_uuid_name("images", vm['image_id'])
                image_id = create_or_use_image(mydb, vims, image_dict, [], True)
                vm['vim_image_id'] = image_id

                #create flavor at vim in case it not exist
                flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
                if flavor_dict['extended']!=None:
                    # NOTE(review): yaml.load without an explicit Loader is unsafe on
                    # untrusted input; here 'extended' comes from this project's own DB
                    flavor_dict['extended']= yaml.load(flavor_dict['extended'])
                flavor_id = create_or_use_flavor(mydb, vims, flavor_dict, [], True)
                vm['vim_flavor_id'] = flavor_id

                myVMDict['imageRef'] = vm['vim_image_id']
                myVMDict['flavorRef'] = vm['vim_flavor_id']
                myVMDict['networks'] = []
                for iface in vm['interfaces']:
                    netDict = {}
                    if iface['type']=="data":
                        netDict['type'] = iface['model']
                    elif "model" in iface and iface["model"]!=None:
                        netDict['model']=iface['model']
                    #TODO in future, remove this because mac_address will not be set, and the type of PF,VF is obtained from interface table model
                    #discover type of interface looking at flavor
                    for numa in flavor_dict.get('extended',{}).get('numas',[]):
                        for flavor_iface in numa.get('interfaces',[]):
                            if flavor_iface.get('name') == iface['internal_name']:
                                if flavor_iface['dedicated'] == 'yes':
                                    netDict['type']="PF" #passthrough
                                elif flavor_iface['dedicated'] == 'no':
                                    netDict['type']="VF" #sriov
                                elif flavor_iface['dedicated'] == 'yes:sriov':
                                    netDict['type']="VFnotShared" #sriov but only one sriov on the PF
                                netDict["mac_address"] = flavor_iface.get("mac_address")
                                break;
                    netDict["use"]=iface['type']
                    if netDict["use"]=="data" and not netDict.get("type"):
                        e_text = "Cannot determine the interface type PF or VF of VNF '%s' VM '%s' iface '%s'" %(sce_vnf['name'], vm['name'], iface['internal_name'])
                        if flavor_dict.get('extended')==None:
                            raise NfvoException(e_text + "After database migration some information is not available. \
                                Try to delete and create the scenarios and VNFs again", HTTP_Conflict)
                        else:
                            raise NfvoException(e_text, HTTP_Internal_Server_Error)
                    if netDict["use"]=="mgmt" or netDict["use"]=="bridge":
                        netDict["type"]="virtual"
                    if "vpci" in iface and iface["vpci"] is not None:
                        netDict['vpci'] = iface['vpci']
                    if "mac" in iface and iface["mac"] is not None:
                        netDict['mac_address'] = iface['mac']
                    if "port-security" in iface and iface["port-security"] is not None:
                        netDict['port_security'] = iface['port-security']
                    if "floating-ip" in iface and iface["floating-ip"] is not None:
                        netDict['floating_ip'] = iface['floating-ip']
                    netDict['name'] = iface['internal_name']
                    if iface['net_id'] is None:
                        # interface attached to a scenario-level net: resolve the
                        # vim network through the sce_vnf external interfaces
                        for vnf_iface in sce_vnf["interfaces"]:
                            if vnf_iface['interface_id']==iface['uuid']:
                                netDict['net_id'] = auxNetDict['scenario'][ vnf_iface['sce_net_id'] ]
                                break
                    else:
                        netDict['net_id'] = auxNetDict[ sce_vnf['uuid'] ][ iface['net_id'] ]
                    #skip bridge ifaces not connected to any net
                    #if 'net_id' not in netDict or netDict['net_id']==None:
                    #    continue
                    myVMDict['networks'].append(netDict)
                vm_id = myvim.new_vminstance(myVMDict['name'],myVMDict['description'],myVMDict.get('start', None),
                                             myVMDict['imageRef'],myVMDict['flavorRef'],myVMDict['networks'])
                vm['vim_id'] = vm_id
                rollbackList.append({'what':'vm','where':'vim','vim_id':datacenter_id,'uuid':vm_id})
                #put interface uuid back to scenario[vnfs][vms][interfaces]
                for net in myVMDict['networks']:
                    if "vim_id" in net:
                        for iface in vm['interfaces']:
                            if net["name"]==iface["internal_name"]:
                                iface["vim_id"]=net["vim_id"]
                                break

        logger.debug("start scenario Deployment done")
        instance_id = mydb.new_instance_scenario_as_a_whole(tenant_id,instance_scenario_name, instance_scenario_description, scenarioDict)
        return mydb.get_instance_scenario(instance_id)

    except (db_base_Exception, vimconn.vimconnException) as e:
        # undo every VIM object created so far before propagating the error
        _, message = rollback(mydb, vims, rollbackList)
        if isinstance(e, db_base_Exception):
            error_text = "Exception at database"
        else:
            error_text = "Exception at VIM"
        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
        raise NfvoException(error_text, e.http_code)
+
+
def unify_cloud_config(cloud_config_preserve, cloud_config):
    """Merge two cloud-config dictionaries into a new one.

    Combines "key-pairs", "users" (merging the key-pairs of users that share
    the same name), "boot-data-drive", "user-data" and "config-files"
    (matched by "dest"). On any conflict the value from cloud_config_preserve
    wins. Either argument may be None; returns None when both are empty/None.
    """
    if not cloud_config_preserve and not cloud_config:
        return None

    new_cloud_config = {"key-pairs": [], "users": []}
    # key-pairs: keep insertion order, drop duplicates; preserve's keys first
    if cloud_config_preserve:
        for key in cloud_config_preserve.get("key-pairs", ()):
            if key not in new_cloud_config["key-pairs"]:
                new_cloud_config["key-pairs"].append(key)
    if cloud_config:
        for key in cloud_config.get("key-pairs", ()):
            if key not in new_cloud_config["key-pairs"]:
                new_cloud_config["key-pairs"].append(key)
    if not new_cloud_config["key-pairs"]:
        del new_cloud_config["key-pairs"]

    # users: concatenate both lists, then merge entries sharing the same name.
    # cloud_config users go first so that, on merge, the key-pairs coming from
    # cloud_config_preserve are appended into the surviving (first) entry.
    if cloud_config:
        new_cloud_config["users"] += cloud_config.get("users", ())
    if cloud_config_preserve:
        new_cloud_config["users"] += cloud_config_preserve.get("users", ())
    index_to_delete = []
    users = new_cloud_config.get("users", [])
    for index0 in range(0, len(users)):
        if index0 in index_to_delete:
            continue
        for index1 in range(index0 + 1, len(users)):
            if index1 in index_to_delete:
                continue
            if users[index0]["name"] == users[index1]["name"]:
                index_to_delete.append(index1)
                for key in users[index1].get("key-pairs", ()):
                    if "key-pairs" not in users[index0]:
                        users[index0]["key-pairs"] = [key]
                    elif key not in users[index0]["key-pairs"]:
                        users[index0]["key-pairs"].append(key)
    index_to_delete.sort(reverse=True)  # delete from the end so indexes stay valid
    for index in index_to_delete:
        del users[index]
    if not new_cloud_config["users"]:
        del new_cloud_config["users"]

    # boot-data-drive: scalar value, cloud_config_preserve wins
    if cloud_config and cloud_config.get("boot-data-drive") is not None:
        new_cloud_config["boot-data-drive"] = cloud_config["boot-data-drive"]
    if cloud_config_preserve and cloud_config_preserve.get("boot-data-drive") is not None:
        new_cloud_config["boot-data-drive"] = cloud_config_preserve["boot-data-drive"]

    # user-data: scalar value, cloud_config_preserve wins
    if cloud_config and cloud_config.get("user-data") is not None:
        new_cloud_config["user-data"] = cloud_config["user-data"]
    if cloud_config_preserve and cloud_config_preserve.get("user-data") is not None:
        new_cloud_config["user-data"] = cloud_config_preserve["user-data"]

    # config files: matched by destination path; a cloud_config_preserve entry
    # with the same "dest" replaces the cloud_config one (renamed from 'file'
    # to avoid shadowing the builtin)
    new_cloud_config["config-files"] = []
    if cloud_config and cloud_config.get("config-files") is not None:
        new_cloud_config["config-files"] += cloud_config["config-files"]
    if cloud_config_preserve:
        for config_file in cloud_config_preserve.get("config-files", ()):
            for index in range(0, len(new_cloud_config["config-files"])):
                if new_cloud_config["config-files"][index]["dest"] == config_file["dest"]:
                    new_cloud_config["config-files"][index] = config_file
                    break
            else:
                new_cloud_config["config-files"].append(config_file)
    if not new_cloud_config["config-files"]:
        del new_cloud_config["config-files"]
    return new_cloud_config
+
+
def get_vim_thread(mydb, tenant_id, datacenter_id_name=None, datacenter_tenant_id=None):
    """Return (thread_id, thread) for the vim_thread serving a datacenter.

    thread_id is the datacenter_tenants association uuid, which is also the
    key used in the global vim_threads["running"] map. When
    datacenter_tenant_id is given it is used directly; otherwise the
    association is resolved from the database, filtering by tenant and by
    datacenter uuid or name (datacenter_id_name).
    Raises NfvoException with HTTP_Conflict when more than one association
    matches, and with HTTP_Not_Found when no running thread is found.
    """
    datacenter_id = None
    datacenter_name = None
    thread = None
    try:
        if datacenter_tenant_id:
            # direct lookup: the association uuid is the thread key
            thread_id = datacenter_tenant_id
            thread = vim_threads["running"].get(datacenter_tenant_id)
        else:
            where_ = {"td.nfvo_tenant_id": tenant_id}
            if datacenter_id_name:
                if utils.check_valid_uuid(datacenter_id_name):
                    datacenter_id = datacenter_id_name
                    where_["dt.datacenter_id"] = datacenter_id
                else:
                    datacenter_name = datacenter_id_name
                    where_["d.name"] = datacenter_name
            if datacenter_tenant_id:
                # NOTE(review): unreachable — this is the else branch of
                # 'if datacenter_tenant_id', so it is always falsy here
                where_["dt.uuid"] = datacenter_tenant_id
            datacenters = mydb.get_rows(
                SELECT=("dt.uuid as datacenter_tenant_id",),
                FROM="datacenter_tenants as dt join tenants_datacenters as td on dt.uuid=td.datacenter_tenant_id "
                     "join datacenters as d on d.uuid=dt.datacenter_id",
                WHERE=where_)
            if len(datacenters) > 1:
                raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
            elif datacenters:
                thread_id = datacenters[0]["datacenter_tenant_id"]
                thread = vim_threads["running"].get(thread_id)
        if not thread:
            raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), HTTP_Not_Found)
        return thread_id, thread
    except db_base_Exception as e:
        # re-wrap database errors keeping their HTTP code
        raise NfvoException("{} {}".format(type(e).__name__ , str(e)), e.http_code)
+
def get_datacenter_by_name_uuid(mydb, tenant_id, datacenter_id_name=None, **extra_filter):
    """Resolve a datacenter by uuid or name and return (datacenter_id, vim).

    datacenter_id_name may be a uuid or a name; when None, the tenant must be
    attached to exactly one datacenter. extra_filter is forwarded to get_vim().
    Raises NfvoException with HTTP_Not_Found when no datacenter matches and
    HTTP_Conflict when the match is ambiguous.
    """
    datacenter_id = None
    datacenter_name = None
    if datacenter_id_name:
        if utils.check_valid_uuid(datacenter_id_name):
            datacenter_id = datacenter_id_name
        else:
            datacenter_name = datacenter_id_name
    vims = get_vim(mydb, tenant_id, datacenter_id, datacenter_name, **extra_filter)
    if len(vims) == 0:
        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), HTTP_Not_Found)
    elif len(vims) > 1:
        #print "nfvo.datacenter_action() error. Several datacenters found"
        raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
    # next(iter(items())) works on python2 and python3 alike;
    # the previous vims.keys()[0] breaks on python3 (dict views are not indexable)
    return next(iter(vims.items()))
+
+
def update(d, u):
    """Recursively merge dict u into dict d (in place) and return d.

    Nested mappings are merged at every depth level; any non-mapping value in
    u overwrites the corresponding value in d.
    """
    # collections.Mapping moved to collections.abc in python3 (removed from
    # the old location in 3.10); import locally so both interpreters work
    try:
        from collections.abc import Mapping
    except ImportError:  # python2
        from collections import Mapping
    for k, v in u.items():  # items() works on python2 and python3 (iteritems is py2-only)
        if isinstance(v, Mapping):
            d[k] = update(d.get(k, {}), v)
        else:
            d[k] = u[k]
    return d
+
+
+ def create_instance(mydb, tenant_id, instance_dict):
+ # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
+ # logger.debug("Creating instance...")
+ scenario = instance_dict["scenario"]
+
+ #find main datacenter
+ myvims = {}
- myvim_threads[default_datacenter_id] = get_vim_thread(tenant_id, default_datacenter_id)
- datacenter2tenant[default_datacenter_id] = vim['config']['datacenter_tenant_id']
++ myvim_threads_id = {}
++ instance_tasks={}
++ tasks_to_launch={}
+ datacenter = instance_dict.get("datacenter")
+ default_datacenter_id, vim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+ myvims[default_datacenter_id] = vim
- instance_tasks={}
++ myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id)
++ tasks_to_launch[myvim_threads_id[default_datacenter_id]] = []
+ #myvim_tenant = myvim['tenant_id']
+ # default_datacenter_name = vim['name']
+ rollbackList=[]
+
+ #print "Checking that the scenario exists and getting the scenario dictionary"
+ scenarioDict = mydb.get_scenario(scenario, tenant_id, default_datacenter_id)
+
+ #logger.debug(">>>>>>> Dictionaries before merging")
+ #logger.debug(">>>>>>> InstanceDict:\n{}".format(yaml.safe_dump(instance_dict,default_flow_style=False, width=256)))
+ #logger.debug(">>>>>>> ScenarioDict:\n{}".format(yaml.safe_dump(scenarioDict,default_flow_style=False, width=256)))
+
+ scenarioDict['datacenter_id'] = default_datacenter_id
+
+ auxNetDict = {} #Auxiliar dictionary. First key:'scenario' or sce_vnf uuid. Second Key: uuid of the net/sce_net. Value: vim_net_id
+ auxNetDict['scenario'] = {}
+
+ logger.debug("Creating instance from scenario-dict:\n%s", yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False)) #TODO remove
+ instance_name = instance_dict["name"]
+ instance_description = instance_dict.get("description")
- myvim_threads[d] = get_vim_thread(tenant_id, site["datacenter"])
- datacenter2tenant[d] = v['config']['datacenter_tenant_id']
+ try:
+ # 0 check correct parameters
+ for net_name, net_instance_desc in instance_dict.get("networks",{}).iteritems():
+ found = False
+ for scenario_net in scenarioDict['nets']:
+ if net_name == scenario_net["name"]:
+ found = True
+ break
+ if not found:
+ raise NfvoException("Invalid scenario network name '{}' at instance:networks".format(net_name), HTTP_Bad_Request)
+ if "sites" not in net_instance_desc:
+ net_instance_desc["sites"] = [ {} ]
+ site_without_datacenter_field = False
+ for site in net_instance_desc["sites"]:
+ if site.get("datacenter"):
+ if site["datacenter"] not in myvims:
+ #Add this datacenter to myvims
+ d, v = get_datacenter_by_name_uuid(mydb, tenant_id, site["datacenter"])
+ myvims[d] = v
- myvim_threads[d] = get_vim_thread(tenant_id, vnf_instance_desc["datacenter"])
- datacenter2tenant[d] = v['config']['datacenter_tenant_id']
++ myvim_threads_id[d],_ = get_vim_thread(mydb, tenant_id, site["datacenter"])
++ tasks_to_launch[myvim_threads_id[d]] = []
+ site["datacenter"] = d #change name to id
+ else:
+ if site_without_datacenter_field:
+ raise NfvoException("Found more than one entries without datacenter field at instance:networks:{}:sites".format(net_name), HTTP_Bad_Request)
+ site_without_datacenter_field = True
+ site["datacenter"] = default_datacenter_id #change name to id
+
+ for vnf_name, vnf_instance_desc in instance_dict.get("vnfs",{}).iteritems():
+ found=False
+ for scenario_vnf in scenarioDict['vnfs']:
+ if vnf_name == scenario_vnf['name']:
+ found = True
+ break
+ if not found:
+ raise NfvoException("Invalid vnf name '{}' at instance:vnfs".format(vnf_instance_desc), HTTP_Bad_Request)
+ if "datacenter" in vnf_instance_desc:
+ # Add this datacenter to myvims
+ if vnf_instance_desc["datacenter"] not in myvims:
+ d, v = get_datacenter_by_name_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
+ myvims[d] = v
- myvim_thread = myvim_threads[ site["datacenter"] ]
++ myvim_threads_id[d],_ = get_vim_thread(mydb, tenant_id, vnf_instance_desc["datacenter"])
++ tasks_to_launch[myvim_threads_id[d]] = []
+ scenario_vnf["datacenter"] = vnf_instance_desc["datacenter"]
+
+ #0.1 parse cloud-config parameters
+ cloud_config = unify_cloud_config(instance_dict.get("cloud-config"), scenarioDict.get("cloud-config"))
+
+ #0.2 merge instance information into scenario
+ #Ideally, the operation should be as simple as: update(scenarioDict,instance_dict)
+ #However, this is not possible yet.
+ for net_name, net_instance_desc in instance_dict.get("networks",{}).iteritems():
+ for scenario_net in scenarioDict['nets']:
+ if net_name == scenario_net["name"]:
+ if 'ip-profile' in net_instance_desc:
+ ipprofile = net_instance_desc['ip-profile']
+ ipprofile['subnet_address'] = ipprofile.pop('subnet-address',None)
+ ipprofile['ip_version'] = ipprofile.pop('ip-version','IPv4')
+ ipprofile['gateway_address'] = ipprofile.pop('gateway-address',None)
+ ipprofile['dns_address'] = ipprofile.pop('dns-address',None)
+ if 'dhcp' in ipprofile:
+ ipprofile['dhcp_start_address'] = ipprofile['dhcp'].get('start-address',None)
+ ipprofile['dhcp_enabled'] = ipprofile['dhcp'].get('enabled',True)
+ ipprofile['dhcp_count'] = ipprofile['dhcp'].get('count',None)
+ del ipprofile['dhcp']
+ if 'ip_profile' not in scenario_net:
+ scenario_net['ip_profile'] = ipprofile
+ else:
+ update(scenario_net['ip_profile'],ipprofile)
+ for interface in net_instance_desc.get('interfaces', () ):
+ if 'ip_address' in interface:
+ for vnf in scenarioDict['vnfs']:
+ if interface['vnf'] == vnf['name']:
+ for vnf_interface in vnf['interfaces']:
+ if interface['vnf_interface'] == vnf_interface['external_name']:
+ vnf_interface['ip_address']=interface['ip_address']
+
+ #logger.debug(">>>>>>>> Merged dictionary")
+ logger.debug("Creating instance scenario-dict MERGED:\n%s", yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))
+
+
+ # 1. Creating new nets (sce_nets) in the VIM"
+ for sce_net in scenarioDict['nets']:
+ sce_net["vim_id_sites"]={}
+ descriptor_net = instance_dict.get("networks",{}).get(sce_net["name"],{})
+ net_name = descriptor_net.get("vim-network-name")
+ auxNetDict['scenario'][sce_net['uuid']] = {}
+
+ sites = descriptor_net.get("sites", [ {} ])
+ for site in sites:
+ if site.get("datacenter"):
+ vim = myvims[ site["datacenter"] ]
+ datacenter_id = site["datacenter"]
- myvim_thread = myvim_threads[default_datacenter_id]
++ myvim_thread_id = myvim_threads_id[ site["datacenter"] ]
+ else:
+ vim = myvims[ default_datacenter_id ]
+ datacenter_id = default_datacenter_id
- task_id = myvim_thread.insert_task(task)
++ myvim_thread_id = myvim_threads_id[default_datacenter_id]
+ net_type = sce_net['type']
+ lookfor_filter = {'admin_state_up': True, 'status': 'ACTIVE'} #'shared': True
+ if sce_net["external"]:
+ if not net_name:
+ net_name = sce_net["name"]
+ if "netmap-use" in site or "netmap-create" in site:
+ create_network = False
+ lookfor_network = False
+ if "netmap-use" in site:
+ lookfor_network = True
+ if utils.check_valid_uuid(site["netmap-use"]):
+ filter_text = "scenario id '%s'" % site["netmap-use"]
+ lookfor_filter["id"] = site["netmap-use"]
+ else:
+ filter_text = "scenario name '%s'" % site["netmap-use"]
+ lookfor_filter["name"] = site["netmap-use"]
+ if "netmap-create" in site:
+ create_network = True
+ net_vim_name = net_name
+ if site["netmap-create"]:
+ net_vim_name = site["netmap-create"]
+
+ elif sce_net['vim_id'] != None:
+ #there is a netmap at datacenter_nets database #TODO REVISE!!!!
+ create_network = False
+ lookfor_network = True
+ lookfor_filter["id"] = sce_net['vim_id']
+ filter_text = "vim_id '%s' datacenter_netmap name '%s'. Try to reload vims with datacenter-net-update" % (sce_net['vim_id'], sce_net["name"])
+ #look for network at datacenter and return error
+ else:
+ #There is not a netmap, look at datacenter for a net with this name and create if not found
+ create_network = True
+ lookfor_network = True
+ lookfor_filter["name"] = sce_net["name"]
+ net_vim_name = sce_net["name"]
+ filter_text = "scenario name '%s'" % sce_net["name"]
+ else:
+ if not net_name:
+ net_name = "%s.%s" %(instance_name, sce_net["name"])
+ net_name = net_name[:255] #limit length
+ net_vim_name = net_name
+ create_network = True
+ lookfor_network = False
+
+ if lookfor_network:
+ vim_nets = vim.get_network_list(filter_dict=lookfor_filter)
+ if len(vim_nets) > 1:
+ raise NfvoException("More than one candidate VIM network found for " + filter_text, HTTP_Bad_Request )
+ elif len(vim_nets) == 0:
+ if not create_network:
+ raise NfvoException("No candidate VIM network found for " + filter_text, HTTP_Bad_Request )
+ else:
+ sce_net["vim_id_sites"][datacenter_id] = vim_nets[0]['id']
+ auxNetDict['scenario'][sce_net['uuid']][datacenter_id] = vim_nets[0]['id']
+ create_network = False
+ if create_network:
+ #if network is not external
+ task = new_task("new-net", (net_vim_name, net_type, sce_net.get('ip_profile',None)))
- myvim_thread = myvim_threads[ sce_vnf["datacenter"]]
++ task_id = task["id"]
+ instance_tasks[task_id] = task
++ tasks_to_launch[myvim_thread_id].append(task)
+ #network_id = vim.new_network(net_vim_name, net_type, sce_net.get('ip_profile',None))
+ sce_net["vim_id_sites"][datacenter_id] = task_id
+ auxNetDict['scenario'][sce_net['uuid']][datacenter_id] = task_id
+ rollbackList.append({'what':'network', 'where':'vim', 'vim_id':datacenter_id, 'uuid':task_id})
+ sce_net["created"] = True
+
+ # 2. Creating new nets (vnf internal nets) in the VIM"
+ #For each vnf net, we create it and we add it to instanceNetlist.
+ for sce_vnf in scenarioDict['vnfs']:
+ for net in sce_vnf['nets']:
+ if sce_vnf.get("datacenter"):
+ vim = myvims[ sce_vnf["datacenter"] ]
+ datacenter_id = sce_vnf["datacenter"]
- myvim_thread = myvim_threads[default_datacenter_id]
++ myvim_thread_id = myvim_threads_id[ sce_vnf["datacenter"]]
+ else:
+ vim = myvims[ default_datacenter_id ]
+ datacenter_id = default_datacenter_id
- task_id = myvim_thread.insert_task(task)
++ myvim_thread_id = myvim_threads_id[default_datacenter_id]
+ descriptor_net = instance_dict.get("vnfs",{}).get(sce_vnf["name"],{})
+ net_name = descriptor_net.get("name")
+ if not net_name:
+ net_name = "%s.%s" %(instance_name, net["name"])
+ net_name = net_name[:255] #limit length
+ net_type = net['type']
+ task = new_task("new-net", (net_name, net_type, net.get('ip_profile',None)))
- myvim_thread = myvim_threads[ sce_vnf["datacenter"] ]
++ task_id = task["id"]
+ instance_tasks[task_id] = task
++ tasks_to_launch[myvim_thread_id].append(task)
+ # network_id = vim.new_network(net_name, net_type, net.get('ip_profile',None))
+ net['vim_id'] = task_id
+ if sce_vnf['uuid'] not in auxNetDict:
+ auxNetDict[sce_vnf['uuid']] = {}
+ auxNetDict[sce_vnf['uuid']][net['uuid']] = task_id
+ rollbackList.append({'what':'network','where':'vim','vim_id':datacenter_id,'uuid':task_id})
+ net["created"] = True
+
+
+ #print "auxNetDict:"
+ #print yaml.safe_dump(auxNetDict, indent=4, default_flow_style=False)
+
+ # 3. Creating new vm instances in the VIM
+ #myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
+ for sce_vnf in scenarioDict['vnfs']:
+ if sce_vnf.get("datacenter"):
+ vim = myvims[ sce_vnf["datacenter"] ]
- myvim_thread = myvim_threads[ default_datacenter_id ]
++ myvim_thread_id = myvim_threads_id[ sce_vnf["datacenter"] ]
+ datacenter_id = sce_vnf["datacenter"]
+ else:
+ vim = myvims[ default_datacenter_id ]
- if is_task_id(netDict['net_id']):
++ myvim_thread_id = myvim_threads_id[ default_datacenter_id ]
+ datacenter_id = default_datacenter_id
+ sce_vnf["datacenter_id"] = datacenter_id
+ i = 0
+ for vm in sce_vnf['vms']:
+ i += 1
+ myVMDict = {}
+ myVMDict['name'] = "{}.{}.{}".format(instance_name,sce_vnf['name'],chr(96+i))
+ myVMDict['description'] = myVMDict['name'][0:99]
+ # if not startvms:
+ # myVMDict['start'] = "no"
+ myVMDict['name'] = myVMDict['name'][0:255] #limit name length
+ #create image at vim in case it not exist
+ image_dict = mydb.get_table_by_uuid_name("images", vm['image_id'])
+ image_id = create_or_use_image(mydb, {datacenter_id: vim}, image_dict, [], True)
+ vm['vim_image_id'] = image_id
+
+ #create flavor at vim in case it not exist
+ flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
+ if flavor_dict['extended']!=None:
+ flavor_dict['extended']= yaml.load(flavor_dict['extended'])
+ flavor_id = create_or_use_flavor(mydb, {datacenter_id: vim}, flavor_dict, rollbackList, True)
+
+ #Obtain information for additional disks
+ extended_flavor_dict = mydb.get_rows(FROM='datacenters_flavors', SELECT=('extended',), WHERE={'vim_id': flavor_id})
+ if not extended_flavor_dict:
+ raise NfvoException("flavor '{}' not found".format(flavor_id), HTTP_Not_Found)
+ return
+
+ #extended_flavor_dict_yaml = yaml.load(extended_flavor_dict[0])
+ myVMDict['disks'] = None
+ extended_info = extended_flavor_dict[0]['extended']
+ if extended_info != None:
+ extended_flavor_dict_yaml = yaml.load(extended_info)
+ if 'disks' in extended_flavor_dict_yaml:
+ myVMDict['disks'] = extended_flavor_dict_yaml['disks']
+
+ vm['vim_flavor_id'] = flavor_id
+ myVMDict['imageRef'] = vm['vim_image_id']
+ myVMDict['flavorRef'] = vm['vim_flavor_id']
+ myVMDict['networks'] = []
+ task_depends = {}
+ #TODO ALF. connect_mgmt_interfaces. Connect management interfaces if this is true
+ for iface in vm['interfaces']:
+ netDict = {}
+ if iface['type']=="data":
+ netDict['type'] = iface['model']
+ elif "model" in iface and iface["model"]!=None:
+ netDict['model']=iface['model']
+ #TODO in future, remove this because mac_address will not be set, and the type of PV,VF is obtained from iterface table model
+ #discover type of interface looking at flavor
+ for numa in flavor_dict.get('extended',{}).get('numas',[]):
+ for flavor_iface in numa.get('interfaces',[]):
+ if flavor_iface.get('name') == iface['internal_name']:
+ if flavor_iface['dedicated'] == 'yes':
+ netDict['type']="PF" #passthrough
+ elif flavor_iface['dedicated'] == 'no':
+ netDict['type']="VF" #siov
+ elif flavor_iface['dedicated'] == 'yes:sriov':
+ netDict['type']="VFnotShared" #sriov but only one sriov on the PF
+ netDict["mac_address"] = flavor_iface.get("mac_address")
+ break;
+ netDict["use"]=iface['type']
+ if netDict["use"]=="data" and not netDict.get("type"):
+ #print "netDict", netDict
+ #print "iface", iface
+ e_text = "Cannot determine the interface type PF or VF of VNF '%s' VM '%s' iface '%s'" %(sce_vnf['name'], vm['name'], iface['internal_name'])
+ if flavor_dict.get('extended')==None:
+ raise NfvoException(e_text + "After database migration some information is not available. \
+ Try to delete and create the scenarios and VNFs again", HTTP_Conflict)
+ else:
+ raise NfvoException(e_text, HTTP_Internal_Server_Error)
+ if netDict["use"]=="mgmt" or netDict["use"]=="bridge":
+ netDict["type"]="virtual"
+ if "vpci" in iface and iface["vpci"] is not None:
+ netDict['vpci'] = iface['vpci']
+ if "mac" in iface and iface["mac"] is not None:
+ netDict['mac_address'] = iface['mac']
+ if "port-security" in iface and iface["port-security"] is not None:
+ netDict['port_security'] = iface['port-security']
+ if "floating-ip" in iface and iface["floating-ip"] is not None:
+ netDict['floating_ip'] = iface['floating-ip']
+ netDict['name'] = iface['internal_name']
+ if iface['net_id'] is None:
+ for vnf_iface in sce_vnf["interfaces"]:
+ #print iface
+ #print vnf_iface
+ if vnf_iface['interface_id']==iface['uuid']:
+ netDict['net_id'] = auxNetDict['scenario'][ vnf_iface['sce_net_id'] ][datacenter_id]
+ break
+ else:
+ netDict['net_id'] = auxNetDict[ sce_vnf['uuid'] ][ iface['net_id'] ]
- vm_id = myvim_thread.insert_task(task)
- instance_tasks[vm_id] = task
-
++ if netDict.get('net_id') and is_task_id(netDict['net_id']):
+ task_depends[netDict['net_id']] = instance_tasks[netDict['net_id']]
+ #skip bridge ifaces not connected to any net
+ #if 'net_id' not in netDict or netDict['net_id']==None:
+ # continue
+ myVMDict['networks'].append(netDict)
+ #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+ #print myVMDict['name']
+ #print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
+ #print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
+ #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+ if vm.get("boot_data"):
+ cloud_config_vm = unify_cloud_config(vm["boot_data"], cloud_config)
+ else:
+ cloud_config_vm = cloud_config
+ task = new_task("new-vm", (myVMDict['name'], myVMDict['description'], myVMDict.get('start', None),
+ myVMDict['imageRef'], myVMDict['flavorRef'], myVMDict['networks'],
+ cloud_config_vm, myVMDict['disks']), depends=task_depends)
- scenarioDict["datacenter2tenant"] = datacenter2tenant
++ instance_tasks[task["id"]] = task
++ tasks_to_launch[myvim_thread_id].append(task)
++ vm_id = task["id"]
+ vm['vim_id'] = vm_id
+ rollbackList.append({'what':'vm','where':'vim','vim_id':datacenter_id,'uuid':vm_id})
+ #put interface uuid back to scenario[vnfs][vms[[interfaces]
+ for net in myVMDict['networks']:
+ if "vim_id" in net:
+ for iface in vm['interfaces']:
+ if net["name"]==iface["internal_name"]:
+ iface["vim_id"]=net["vim_id"]
+ break
- # Update database with those ended tasks
- for task in instance_tasks.values():
- if task["status"] == "ok":
- if task["name"] == "new-vm":
- mydb.update_rows("instance_vms", UPDATE={"vim_vm_id": task["result"]},
- WHERE={"vim_vm_id": task["id"]})
- elif task["name"] == "new-net":
- mydb.update_rows("instance_nets", UPDATE={"vim_net_id": task["result"]},
- WHERE={"vim_net_id": task["id"]})
++ scenarioDict["datacenter2tenant"] = myvim_threads_id
+ logger.debug("create_instance Deployment done scenarioDict: %s",
+ yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False) )
+ instance_id = mydb.new_instance_scenario_as_a_whole(tenant_id,instance_name, instance_description, scenarioDict)
- myvim_thread = get_vim_thread(tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
++ for myvim_thread_id,task_list in tasks_to_launch.items():
++ for task in task_list:
++ vim_threads["running"][myvim_thread_id].insert_task(task)
++
++ global_instance_tasks[instance_id] = instance_tasks
++ # Update database with those ended instance_tasks
++ # for task in instance_tasks.values():
++ # if task["status"] == "ok":
++ # if task["name"] == "new-vm":
++ # mydb.update_rows("instance_vms", UPDATE={"vim_vm_id": task["result"]},
++ # WHERE={"vim_vm_id": task["id"]})
++ # elif task["name"] == "new-net":
++ # mydb.update_rows("instance_nets", UPDATE={"vim_net_id": task["result"]},
++ # WHERE={"vim_net_id": task["id"]})
+ return mydb.get_instance_scenario(instance_id)
+ except (NfvoException, vimconn.vimconnException,db_base_Exception) as e:
+ message = rollback(mydb, myvims, rollbackList)
+ if isinstance(e, db_base_Exception):
+ error_text = "database Exception"
+ elif isinstance(e, vimconn.vimconnException):
+ error_text = "VIM Exception"
+ else:
+ error_text = "Exception"
+ error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
+ #logger.error("create_instance: %s", error_text)
+ raise NfvoException(error_text, e.http_code)
+
+
+ def delete_instance(mydb, tenant_id, instance_id):
+ #print "Checking that the instance_id exists and getting the instance dictionary"
+ instanceDict = mydb.get_instance_scenario(instance_id, tenant_id)
+ #print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
+ tenant_id = instanceDict["tenant_id"]
+ #print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
+
+ #1. Delete from Database
+ message = mydb.delete_instance_scenario(instance_id, tenant_id)
+
+ #2. delete from VIM
+ error_msg = ""
+ myvims = {}
+ myvim_threads = {}
+
+ #2.1 deleting VMs
+ #vm_fail_list=[]
+ for sce_vnf in instanceDict['vnfs']:
+ datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
- old_task = task_dict.get(task_id)
++ _,myvim_thread = get_vim_thread(mydb, tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=sce_vnf["datacenter_id"],
+ datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"],
+ sce_vnf["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+ for vm in sce_vnf['vms']:
+ if not myvim:
+ error_msg += "\n VM id={} cannot be deleted because datacenter={} not found".format(vm['vim_vm_id'], sce_vnf["datacenter_id"])
+ continue
+ try:
+ task=None
+ if is_task_id(vm['vim_vm_id']):
+ task_id = vm['vim_vm_id']
- task = new_task("del-vm", task_id, depends={task_id: old_task})
++ old_task = global_instance_tasks[instance_id].get(task_id)
+ if not old_task:
+ error_msg += "\n VM was scheduled for create, but task {} is not found".format(task_id)
+ continue
+ with task_lock:
+ if old_task["status"] == "enqueued":
+ old_task["status"] = "deleted"
+ elif old_task["status"] == "error":
+ continue
+ elif old_task["status"] == "processing":
- task = new_task("del-vm", old_task["result"])
++ task = new_task("del-vm", (task_id, vm["interfaces"]), depends={task_id: old_task})
+ else: #ok
- task = new_task("del-vm", vm['vim_vm_id'], store=False)
++ task = new_task("del-vm", (old_task["result"], vm["interfaces"]))
+ else:
- myvim_thread = get_vim_thread(tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
++ task = new_task("del-vm", (vm['vim_vm_id'], vm["interfaces"]) )
+ if task:
+ myvim_thread.insert_task(task)
+ except vimconn.vimconnNotFoundException as e:
+ error_msg+="\n VM VIM_id={} not found at datacenter={}".format(vm['vim_vm_id'], sce_vnf["datacenter_id"])
+ logger.warn("VM instance '%s'uuid '%s', VIM id '%s', from VNF_id '%s' not found",
+ vm['name'], vm['uuid'], vm['vim_vm_id'], sce_vnf['vnf_id'])
+ except vimconn.vimconnException as e:
+ error_msg+="\n VM VIM_id={} at datacenter={} Error: {} {}".format(vm['vim_vm_id'], sce_vnf["datacenter_id"], e.http_code, str(e))
+ logger.error("Error %d deleting VM instance '%s'uuid '%s', VIM_id '%s', from VNF_id '%s': %s",
+ e.http_code, vm['name'], vm['uuid'], vm['vim_vm_id'], sce_vnf['vnf_id'], str(e))
+
+ #2.2 deleting NETS
+ #net_fail_list=[]
+ for net in instanceDict['nets']:
+ if not net['created']:
+ continue #skip not created nets
+ datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
- old_task = task_dict.get(task_id)
++ _,myvim_thread = get_vim_thread(mydb, tenant_id, net["datacenter_id"], net["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=net["datacenter_id"],
+ datacenter_tenant_id=net["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+
+ if not myvim:
+ error_msg += "\n Net VIM_id={} cannot be deleted because datacenter={} not found".format(net['vim_net_id'], net["datacenter_id"])
+ continue
+ try:
+ task = None
+ if is_task_id(net['vim_net_id']):
+ task_id = net['vim_net_id']
- task = new_task("del-net", net['vim_net_id'], store=False)
++ old_task = global_instance_tasks[instance_id].get(task_id)
+ if not old_task:
+ error_msg += "\n NET was scheduled for create, but task {} is not found".format(task_id)
+ continue
+ with task_lock:
+ if old_task["status"] == "enqueued":
+ old_task["status"] = "deleted"
+ elif old_task["status"] == "error":
+ continue
+ elif old_task["status"] == "processing":
+ task = new_task("del-net", task_id, depends={task_id: old_task})
+ else: # ok
+ task = new_task("del-net", old_task["result"])
+ else:
- # Assumption: nfvo_tenant and instance_id were checked before entering into this function
- #print "nfvo.refresh_instance begins"
- #print json.dumps(instanceDict, indent=4)
-
- #print "Getting the VIM URL and the VIM tenant_id"
- myvims={}
-
- # 1. Getting VIM vm and net list
- vms_updated = [] #List of VM instance uuids in openmano that were updated
- vms_notupdated=[]
- vm_list = {}
- for sce_vnf in instanceDict['vnfs']:
- datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
- if datacenter_key not in vm_list:
- vm_list[datacenter_key] = []
- if datacenter_key not in myvims:
- vims = get_vim(mydb, nfvo_tenant, datacenter_id=sce_vnf["datacenter_id"],
- datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
- if len(vims) == 0:
- logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"]))
- myvims[datacenter_key] = None
- else:
- myvims[datacenter_key] = vims.values()[0]
- for vm in sce_vnf['vms']:
- vm_list[datacenter_key].append(vm['vim_vm_id'])
- vms_notupdated.append(vm["uuid"])
-
- nets_updated = [] #List of VM instance uuids in openmano that were updated
- nets_notupdated=[]
- net_list = {}
- for net in instanceDict['nets']:
- datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
- if datacenter_key not in net_list:
- net_list[datacenter_key] = []
- if datacenter_key not in myvims:
- vims = get_vim(mydb, nfvo_tenant, datacenter_id=net["datacenter_id"],
- datacenter_tenant_id=net["datacenter_tenant_id"])
- if len(vims) == 0:
- logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
- myvims[datacenter_key] = None
- else:
- myvims[datacenter_key] = vims.values()[0]
-
- net_list[datacenter_key].append(net['vim_net_id'])
- nets_notupdated.append(net["uuid"])
-
- # 1. Getting the status of all VMs
- vm_dict={}
- for datacenter_key in myvims:
- if not vm_list.get(datacenter_key):
- continue
- failed = True
- failed_message=""
- if not myvims[datacenter_key]:
- failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
- else:
- try:
- vm_dict.update(myvims[datacenter_key].refresh_vms_status(vm_list[datacenter_key]) )
- failed = False
- except vimconn.vimconnException as e:
- logger.error("VIM exception %s %s", type(e).__name__, str(e))
- failed_message = str(e)
- if failed:
- for vm in vm_list[datacenter_key]:
- vm_dict[vm] = {'status': "VIM_ERROR", 'error_msg': failed_message}
-
- # 2. Update the status of VMs in the instanceDict, while collects the VMs whose status changed
- for sce_vnf in instanceDict['vnfs']:
- for vm in sce_vnf['vms']:
- vm_id = vm['vim_vm_id']
- interfaces = vm_dict[vm_id].pop('interfaces', [])
- #2.0 look if contain manamgement interface, and if not change status from ACTIVE:NoMgmtIP to ACTIVE
- has_mgmt_iface = False
- for iface in vm["interfaces"]:
- if iface["type"]=="mgmt":
- has_mgmt_iface = True
- if vm_dict[vm_id]['status'] == "ACTIVE:NoMgmtIP" and not has_mgmt_iface:
- vm_dict[vm_id]['status'] = "ACTIVE"
- if vm_dict[vm_id].get('error_msg') and len(vm_dict[vm_id]['error_msg']) >= 1024:
- vm_dict[vm_id]['error_msg'] = vm_dict[vm_id]['error_msg'][:516] + " ... " + vm_dict[vm_id]['error_msg'][-500:]
- if vm['status'] != vm_dict[vm_id]['status'] or vm.get('error_msg')!=vm_dict[vm_id].get('error_msg') or vm.get('vim_info')!=vm_dict[vm_id].get('vim_info'):
- vm['status'] = vm_dict[vm_id]['status']
- vm['error_msg'] = vm_dict[vm_id].get('error_msg')
- vm['vim_info'] = vm_dict[vm_id].get('vim_info')
- # 2.1. Update in openmano DB the VMs whose status changed
- try:
- updates = mydb.update_rows('instance_vms', UPDATE=vm_dict[vm_id], WHERE={'uuid':vm["uuid"]})
- vms_notupdated.remove(vm["uuid"])
- if updates>0:
- vms_updated.append(vm["uuid"])
- except db_base_Exception as e:
- logger.error("nfvo.refresh_instance error database update: %s", str(e))
- # 2.2. Update in openmano DB the interface VMs
- for interface in interfaces:
- #translate from vim_net_id to instance_net_id
- network_id_list=[]
- for net in instanceDict['nets']:
- if net["vim_net_id"] == interface["vim_net_id"]:
- network_id_list.append(net["uuid"])
- if not network_id_list:
- continue
- del interface["vim_net_id"]
- try:
- for network_id in network_id_list:
- mydb.update_rows('instance_interfaces', UPDATE=interface, WHERE={'instance_vm_id':vm["uuid"], "instance_net_id":network_id})
- except db_base_Exception as e:
- logger.error( "nfvo.refresh_instance error with vm=%s, interface_net_id=%s", vm["uuid"], network_id)
-
- # 3. Getting the status of all nets
- net_dict = {}
- for datacenter_key in myvims:
- if not net_list.get(datacenter_key):
- continue
- failed = True
- failed_message = ""
- if not myvims[datacenter_key]:
- failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
- else:
- try:
- net_dict.update(myvims[datacenter_key].refresh_nets_status(net_list[datacenter_key]) )
- failed = False
- except vimconn.vimconnException as e:
- logger.error("VIM exception %s %s", type(e).__name__, str(e))
- failed_message = str(e)
- if failed:
- for net in net_list[datacenter_key]:
- net_dict[net] = {'status': "VIM_ERROR", 'error_msg': failed_message}
-
- # 4. Update the status of nets in the instanceDict, while collects the nets whose status changed
- # TODO: update nets inside a vnf
- for net in instanceDict['nets']:
- net_id = net['vim_net_id']
- if net_dict[net_id].get('error_msg') and len(net_dict[net_id]['error_msg']) >= 1024:
- net_dict[net_id]['error_msg'] = net_dict[net_id]['error_msg'][:516] + " ... " + net_dict[vm_id]['error_msg'][-500:]
- if net['status'] != net_dict[net_id]['status'] or net.get('error_msg')!=net_dict[net_id].get('error_msg') or net.get('vim_info')!=net_dict[net_id].get('vim_info'):
- net['status'] = net_dict[net_id]['status']
- net['error_msg'] = net_dict[net_id].get('error_msg')
- net['vim_info'] = net_dict[net_id].get('vim_info')
- # 5.1. Update in openmano DB the nets whose status changed
- try:
- updated = mydb.update_rows('instance_nets', UPDATE=net_dict[net_id], WHERE={'uuid':net["uuid"]})
- nets_notupdated.remove(net["uuid"])
- if updated>0:
- nets_updated.append(net["uuid"])
- except db_base_Exception as e:
- logger.error("nfvo.refresh_instance error database update: %s", str(e))
-
- # Returns appropriate output
- #print "nfvo.refresh_instance finishes"
- logger.debug("VMs updated in the database: %s; nets updated in the database %s; VMs not updated: %s; nets not updated: %s",
- str(vms_updated), str(nets_updated), str(vms_notupdated), str(nets_notupdated))
++ task = new_task("del-net", (net['vim_net_id'], net['sdn_net_id']))
+ if task:
+ myvim_thread.insert_task(task)
+ except vimconn.vimconnNotFoundException as e:
+ error_msg += "\n NET VIM_id={} not found at datacenter={}".format(net['vim_net_id'], net["datacenter_id"])
+ logger.warn("NET '%s', VIM_id '%s', from VNF_net_id '%s' not found",
+ net['uuid'], net['vim_net_id'], str(net['vnf_net_id']))
+ except vimconn.vimconnException as e:
+ error_msg += "\n NET VIM_id={} at datacenter={} Error: {} {}".format(net['vim_net_id'],
+ net["datacenter_id"],
+ e.http_code, str(e))
+ logger.error("Error %d deleting NET '%s', VIM_id '%s', from VNF_net_id '%s': %s",
+ e.http_code, net['uuid'], net['vim_net_id'], str(net['vnf_net_id']), str(e))
+ if len(error_msg) > 0:
+ return 'instance ' + message + ' deleted but some elements could not be deleted, or already deleted (error: 404) from VIM: ' + error_msg
+ else:
+ return 'instance ' + message + ' deleted'
+
+
+ def refresh_instance(mydb, nfvo_tenant, instanceDict, datacenter=None, vim_tenant=None):
+ '''Refreshes a scenario instance. It modifies instanceDict'''
+ '''Returns:
+ - result: <0 if there is any unexpected error, n>=0 if no errors where n is the number of vms and nets that couldn't be updated in the database
+ - error_msg
+ '''
- if len(vms_notupdated)+len(nets_notupdated)>0:
- error_msg = "VMs not updated: " + str(vms_notupdated) + "; nets not updated: " + str(nets_notupdated)
- return len(vms_notupdated)+len(nets_notupdated), 'Scenario instance ' + instance_id + ' refreshed but some elements could not be updated in the database: ' + error_msg
++ # # Assumption: nfvo_tenant and instance_id were checked before entering into this function
++ # #print "nfvo.refresh_instance begins"
++ # #print json.dumps(instanceDict, indent=4)
++ #
++ # #print "Getting the VIM URL and the VIM tenant_id"
++ # myvims={}
++ #
++ # # 1. Getting VIM vm and net list
++ # vms_updated = [] #List of VM instance uuids in openmano that were updated
++ # vms_notupdated=[]
++ # vm_list = {}
++ # for sce_vnf in instanceDict['vnfs']:
++ # datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
++ # if datacenter_key not in vm_list:
++ # vm_list[datacenter_key] = []
++ # if datacenter_key not in myvims:
++ # vims = get_vim(mydb, nfvo_tenant, datacenter_id=sce_vnf["datacenter_id"],
++ # datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
++ # if len(vims) == 0:
++ # logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"]))
++ # myvims[datacenter_key] = None
++ # else:
++ # myvims[datacenter_key] = vims.values()[0]
++ # for vm in sce_vnf['vms']:
++ # vm_list[datacenter_key].append(vm['vim_vm_id'])
++ # vms_notupdated.append(vm["uuid"])
++ #
++ # nets_updated = [] #List of VM instance uuids in openmano that were updated
++ # nets_notupdated=[]
++ # net_list = {}
++ # for net in instanceDict['nets']:
++ # datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
++ # if datacenter_key not in net_list:
++ # net_list[datacenter_key] = []
++ # if datacenter_key not in myvims:
++ # vims = get_vim(mydb, nfvo_tenant, datacenter_id=net["datacenter_id"],
++ # datacenter_tenant_id=net["datacenter_tenant_id"])
++ # if len(vims) == 0:
++ # logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
++ # myvims[datacenter_key] = None
++ # else:
++ # myvims[datacenter_key] = vims.values()[0]
++ #
++ # net_list[datacenter_key].append(net['vim_net_id'])
++ # nets_notupdated.append(net["uuid"])
++ #
++ # # 1. Getting the status of all VMs
++ # vm_dict={}
++ # for datacenter_key in myvims:
++ # if not vm_list.get(datacenter_key):
++ # continue
++ # failed = True
++ # failed_message=""
++ # if not myvims[datacenter_key]:
++ # failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
++ # else:
++ # try:
++ # vm_dict.update(myvims[datacenter_key].refresh_vms_status(vm_list[datacenter_key]) )
++ # failed = False
++ # except vimconn.vimconnException as e:
++ # logger.error("VIM exception %s %s", type(e).__name__, str(e))
++ # failed_message = str(e)
++ # if failed:
++ # for vm in vm_list[datacenter_key]:
++ # vm_dict[vm] = {'status': "VIM_ERROR", 'error_msg': failed_message}
++ #
++ # # 2. Update the status of VMs in the instanceDict, while collects the VMs whose status changed
++ # for sce_vnf in instanceDict['vnfs']:
++ # for vm in sce_vnf['vms']:
++ # vm_id = vm['vim_vm_id']
++ # interfaces = vm_dict[vm_id].pop('interfaces', [])
++ # #2.0 look if contain manamgement interface, and if not change status from ACTIVE:NoMgmtIP to ACTIVE
++ # has_mgmt_iface = False
++ # for iface in vm["interfaces"]:
++ # if iface["type"]=="mgmt":
++ # has_mgmt_iface = True
++ # if vm_dict[vm_id]['status'] == "ACTIVE:NoMgmtIP" and not has_mgmt_iface:
++ # vm_dict[vm_id]['status'] = "ACTIVE"
++ # if vm_dict[vm_id].get('error_msg') and len(vm_dict[vm_id]['error_msg']) >= 1024:
++ # vm_dict[vm_id]['error_msg'] = vm_dict[vm_id]['error_msg'][:516] + " ... " + vm_dict[vm_id]['error_msg'][-500:]
++ # if vm['status'] != vm_dict[vm_id]['status'] or vm.get('error_msg')!=vm_dict[vm_id].get('error_msg') or vm.get('vim_info')!=vm_dict[vm_id].get('vim_info'):
++ # vm['status'] = vm_dict[vm_id]['status']
++ # vm['error_msg'] = vm_dict[vm_id].get('error_msg')
++ # vm['vim_info'] = vm_dict[vm_id].get('vim_info')
++ # # 2.1. Update in openmano DB the VMs whose status changed
++ # try:
++ # updates = mydb.update_rows('instance_vms', UPDATE=vm_dict[vm_id], WHERE={'uuid':vm["uuid"]})
++ # vms_notupdated.remove(vm["uuid"])
++ # if updates>0:
++ # vms_updated.append(vm["uuid"])
++ # except db_base_Exception as e:
++ # logger.error("nfvo.refresh_instance error database update: %s", str(e))
++ # # 2.2. Update in openmano DB the interface VMs
++ # for interface in interfaces:
++ # #translate from vim_net_id to instance_net_id
++ # network_id_list=[]
++ # for net in instanceDict['nets']:
++ # if net["vim_net_id"] == interface["vim_net_id"]:
++ # network_id_list.append(net["uuid"])
++ # if not network_id_list:
++ # continue
++ # del interface["vim_net_id"]
++ # try:
++ # for network_id in network_id_list:
++ # mydb.update_rows('instance_interfaces', UPDATE=interface, WHERE={'instance_vm_id':vm["uuid"], "instance_net_id":network_id})
++ # except db_base_Exception as e:
++ # logger.error( "nfvo.refresh_instance error with vm=%s, interface_net_id=%s", vm["uuid"], network_id)
++ #
++ # # 3. Getting the status of all nets
++ # net_dict = {}
++ # for datacenter_key in myvims:
++ # if not net_list.get(datacenter_key):
++ # continue
++ # failed = True
++ # failed_message = ""
++ # if not myvims[datacenter_key]:
++ # failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
++ # else:
++ # try:
++ # net_dict.update(myvims[datacenter_key].refresh_nets_status(net_list[datacenter_key]) )
++ # failed = False
++ # except vimconn.vimconnException as e:
++ # logger.error("VIM exception %s %s", type(e).__name__, str(e))
++ # failed_message = str(e)
++ # if failed:
++ # for net in net_list[datacenter_key]:
++ # net_dict[net] = {'status': "VIM_ERROR", 'error_msg': failed_message}
++ #
++ # # 4. Update the status of nets in the instanceDict, while collects the nets whose status changed
++ # # TODO: update nets inside a vnf
++ # for net in instanceDict['nets']:
++ # net_id = net['vim_net_id']
++ # if net_dict[net_id].get('error_msg') and len(net_dict[net_id]['error_msg']) >= 1024:
++ # net_dict[net_id]['error_msg'] = net_dict[net_id]['error_msg'][:516] + " ... " + net_dict[vm_id]['error_msg'][-500:]
++ # if net['status'] != net_dict[net_id]['status'] or net.get('error_msg')!=net_dict[net_id].get('error_msg') or net.get('vim_info')!=net_dict[net_id].get('vim_info'):
++ # net['status'] = net_dict[net_id]['status']
++ # net['error_msg'] = net_dict[net_id].get('error_msg')
++ # net['vim_info'] = net_dict[net_id].get('vim_info')
++ # # 5.1. Update in openmano DB the nets whose status changed
++ # try:
++ # updated = mydb.update_rows('instance_nets', UPDATE=net_dict[net_id], WHERE={'uuid':net["uuid"]})
++ # nets_notupdated.remove(net["uuid"])
++ # if updated>0:
++ # nets_updated.append(net["uuid"])
++ # except db_base_Exception as e:
++ # logger.error("nfvo.refresh_instance error database update: %s", str(e))
++ #
++ # # Returns appropriate output
++ # #print "nfvo.refresh_instance finishes"
++ # logger.debug("VMs updated in the database: %s; nets updated in the database %s; VMs not updated: %s; nets not updated: %s",
++ # str(vms_updated), str(nets_updated), str(vms_notupdated), str(nets_notupdated))
+ instance_id = instanceDict['uuid']
- config_dict = yaml.load(datacenter["config"])
++ # if len(vms_notupdated)+len(nets_notupdated)>0:
++ # error_msg = "VMs not updated: " + str(vms_notupdated) + "; nets not updated: " + str(nets_notupdated)
++ # return len(vms_notupdated)+len(nets_notupdated), 'Scenario instance ' + instance_id + ' refreshed but some elements could not be updated in the database: ' + error_msg
+
+ return 0, 'Scenario instance ' + instance_id + ' refreshed.'
+
+
def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
    '''Perform an action (start, pause, console, ...) over the VMs of a scenario instance.

    Params:
        mydb: nfvo database connection
        nfvo_tenant: tenant uuid/name owning the instance
        instance_id: uuid or name of the scenario instance
        action_dict: action descriptor; the optional "vnfs"/"vms" keys restrict the
            action to those VNFs/VMs, every remaining key is passed to the VIM
    Returns a dict indexed by vm uuid with the per-VM result
    Raises NfvoException if the datacenter is not found
    '''
    #print "Checking that the instance_id exists and getting the instance dictionary"
    instanceDict = mydb.get_instance_scenario(instance_id, nfvo_tenant)
    #print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)

    #print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
    vims = get_vim(mydb, nfvo_tenant, instanceDict['datacenter_id'])
    if len(vims) == 0:
        raise NfvoException("datacenter '{}' not found".format(str(instanceDict['datacenter_id'])), HTTP_Not_Found)
    myvim = vims.values()[0]

    input_vnfs = action_dict.pop("vnfs", [])
    input_vms = action_dict.pop("vms", [])
    # when no vnf/vm filter is provided the action applies to every VM of the instance
    action_over_all = True if len(input_vnfs)==0 and len(input_vms)==0 else False
    vm_result = {}
    vm_error = 0
    vm_ok = 0
    for sce_vnf in instanceDict['vnfs']:
        for vm in sce_vnf['vms']:
            if not action_over_all:
                if sce_vnf['uuid'] not in input_vnfs and sce_vnf['vnf_name'] not in input_vnfs and \
                        vm['uuid'] not in input_vms and vm['name'] not in input_vms:
                    continue
            try:
                data = myvim.action_vminstance(vm['vim_vm_id'], action_dict)
                if "console" in action_dict:
                    if not global_config["http_console_proxy"]:
                        # no proxy configured: return the console URL as reported by the VIM
                        vm_result[ vm['uuid'] ] = {"vim_result": 200,
                                                   "description": "{protocol}//{ip}:{port}/{suffix}".format(
                                                       protocol=data["protocol"],
                                                       ip = data["server"],
                                                       port = data["port"],
                                                       suffix = data["suffix"]),
                                                   "name":vm['name']
                                                   }
                        vm_ok +=1
                    elif data["server"]=="127.0.0.1" or data["server"]=="localhost":
                        # a loopback console address cannot be proxied to external clients
                        vm_result[ vm['uuid'] ] = {"vim_result": -HTTP_Unauthorized,
                                                   "description": "this console is only reachable by local interface",
                                                   "name":vm['name']
                                                   }
                        vm_error+=1
                    else:
                        # launch (or reuse) a local console proxy thread and return its address
                        try:
                            console_thread = create_or_use_console_proxy_thread(data["server"], data["port"])
                            vm_result[ vm['uuid'] ] = {"vim_result": 200,
                                                       "description": "{protocol}//{ip}:{port}/{suffix}".format(
                                                           protocol=data["protocol"],
                                                           ip = global_config["http_console_host"],
                                                           port = console_thread.port,
                                                           suffix = data["suffix"]),
                                                       "name":vm['name']
                                                       }
                            vm_ok +=1
                        except NfvoException as e:
                            vm_result[ vm['uuid'] ] = {"vim_result": e.http_code, "name":vm['name'], "description": str(e)}
                            vm_error+=1
                else:
                    vm_result[ vm['uuid'] ] = {"vim_result": 200, "description": "ok", "name":vm['name']}
                    vm_ok +=1
            except vimconn.vimconnException as e:
                vm_result[ vm['uuid'] ] = {"vim_result": e.http_code, "name":vm['name'], "description": str(e)}
                vm_error+=1

    # the original ended with "if vm_ok==0: return vm_result else: return vm_result";
    # both branches were identical, so a single return is equivalent
    return vm_result
+
+
def create_or_use_console_proxy_thread(console_server, console_port):
    '''Return a console proxy thread for (console_server, console_port), reusing an existing one when available.

    Raises NfvoException on proxy errors or when no free local port remains.
    '''
    # a proxy thread is identified by the "server:port" pair it forwards to
    thread_key = console_server + ":" + str(console_port)
    if thread_key in global_config["console_thread"]:
        #global_config["console_thread"][console_thread_key].start_timeout()
        return global_config["console_thread"][thread_key]

    # otherwise scan the configured port range for a free local port
    for candidate_port in global_config["console_port_iterator"]():
        if candidate_port in global_config["console_ports"]:
            continue
        try:
            proxy = cli.ConsoleProxyThread(global_config['http_host'], candidate_port, console_server, console_port)
            proxy.start()
        except cli.ConsoleProxyExceptionPortUsed:
            # this port is already taken, try with another
            continue
        except cli.ConsoleProxyException as e:
            raise NfvoException(str(e), HTTP_Bad_Request)
        # register the new proxy so later calls reuse it
        global_config["console_thread"][thread_key] = proxy
        global_config["console_ports"][candidate_port] = thread_key
        return proxy
    raise NfvoException("Not found any free 'http_console_ports'", HTTP_Conflict)
+
+
def check_tenant(mydb, tenant_id):
    '''Ensure the given tenant uuid exists in the database; raise NfvoException(HTTP_Not_Found) otherwise.'''
    matches = mydb.get_rows(FROM='nfvo_tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id})
    if matches:
        return
    raise NfvoException("tenant '{}' not found".format(tenant_id), HTTP_Not_Found)
+
+
def new_tenant(mydb, tenant_dict):
    '''Insert a new NFVO tenant row and return its generated uuid.'''
    return mydb.new_row("nfvo_tenants", tenant_dict, add_uuid=True)
+
+
def delete_tenant(mydb, tenant):
    '''Delete an NFVO tenant given its uuid or name; return "<uuid> <name>" of the deleted row.'''
    # resolve the tenant row first (raises if it does not exist)
    found = mydb.get_table_by_uuid_name('nfvo_tenants', tenant, 'tenant')
    mydb.delete_row_by_id("nfvo_tenants", found['uuid'])
    return "{} {}".format(found['uuid'], found["name"])
+
+
def new_datacenter(mydb, datacenter_descriptor):
    '''Insert a new datacenter row after validating its type.

    "config" is serialized to YAML before storage; the datacenter type
    (default "openvim") must have a matching vimconn_<type> plugin installed.
    Returns the new datacenter uuid.
    Raises NfvoException(HTTP_Bad_Request) if the plugin module is missing.
    '''
    if "config" in datacenter_descriptor:
        datacenter_descriptor["config"]=yaml.safe_dump(datacenter_descriptor["config"],default_flow_style=True,width=256)
    #Check that datacenter-type is correct
    datacenter_type = datacenter_descriptor.get("type", "openvim")
    module_info = None
    try:
        module = "vimconn_" + datacenter_type
        module_info = imp.find_module(module)
    except (IOError, ImportError):
        raise NfvoException("Incorrect datacenter type '{}'. Plugin '{}'.py not installed".format(datacenter_type, module), HTTP_Bad_Request)
    finally:
        # imp.find_module returns an open file object that must always be closed.
        # The original only attempted to close it in the except path (where it is
        # still None) via the unbound 'file.close(...)', leaking the handle on success.
        if module_info and module_info[0]:
            module_info[0].close()

    datacenter_id = mydb.new_row("datacenters", datacenter_descriptor, add_uuid=True)
    return datacenter_id
+
+
def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
    '''Edit an existing datacenter row.

    If "config" is supplied it is merged key by key into the stored YAML config;
    keys whose new value is None are removed from the stored config.
    Returns the datacenter uuid.
    Raises NfvoException(HTTP_Bad_Request) if the config cannot be merged.
    '''
    #obtain data, check that only one exist
    datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id_name)
    #edit data
    datacenter_id = datacenter['uuid']
    where={'uuid': datacenter['uuid']}
    if "config" in datacenter_descriptor:
        if datacenter_descriptor['config'] is not None:
            try:
                new_config_dict = datacenter_descriptor["config"]
                # keys explicitly set to None mean "delete this key from the stored config"
                to_delete=[]
                for k in new_config_dict:
                    if new_config_dict[k] is None:
                        to_delete.append(k)

                config_text = datacenter.get("config")
                if not config_text:
                    config_text = '{}'
                # NOTE(review): yaml.load on DB-stored text; it is written by this module
                # via safe_dump, but yaml.safe_load would be the safer choice — confirm
                config_dict = yaml.load(config_text)
                config_dict.update(new_config_dict)
                #delete null fields
                for k in to_delete:
                    del config_dict[k]
            except Exception as e:
                raise NfvoException("Bad format at datacenter:config " + str(e), HTTP_Bad_Request)
            # store None instead of an empty mapping when nothing remains
            datacenter_descriptor["config"] = yaml.safe_dump(config_dict, default_flow_style=True, width=256) if len(config_dict) > 0 else None
    mydb.update_rows('datacenters', datacenter_descriptor, where)
    return datacenter_id
+
+
def delete_datacenter(mydb, datacenter):
    '''Delete a datacenter given its uuid or name; return "<uuid> <name>" of the deleted row.'''
    # resolve the datacenter row first (raises if it does not exist)
    found = mydb.get_table_by_uuid_name('datacenters', datacenter, 'datacenter')
    mydb.delete_row_by_id("datacenters", found['uuid'])
    return "{} {}".format(found['uuid'], found['name'])
+
+
def associate_datacenter_to_tenant(mydb, nfvo_tenant, datacenter, vim_tenant_id=None, vim_tenant_name=None, vim_username=None, vim_password=None, config=None):
    '''Attach a datacenter to an NFVO tenant and start its VIM worker thread.

    Params:
        mydb: nfvo database connection
        nfvo_tenant: uuid or name of the NFVO tenant
        datacenter: uuid or name of the datacenter
        vim_tenant_id, vim_tenant_name: identify an existing VIM tenant; when both are
            None a new tenant is created at the VIM on behalf of this association
        vim_username, vim_password: credentials stored for this association
        config: optional per-association configuration, stored as YAML
    Returns the datacenter uuid
    Raises NfvoException on conflict (already attached) or VIM errors
    '''
    #get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, None, datacenter)
    datacenter_name = myvim["name"]

    # a VIM tenant must be created only when the caller identifies none
    create_vim_tenant = True if not vim_tenant_id and not vim_tenant_name else False

    # get nfvo_tenant info
    tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', nfvo_tenant)
    if vim_tenant_name==None:
        vim_tenant_name=tenant_dict['name']

    #check that this association does not exist before
    tenants_datacenter_dict={"nfvo_tenant_id":tenant_dict['uuid'], "datacenter_id":datacenter_id }
    tenants_datacenters = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
    if len(tenants_datacenters)>0:
        raise NfvoException("datacenter '{}' and tenant'{}' are already attached".format(datacenter_id, tenant_dict['uuid']), HTTP_Conflict)

    vim_tenant_id_exist_atdb=False
    if not create_vim_tenant:
        where_={"datacenter_id": datacenter_id}
        if vim_tenant_id!=None:
            where_["vim_tenant_id"] = vim_tenant_id
        if vim_tenant_name!=None:
            where_["vim_tenant_name"] = vim_tenant_name
        #check if vim_tenant_id is already at database
        datacenter_tenants_dict = mydb.get_rows(FROM='datacenter_tenants', WHERE=where_)
        if len(datacenter_tenants_dict)>=1:
            # reuse the existing datacenter_tenants row
            datacenter_tenants_dict = datacenter_tenants_dict[0]
            vim_tenant_id_exist_atdb=True
            #TODO check if a field has changed and edit entry at datacenter_tenants at DB
        else: #result=0
            datacenter_tenants_dict = {}
            #insert at table datacenter_tenants
    else: #if vim_tenant_id==None:
        #create tenant at VIM if not provided
        try:
            vim_tenant_id = myvim.new_tenant(vim_tenant_name, "created by openmano for datacenter "+datacenter_name)
        except vimconn.vimconnException as e:
            raise NfvoException("Not possible to create vim_tenant {} at VIM: {}".format(vim_tenant_id, str(e)), HTTP_Internal_Server_Error)
        datacenter_tenants_dict = {}
        # record that the VIM tenant was created by openmano so it is deleted on deassociation
        datacenter_tenants_dict["created"]="true"

    #fill datacenter_tenants table
    if not vim_tenant_id_exist_atdb:
        datacenter_tenants_dict["vim_tenant_id"] = vim_tenant_id
        datacenter_tenants_dict["vim_tenant_name"] = vim_tenant_name
        datacenter_tenants_dict["user"] = vim_username
        datacenter_tenants_dict["passwd"] = vim_password
        datacenter_tenants_dict["datacenter_id"] = datacenter_id
        if config:
            datacenter_tenants_dict["config"] = yaml.safe_dump(config, default_flow_style=True, width=256)
        id_ = mydb.new_row('datacenter_tenants', datacenter_tenants_dict, add_uuid=True)
        datacenter_tenants_dict["uuid"] = id_

    #fill tenants_datacenters table
    tenants_datacenter_dict["datacenter_tenant_id"]=datacenter_tenants_dict["uuid"]
    mydb.new_row('tenants_datacenters', tenants_datacenter_dict)
    # create thread
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_dict['uuid'], datacenter_id) # reload data
    thread_name = get_non_used_vim_name(datacenter_name, datacenter_id, tenant_dict['name'], tenant_dict['uuid'])
    new_thread = vim_thread.vim_thread(myvim, task_lock, thread_name, datacenter_name, db=db, db_lock=db_lock, ovim=ovim)
    new_thread.start()
    # worker threads are indexed by the datacenter_tenants association uuid
    thread_id = datacenter_tenants_dict["uuid"]
    vim_threads["running"][thread_id] = new_thread
    return datacenter_id
+
def edit_datacenter_to_tenant(mydb, nfvo_tenant, datacenter_id, vim_tenant_id=None, vim_tenant_name=None, vim_username=None, vim_password=None, config=None):
    '''Edit a datacenter/tenant association by re-creating it with updated credentials and config.

    Only the parameters supplied by the caller overwrite the stored values;
    "config" is merged into the stored config instead of replacing it.
    Returns the datacenter uuid.
    Raises NfvoException(HTTP_Conflict) when the association does not exist.
    '''
    # Obtain the current data of this datacenter/tenant association
    vim_data = mydb.get_rows(
        SELECT=("datacenter_tenants.vim_tenant_name", "datacenter_tenants.vim_tenant_id", "datacenter_tenants.user",
                "datacenter_tenants.passwd", "datacenter_tenants.config"),
        FROM="datacenter_tenants JOIN tenants_datacenters ON datacenter_tenants.uuid=tenants_datacenters.datacenter_tenant_id",
        WHERE={"tenants_datacenters.nfvo_tenant_id": nfvo_tenant,
               "tenants_datacenters.datacenter_id": datacenter_id})

    logger.debug(str(vim_data))
    if len(vim_data) < 1:
        raise NfvoException("Datacenter {} is not attached for tenant {}".format(datacenter_id, nfvo_tenant), HTTP_Conflict)

    current = vim_data[0]
    if current['config']:
        current['config'] = yaml.load(current['config'])

    # overwrite only the fields the caller actually supplied
    if vim_tenant_id:
        current['vim_tenant_id'] = vim_tenant_id
    if vim_tenant_name:
        current['vim_tenant_name'] = vim_tenant_name
    if vim_username:
        current['user'] = vim_username
    if vim_password:
        current['passwd'] = vim_password
    if config:
        if not current['config']:
            current['config'] = {}
        current['config'].update(config)

    logger.debug(str(current))
    # re-create the association with the merged values
    deassociate_datacenter_to_tenant(mydb, nfvo_tenant, datacenter_id, vim_tenant_id=current['vim_tenant_id'])
    associate_datacenter_to_tenant(mydb, nfvo_tenant, datacenter_id, vim_tenant_id=current['vim_tenant_id'],
                                   vim_tenant_name=current['vim_tenant_name'],
                                   vim_username=current['user'], vim_password=current['passwd'],
                                   config=current['config'])

    return datacenter_id
+
def deassociate_datacenter_to_tenant(mydb, tenant_id, datacenter, vim_tenant_id=None):
    '''Detach a datacenter from an NFVO tenant and stop the associated VIM worker thread.

    Params:
        mydb: nfvo database connection
        tenant_id: tenant uuid/name, or "any"/empty to detach from every tenant
        datacenter: datacenter uuid or name
        vim_tenant_id: unused in the body; kept for interface compatibility
    Returns a result message, including a warning when the VIM tenant could not be deleted
    Raises NfvoException(HTTP_Not_Found) if the association does not exist
    '''
    #get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, None, datacenter)

    #get nfvo_tenant info
    if not tenant_id or tenant_id=="any":
        tenant_uuid = None
    else:
        tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
        tenant_uuid = tenant_dict['uuid']

    #check that this association exist before
    tenants_datacenter_dict={"datacenter_id":datacenter_id }
    if tenant_uuid:
        tenants_datacenter_dict["nfvo_tenant_id"] = tenant_uuid
    tenant_datacenter_list = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
    if len(tenant_datacenter_list)==0 and tenant_uuid:
        raise NfvoException("datacenter '{}' and tenant '{}' are not attached".format(datacenter_id, tenant_dict['uuid']), HTTP_Not_Found)

    #delete this association
    mydb.delete_row(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)

    #get vim_tenant info and deletes
    warning=''
    for tenant_datacenter_item in tenant_datacenter_list:
        vim_tenant_dict = mydb.get_table_by_uuid_name('datacenter_tenants', tenant_datacenter_item['datacenter_tenant_id'])
        #try to delete vim:tenant
        try:
            mydb.delete_row_by_id('datacenter_tenants', tenant_datacenter_item['datacenter_tenant_id'])
            if vim_tenant_dict['created']=='true':
                #delete tenant at VIM if created by NFVO
                try:
                    myvim.delete_tenant(vim_tenant_dict['vim_tenant_id'])
                except vimconn.vimconnException as e:
                    # best effort: keep going and report the failure in the result message
                    warning = "Not possible to delete vim_tenant_id {} from VIM: {} ".format(vim_tenant_dict['vim_tenant_id'], str(e))
                    logger.warn(warning)
        except db_base_Exception as e:
            logger.error("Cannot delete datacenter_tenants " + str(e))
            pass # the error will be caused because dependencies, vim_tenant can not be deleted
        # ask the worker thread of this association to finish and park it for cleanup
        thread_id = tenant_datacenter_item["datacenter_tenant_id"]
        thread = vim_threads["running"][thread_id]
        thread.insert_task(new_task("exit", None))
        vim_threads["deleting"][thread_id] = thread
    return "datacenter {} detached. {}".format(datacenter_id, warning)
+
+
def datacenter_action(mydb, tenant_id, datacenter, action_dict):
    '''DEPRECATED. Perform a network administrative action over a datacenter.

    action_dict must contain exactly one of:
        "net-update": re-import from the VIM every shared, active network
        "net-edit":   update a datacenter_nets row selected by uuid or name
        "net-delete": delete a datacenter_nets row selected by uuid or name
    Returns the number of inserted nets (net-update) or the DB operation result.
    Raises NfvoException on VIM errors or unknown actions.
    '''
    #get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)

    if 'net-update' in action_dict:
        try:
            nets = myvim.get_network_list(filter_dict={'shared': True, 'admin_state_up': True, 'status': 'ACTIVE'})
        except vimconn.vimconnException as e:
            #logger.error("nfvo.datacenter_action() Not possible to get_network_list from VIM: %s ", str(e))
            raise NfvoException(str(e), HTTP_Internal_Server_Error)
        #update nets Change from VIM format to NFVO format
        net_list=[]
        for net in nets:
            net_nfvo={'datacenter_id': datacenter_id}
            net_nfvo['name'] = net['name']
            #net_nfvo['description']= net['name']
            net_nfvo['vim_net_id'] = net['id']
            net_nfvo['type'] = net['type'][0:6] #change from ('ptp','data','bridge_data','bridge_man') to ('bridge','data','ptp')
            net_nfvo['shared'] = net['shared']
            net_nfvo['multipoint'] = False if net['type']=='ptp' else True
            net_list.append(net_nfvo)
        inserted, deleted = mydb.update_datacenter_nets(datacenter_id, net_list)
        logger.info("Inserted %d nets, deleted %d old nets", inserted, deleted)
        return inserted
    elif 'net-edit' in action_dict:
        net = action_dict['net-edit'].pop('net')
        what = 'vim_net_id' if utils.check_valid_uuid(net) else 'name'
        result = mydb.update_rows('datacenter_nets', action_dict['net-edit'],
                                  WHERE={'datacenter_id':datacenter_id, what: net})
        return result
    elif 'net-delete' in action_dict:
        # fixed typo: the original read action_dict['net-deelte'] here and always raised KeyError
        net = action_dict['net-delete'].get('net')
        what = 'vim_net_id' if utils.check_valid_uuid(net) else 'name'
        result = mydb.delete_row(FROM='datacenter_nets',
                                 WHERE={'datacenter_id':datacenter_id, what: net})
        return result
    else:
        raise NfvoException("Unknown action " + str(action_dict), HTTP_Bad_Request)
+
+
def datacenter_edit_netmap(mydb, tenant_id, datacenter, netmap, action_dict):
    '''Update a datacenter netmap entry, identified by uuid or by name.'''
    # resolve the datacenter uuid (also validates tenant attachment)
    datacenter_id, _ = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)

    # a valid uuid selects by 'uuid', anything else selects by 'name'
    key_column = 'uuid' if utils.check_valid_uuid(netmap) else 'name'
    return mydb.update_rows('datacenter_nets', action_dict['netmap'],
                            WHERE={'datacenter_id': datacenter_id, key_column: netmap})
+
+
def datacenter_new_netmap(mydb, tenant_id, datacenter, action_dict=None):
    '''Create netmap entries for a datacenter from networks discovered at the VIM.

    Params:
        mydb: nfvo database connection
        tenant_id: tenant uuid/name used to resolve the datacenter
        datacenter: datacenter uuid or name
        action_dict: optional; its "netmap" entry may select one VIM network with
            "vim_id"/"vim_name" and may rename it with "name". When None, every
            shared VIM network is imported.
    Returns the list of created netmap entries (each with a "status" field)
    Raises NfvoException if the VIM query fails or the selection is ambiguous/empty
    '''
    #get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    filter_dict={}
    if action_dict:
        action_dict = action_dict["netmap"]
        if 'vim_id' in action_dict:
            filter_dict["id"] = action_dict['vim_id']
        if 'vim_name' in action_dict:
            filter_dict["name"] = action_dict['vim_name']
    else:
        # no selection: import every shared network
        filter_dict["shared"] = True

    try:
        vim_nets = myvim.get_network_list(filter_dict=filter_dict)
    except vimconn.vimconnException as e:
        #logger.error("nfvo.datacenter_new_netmap() Not possible to get_network_list from VIM: %s ", str(e))
        raise NfvoException(str(e), HTTP_Internal_Server_Error)
    if len(vim_nets)>1 and action_dict:
        # an explicit selection must match exactly one network
        raise NfvoException("more than two networks found, specify with vim_id", HTTP_Conflict)
    elif len(vim_nets)==0: # and action_dict:
        raise NfvoException("Not found a network at VIM with " + str(filter_dict), HTTP_Not_Found)
    net_list=[]
    for net in vim_nets:
        net_nfvo={'datacenter_id': datacenter_id}
        if action_dict and "name" in action_dict:
            net_nfvo['name'] = action_dict['name']
        else:
            net_nfvo['name'] = net['name']
        #net_nfvo['description']= net['name']
        net_nfvo['vim_net_id'] = net['id']
        net_nfvo['type'] = net['type'][0:6] #change from ('ptp','data','bridge_data','bridge_man') to ('bridge','data','ptp')
        net_nfvo['shared'] = net['shared']
        net_nfvo['multipoint'] = False if net['type']=='ptp' else True
        try:
            net_id = mydb.new_row("datacenter_nets", net_nfvo, add_uuid=True)
            net_nfvo["status"] = "OK"
            net_nfvo["uuid"] = net_id
        except db_base_Exception as e:
            # bulk import is best-effort: record the per-network failure instead of aborting;
            # with an explicit selection the error is propagated
            if action_dict:
                raise
            else:
                net_nfvo["status"] = "FAIL: " + str(e)
        net_list.append(net_nfvo)
    return net_list
+
+
def vim_action_get(mydb, tenant_id, datacenter, item, name):
    '''Retrieve "networks", "tenants" or "images" directly from the VIM of a datacenter.

    Params:
        mydb: nfvo database connection
        tenant_id: tenant uuid/name used to resolve the datacenter
        datacenter: datacenter uuid or name
        item: one of "networks", "tenants", "images"
        name: optional uuid or name filter; with a filter a single element is returned
    Returns {"<singular item>": element} when filtered, {"<item>": [elements]} otherwise
    Raises NfvoException when nothing matches or the VIM call fails
    '''
    #get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    filter_dict={}
    if name:
        if utils.check_valid_uuid(name):
            filter_dict["id"] = name
        else:
            filter_dict["name"] = name
    try:
        if item=="networks":
            #filter_dict['tenant_id'] = myvim['tenant_id']
            content = myvim.get_network_list(filter_dict=filter_dict)
        elif item=="tenants":
            content = myvim.get_tenant_list(filter_dict=filter_dict)
        elif item == "images":
            content = myvim.get_image_list(filter_dict=filter_dict)
        else:
            raise NfvoException(item + "?", HTTP_Method_Not_Allowed)
        logger.debug("vim_action response %s", content) #update nets Change from VIM format to NFVO format
        if name and len(content)==1:
            return {item[:-1]: content[0]}
        elif name and len(content)==0:
            # fixed: the second NfvoException argument is the HTTP code; the original
            # passed 'datacenter' (a name/uuid string) here instead of HTTP_Not_Found
            raise NfvoException("No {} found with ".format(item[:-1]) + " and ".join(map(lambda x: str(x[0])+": "+str(x[1]), filter_dict.iteritems())),
                                HTTP_Not_Found)
        else:
            return {item: content}
    except vimconn.vimconnException as e:
        # replaced the stray py2 'print' with the module logger used everywhere else
        logger.error("vim_action Not possible to get_%s_list from VIM: %s ", item, str(e))
        raise NfvoException("Not possible to get_{}_list from VIM: {}".format(item, str(e)), e.http_code)
+
+
def vim_action_delete(mydb, tenant_id, datacenter, item, name):
    '''Delete a "networks", "tenants" or "images" element directly at the VIM.

    Params:
        mydb: nfvo database connection
        tenant_id: tenant uuid/name, or "any" to skip the tenant check
        datacenter: datacenter uuid or name
        item: one of "networks", "tenants", "images"
        name: uuid or name identifying the element
    Returns a text message describing the deleted element
    Raises NfvoException if the element is missing, ambiguous, or the VIM fails
    '''
    #get datacenter info
    if tenant_id == "any":
        tenant_id=None

    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    #get uuid name
    content = vim_action_get(mydb, tenant_id, datacenter, item, name)
    logger.debug("vim_action_delete vim response: " + str(content))
    # vim_action_get returns a single-key dict; take its value (py2 .values()[0])
    items = content.values()[0]
    if type(items)==list and len(items)==0:
        raise NfvoException("Not found " + item, HTTP_Not_Found)
    elif type(items)==list and len(items)>1:
        raise NfvoException("Found more than one {} with this name. Use uuid.".format(item), HTTP_Not_Found)
    else: # it is a dict
        item_id = items["id"]
        item_name = str(items.get("name"))

    try:
        if item=="networks":
            content = myvim.delete_network(item_id)
        elif item=="tenants":
            content = myvim.delete_tenant(item_id)
        elif item == "images":
            content = myvim.delete_image(item_id)
        else:
            raise NfvoException(item + "?", HTTP_Method_Not_Allowed)
    except vimconn.vimconnException as e:
        #logger.error( "vim_action Not possible to delete_{} {}from VIM: {} ".format(item, name, str(e)))
        raise NfvoException("Not possible to delete_{} {} from VIM: {}".format(item, name, str(e)), e.http_code)

    return "{} {} {} deleted".format(item[:-1], item_id,item_name)
+
+
def vim_action_create(mydb, tenant_id, datacenter, item, descriptor):
    '''Create a "networks" or "tenants" element directly at the VIM.

    Params:
        mydb: nfvo database connection
        tenant_id: tenant uuid/name, or "any" to skip the tenant check
        datacenter: datacenter uuid or name
        item: one of "networks", "tenants"
        descriptor: dict holding a "network" or "tenant" entry that describes the element
    Returns the created element as retrieved back from the VIM
    Raises NfvoException on VIM errors or unsupported items
    '''
    #get datacenter info
    logger.debug("vim_action_create descriptor %s", str(descriptor))
    if tenant_id == "any":
        tenant_id=None
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    try:
        if item=="networks":
            net = descriptor["network"]
            net_name = net.pop("name")
            net_type = net.pop("type", "bridge")
            net_public = net.pop("shared", False)
            net_ipprofile = net.pop("ip_profile", None)
            net_vlan = net.pop("vlan", None)
            content = myvim.new_network(net_name, net_type, net_ipprofile, shared=net_public, vlan=net_vlan) #, **net)
        elif item=="tenants":
            tenant = descriptor["tenant"]
            content = myvim.new_tenant(tenant["name"], tenant.get("description"))
        else:
            raise NfvoException(item + "?", HTTP_Method_Not_Allowed)
    except vimconn.vimconnException as e:
        raise NfvoException("Not possible to create {} at VIM: {}".format(item, str(e)), e.http_code)

    # read the element back so the caller receives the VIM's view of it
    return vim_action_get(mydb, tenant_id, datacenter, item, content)
++
def sdn_controller_create(mydb, tenant_id, sdn_controller):
    '''Create a new SDN controller through ovim and return its uuid.'''
    controller_uuid = ovim.new_of_controller(sdn_controller)
    logger.debug('New SDN controller created with uuid {}'.format(controller_uuid))
    return controller_uuid
++
def sdn_controller_update(mydb, tenant_id, controller_id, sdn_controller):
    '''Update an existing SDN controller through ovim; return a status message.'''
    updated = ovim.edit_of_controller(controller_id, sdn_controller)
    message = 'SDN controller {} updated'.format(updated)
    logger.debug(message)
    return message
++
def sdn_controller_list(mydb, tenant_id, controller_id=None):
    '''Return every SDN controller known to ovim, or a single one when controller_id is given.'''
    if controller_id is None:
        data = ovim.get_of_controllers()
    else:
        data = ovim.show_of_controller(controller_id)

    logger.debug('SDN controller list:\n {}'.format(data))
    return data
++
def sdn_controller_delete(mydb, tenant_id, controller_id):
    '''Delete an SDN controller unless some datacenter configuration still references it.

    Raises NfvoException(HTTP_Conflict) when the controller is in use.
    '''
    # refuse deletion while any datacenter's config points at this controller
    for dc in mydb.get_rows(FROM='datacenters', SELECT=('uuid', 'config')):
        if not dc['config']:
            continue
        dc_config = yaml.load(dc['config'])
        if 'sdn-controller' in dc_config and dc_config['sdn-controller'] == controller_id:
            raise NfvoException("SDN controller {} is in use by datacenter {}".format(controller_id, dc['uuid']), HTTP_Conflict)

    deleted = ovim.delete_of_controller(controller_id)
    message = 'SDN controller {} deleted'.format(deleted)
    logger.debug(message)
    return message
++
def datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, sdn_port_mapping):
    '''Store a datacenter's compute-node to switch-port mapping in the SDN controller.

    Params:
        mydb: nfvo database connection
        tenant_id: unused here; kept for interface consistency
        datacenter_id: datacenter uuid; its config must reference an "sdn-controller"
        sdn_port_mapping: list of {"compute_node", "ports": [{"pci", "switch_port", "switch_mac"}]}
    Returns the result of ovim.set_of_port_mapping
    Raises NfvoException if the datacenter or its SDN controller is missing, or a port entry is incomplete
    '''
    controller = mydb.get_rows(FROM="datacenters", SELECT=("config",), WHERE={"uuid":datacenter_id})
    if len(controller) < 1:
        raise NfvoException("Datacenter {} not present in the database".format(datacenter_id), HTTP_Not_Found)

    try:
        sdn_controller_id = yaml.load(controller[0]["config"])["sdn-controller"]
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are not swallowed;
        # any parse/lookup failure means no SDN controller is associated
        raise NfvoException("The datacenter {} has not an SDN controller associated".format(datacenter_id), HTTP_Bad_Request)

    sdn_controller = ovim.show_of_controller(sdn_controller_id)
    switch_dpid = sdn_controller["dpid"]

    maps = list()
    for compute_node in sdn_port_mapping:
        #element = {"ofc_id": sdn_controller_id, "region": datacenter_id, "switch_dpid": switch_dpid}
        element = dict()
        element["compute_node"] = compute_node["compute_node"]
        for port in compute_node["ports"]:
            element["pci"] = port.get("pci")
            element["switch_port"] = port.get("switch_port")
            element["switch_mac"] = port.get("switch_mac")
            # each port needs a pci address plus at least one switch identifier
            if not element["pci"] or not (element["switch_port"] or element["switch_mac"]):
                raise NfvoException ("The mapping must contain the 'pci' and at least one of the elements 'switch_port'"
                                     " or 'switch_mac'", HTTP_Bad_Request)
            # copy: 'element' is reused for every port of this compute node
            maps.append(dict(element))

    return ovim.set_of_port_mapping(maps, ofc_id=sdn_controller_id, switch_dpid=switch_dpid, region=datacenter_id)
++
def datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id):
    '''Return the SDN port mapping of a datacenter, grouped by compute node.

    The result dict contains "sdn-controller", "datacenter-id", "dpid" and
    "ports_mapping" (a list of {"compute_node", "ports"} entries).
    Raises NfvoException if the SDN controller information is incomplete or the
    stored mapping rows are inconsistent with it.
    '''
    maps = ovim.get_of_port_mappings(db_filter={"region": datacenter_id})

    result = {
        "sdn-controller": None,
        "datacenter-id": datacenter_id,
        "dpid": None,
        "ports_mapping": list()
    }

    # resolve the SDN controller and its dpid from the datacenter config
    datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id)
    if datacenter['config']:
        config = yaml.load(datacenter['config'])
        if 'sdn-controller' in config:
            controller_id = config['sdn-controller']
            sdn_controller = sdn_controller_list(mydb, tenant_id, controller_id)
            result["sdn-controller"] = controller_id
            result["dpid"] = sdn_controller["dpid"]

    if result["sdn-controller"] == None or result["dpid"] == None:
        raise NfvoException("Not all SDN controller information for datacenter {} could be found: {}".format(datacenter_id, result),
                            HTTP_Internal_Server_Error)

    if len(maps) == 0:
        return result

    # group the flat mapping rows by compute node, checking consistency on the way
    ports_correspondence_dict = dict()
    for link in maps:
        if result["sdn-controller"] != link["ofc_id"]:
            raise NfvoException("The sdn-controller specified for different port mappings differ", HTTP_Internal_Server_Error)
        if result["dpid"] != link["switch_dpid"]:
            raise NfvoException("The dpid specified for different port mappings differ", HTTP_Internal_Server_Error)
        element = dict()
        element["pci"] = link["pci"]
        if link["switch_port"]:
            element["switch_port"] = link["switch_port"]
        if link["switch_mac"]:
            element["switch_mac"] = link["switch_mac"]

        if not link["compute_node"] in ports_correspondence_dict:
            content = dict()
            content["compute_node"] = link["compute_node"]
            content["ports"] = list()
            ports_correspondence_dict[link["compute_node"]] = content

        ports_correspondence_dict[link["compute_node"]]["ports"].append(element)

    # deterministic output: compute nodes sorted by name
    for key in sorted(ports_correspondence_dict):
        result["ports_mapping"].append(ports_correspondence_dict[key])

    return result
++
def datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id):
    '''Remove every SDN port-mapping entry registered for the given datacenter.'''
    db_filter = {"region": datacenter_id}
    return ovim.clear_of_port_mapping(db_filter=db_filter)
--- /dev/null
- " ii.ip_address as ip_address, vim_info, i.type as type"\
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ '''
+ NFVO DB engine. It implements all the methods to interact with the Openmano Database
+ '''
+ __author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+ __date__ ="$28-aug-2014 10:05:01$"
+
+ import db_base
+ import MySQLdb as mdb
+ import json
+ import yaml
+ import time
+ #import sys, os
+
# Tables whose rows carry a creation timestamp; db_base consults this list
# (via the class attribute set in nfvo_db.__init__) when inserting rows.
tables_with_createdat_field=["datacenters","instance_nets","instance_scenarios","instance_vms","instance_vnfs",
                             "interfaces","nets","nfvo_tenants","scenarios","sce_interfaces","sce_nets",
                             "sce_vnfs","tenants_datacenters","datacenter_tenants","vms","vnfs", "datacenter_nets"]
+
+ class nfvo_db(db_base.db_base):
    def __init__(self, host=None, user=None, passwd=None, database=None, log_name='openmano.db', log_level=None):
        '''Initialize the NFVO database wrapper and register the tables with a created_at field.'''
        db_base.db_base.__init__(self, host, user, passwd, database, log_name, log_level)
        # NOTE: this mutates a db_base CLASS attribute, shared by every db_base instance
        db_base.db_base.tables_with_created_field=tables_with_createdat_field
        return
+
    def new_vnf_as_a_whole(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
        """Insert a VNF and all its related rows (vms, nets, interfaces) in one transaction.

        Params:
            nfvo_tenant:    tenant identifier (not used directly here; ownership is
                            taken from vnf_descriptor['vnf'].get('tenant_id'))
            vnf_name:       name stored in the 'vnfs' table
            vnf_descriptor: parsed VNF descriptor; the 'vnf' key holds the VNFC list
                            and the internal/external connection definitions
            VNFCDict:       dict of VM/VNFC rows to insert into the 'vms' table
        Returns the uuid of the new 'vnfs' row.
        Raises a db_base exception (via _format_error) on persistent DB errors.
        """
        self.logger.debug("Adding new vnf to the NFVO database")
        tries = 2   # retry once on transient DB errors (_format_error decides whether to re-raise)
        while tries:
            created_time = time.time()
            try:
                with self.con:   # transaction: commit on success, rollback on exception

                    myVNFDict = {}
                    myVNFDict["name"] = vnf_name
                    myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
                    myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
                    myVNFDict["description"] = vnf_descriptor['vnf']['description']
                    myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
                    myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")

                    vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
                    #print "Adding new vms to the NFVO database"
                    #For each vm, we must create the appropriate vm in the NFVO database.
                    vmDict = {}
                    for _,vm in VNFCDict.iteritems():
                        #This code could make the name of the vms grow and grow.
                        #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
                        #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
                        #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
                        vm["vnf_id"] = vnf_id
                        # created_time is bumped per row so ORDER BY created_at preserves insertion order
                        created_time += 0.00001
                        vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
                        #print "Internal vm id in NFVO DB: %s" % vm_id
                        vmDict[vm['name']] = vm_id

                    #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
                    bridgeInterfacesDict = {}
                    for vm in vnf_descriptor['vnf']['VNFC']:
                        if 'bridge-ifaces' in vm:
                            bridgeInterfacesDict[vm['name']] = {}
                            for bridgeiface in vm['bridge-ifaces']:
                                created_time += 0.00001
                                # normalize descriptor keys (dashes) to DB column names (underscores)
                                if 'port-security' in bridgeiface:
                                    bridgeiface['port_security'] = bridgeiface.pop('port-security')
                                if 'floating-ip' in bridgeiface:
                                    bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
                                db_base._convert_bandwidth(bridgeiface, logger=self.logger)
                                bridgeInterfacesDict[vm['name']][bridgeiface['name']] = {}
                                bridgeInterfacesDict[vm['name']][bridgeiface['name']]['vpci'] = bridgeiface.get('vpci',None)
                                bridgeInterfacesDict[vm['name']][bridgeiface['name']]['mac'] = bridgeiface.get('mac_address',None)
                                bridgeInterfacesDict[vm['name']][bridgeiface['name']]['bw'] = bridgeiface.get('bandwidth', None)
                                bridgeInterfacesDict[vm['name']][bridgeiface['name']]['model'] = bridgeiface.get('model', None)
                                # stored as int because the DB column is numeric (0/1)
                                bridgeInterfacesDict[vm['name']][bridgeiface['name']]['port_security'] = \
                                    int(bridgeiface.get('port_security', True))
                                bridgeInterfacesDict[vm['name']][bridgeiface['name']]['floating_ip'] = \
                                    int(bridgeiface.get('floating_ip', False))
                                bridgeInterfacesDict[vm['name']][bridgeiface['name']]['created_time'] = created_time

                    # Collect the data interfaces of each VM/VNFC under the 'numas' field
                    dataifacesDict = {}
                    for vm in vnf_descriptor['vnf']['VNFC']:
                        dataifacesDict[vm['name']] = {}
                        for numa in vm.get('numas', []):
                            for dataiface in numa.get('interfaces', []):
                                created_time += 0.00001
                                db_base._convert_bandwidth(dataiface, logger=self.logger)
                                dataifacesDict[vm['name']][dataiface['name']] = {}
                                dataifacesDict[vm['name']][dataiface['name']]['vpci'] = dataiface['vpci']
                                dataifacesDict[vm['name']][dataiface['name']]['bw'] = dataiface['bandwidth']
                                # 'dedicated' yes/no/other maps to PF (passthrough), VF (SR-IOV), VFnotShared
                                dataifacesDict[vm['name']][dataiface['name']]['model'] = "PF" if dataiface[
                                    'dedicated'] == "yes" else (
                                    "VF" if dataiface['dedicated'] == "no" else "VFnotShared")
                                dataifacesDict[vm['name']][dataiface['name']]['created_time'] = created_time

                    #For each internal connection, we add it to the interfaceDict and we create the appropriate net in the NFVO database.
                    #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
                    internalconnList = []
                    if 'internal-connections' in vnf_descriptor['vnf']:
                        for net in vnf_descriptor['vnf']['internal-connections']:
                            #print "Net name: %s. Description: %s" % (net['name'], net['description'])

                            myNetDict = {}
                            myNetDict["name"] = net['name']
                            myNetDict["description"] = net['description']
                            myNetDict["type"] = net['type']
                            myNetDict["vnf_id"] = vnf_id

                            created_time += 0.00001
                            net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)

                            for element in net['elements']:
                                ifaceItem = {}
                                #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
                                ifaceItem["internal_name"] = element['local_iface_name']
                                #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
                                ifaceItem["vm_id"] = vmDict[element['VNFC']]
                                ifaceItem["net_id"] = net_id
                                ifaceItem["type"] = net['type']
                                # data-plane ifaces come from the numas section; the rest are bridge ifaces
                                if ifaceItem ["type"] == "data":
                                    dataiface = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
                                    ifaceItem["vpci"] = dataiface['vpci']
                                    ifaceItem["bw"] = dataiface['bw']
                                    ifaceItem["model"] = dataiface['model']
                                    created_time_iface = dataiface['created_time']
                                else:
                                    bridgeiface = bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
                                    ifaceItem["vpci"] = bridgeiface['vpci']
                                    ifaceItem["mac"] = bridgeiface['mac']
                                    ifaceItem["bw"] = bridgeiface['bw']
                                    ifaceItem["model"] = bridgeiface['model']
                                    ifaceItem["port_security"] = bridgeiface['port_security']
                                    ifaceItem["floating_ip"] = bridgeiface['floating_ip']
                                    created_time_iface = bridgeiface['created_time']
                                internalconnList.append(ifaceItem)
                            #print "Internal net id in NFVO DB: %s" % net_id

                    #print "Adding internal interfaces to the NFVO database (if any)"
                    for iface in internalconnList:
                        #print "Iface name: %s" % iface['internal_name']
                        # NOTE(review): created_time_iface here is the one left over from the
                        # LAST element of the collection loop above, not per-iface — looks
                        # intentional enough to keep ordering, but confirm against schema use.
                        iface_id = self._new_row_internal('interfaces', iface, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
                        #print "Iface id in NFVO DB: %s" % iface_id

                    #print "Adding external interfaces to the NFVO database"
                    for iface in vnf_descriptor['vnf']['external-connections']:
                        myIfaceDict = {}
                        #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
                        myIfaceDict["internal_name"] = iface['local_iface_name']
                        #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
                        myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
                        myIfaceDict["external_name"] = iface['name']
                        myIfaceDict["type"] = iface['type']
                        if iface["type"] == "data":
                            dataiface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
                            myIfaceDict["vpci"] = dataiface['vpci']
                            myIfaceDict["bw"] = dataiface['bw']
                            myIfaceDict["model"] = dataiface['model']
                            created_time_iface = dataiface['created_time']
                        else:
                            bridgeiface = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
                            myIfaceDict["vpci"] = bridgeiface['vpci']
                            myIfaceDict["bw"] = bridgeiface['bw']
                            myIfaceDict["model"] = bridgeiface['model']
                            myIfaceDict["mac"] = bridgeiface['mac']
                            myIfaceDict["port_security"]= bridgeiface['port_security']
                            myIfaceDict["floating_ip"] = bridgeiface['floating_ip']
                            created_time_iface = bridgeiface['created_time']
                        #print "Iface name: %s" % iface['name']
                        iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
                        #print "Iface id in NFVO DB: %s" % iface_id

                    return vnf_id

            except (mdb.Error, AttributeError) as e:
                self._format_error(e, tries)
            tries -= 1
+
    def new_vnf_as_a_whole2(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
        """Insert a VNF and all related rows, for v2 descriptors.

        Like new_vnf_as_a_whole but additionally:
        - maps internal-connection 'implementation'/'type' descriptor fields
          (overlay/underlay, e-line/e-lan) onto DB net types (bridge/ptp/data)
        - stores per-net 'ip-profile' rows in the ip_profiles table
        - stores per-element ip_address on internal interfaces

        Params mirror new_vnf_as_a_whole. Returns the uuid of the new 'vnfs' row.
        Raises a db_base exception (via _format_error) on persistent DB errors.
        """
        self.logger.debug("Adding new vnf to the NFVO database")
        tries = 2   # retry once on transient DB errors
        while tries:
            created_time = time.time()
            try:
                with self.con:   # transaction: commit on success, rollback on exception

                    myVNFDict = {}
                    myVNFDict["name"] = vnf_name
                    myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
                    myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
                    myVNFDict["description"] = vnf_descriptor['vnf']['description']
                    myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
                    myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")

                    vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
                    #print "Adding new vms to the NFVO database"
                    #For each vm, we must create the appropriate vm in the NFVO database.
                    vmDict = {}
                    for _,vm in VNFCDict.iteritems():
                        #This code could make the name of the vms grow and grow.
                        #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
                        #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
                        #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
                        vm["vnf_id"] = vnf_id
                        # created_time is bumped per row so ORDER BY created_at preserves insertion order
                        created_time += 0.00001
                        vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
                        #print "Internal vm id in NFVO DB: %s" % vm_id
                        vmDict[vm['name']] = vm_id

                    #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
                    bridgeInterfacesDict = {}
                    for vm in vnf_descriptor['vnf']['VNFC']:
                        if 'bridge-ifaces' in vm:
                            bridgeInterfacesDict[vm['name']] = {}
                            for bridgeiface in vm['bridge-ifaces']:
                                created_time += 0.00001
                                db_base._convert_bandwidth(bridgeiface, logger=self.logger)
                                # normalize descriptor keys (dashes) to DB column names (underscores)
                                if 'port-security' in bridgeiface:
                                    bridgeiface['port_security'] = bridgeiface.pop('port-security')
                                if 'floating-ip' in bridgeiface:
                                    bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
                                ifaceDict = {}
                                ifaceDict['vpci'] = bridgeiface.get('vpci',None)
                                ifaceDict['mac'] = bridgeiface.get('mac_address',None)
                                ifaceDict['bw'] = bridgeiface.get('bandwidth', None)
                                ifaceDict['model'] = bridgeiface.get('model', None)
                                # stored as int because the DB column is numeric (0/1)
                                ifaceDict['port_security'] = int(bridgeiface.get('port_security', True))
                                ifaceDict['floating_ip'] = int(bridgeiface.get('floating_ip', False))
                                ifaceDict['created_time'] = created_time
                                bridgeInterfacesDict[vm['name']][bridgeiface['name']] = ifaceDict

                    # Collect the data interfaces of each VM/VNFC under the 'numas' field
                    dataifacesDict = {}
                    for vm in vnf_descriptor['vnf']['VNFC']:
                        dataifacesDict[vm['name']] = {}
                        for numa in vm.get('numas', []):
                            for dataiface in numa.get('interfaces', []):
                                created_time += 0.00001
                                db_base._convert_bandwidth(dataiface, logger=self.logger)
                                ifaceDict = {}
                                ifaceDict['vpci'] = dataiface['vpci']
                                ifaceDict['bw'] = dataiface['bandwidth']
                                # 'dedicated' yes/no/other maps to PF (passthrough), VF (SR-IOV), VFnotShared
                                ifaceDict['model'] = "PF" if dataiface['dedicated'] == "yes" else \
                                    ("VF" if dataiface['dedicated'] == "no" else "VFnotShared")
                                ifaceDict['created_time'] = created_time
                                dataifacesDict[vm['name']][dataiface['name']] = ifaceDict

                    #For each internal connection, we add it to the interfaceDict and we create the appropriate net in the NFVO database.
                    #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
                    if 'internal-connections' in vnf_descriptor['vnf']:
                        for net in vnf_descriptor['vnf']['internal-connections']:
                            #print "Net name: %s. Description: %s" % (net['name'], net['description'])

                            myNetDict = {}
                            myNetDict["name"] = net['name']
                            myNetDict["description"] = net['description']
                            # Map descriptor implementation/type onto DB net type:
                            #   overlay            -> bridge
                            #   underlay + e-line  -> ptp
                            #   underlay + e-lan   -> data
                            if (net["implementation"] == "overlay"):
                                net["type"] = "bridge"
                                #It should give an error if the type is e-line. For the moment, we consider it as a bridge
                            elif (net["implementation"] == "underlay"):
                                if (net["type"] == "e-line"):
                                    net["type"] = "ptp"
                                elif (net["type"] == "e-lan"):
                                    net["type"] = "data"
                            net.pop("implementation")
                            myNetDict["type"] = net['type']
                            myNetDict["vnf_id"] = vnf_id

                            created_time += 0.00001
                            net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)

                            # optional ip-profile for the internal net
                            if "ip-profile" in net:
                                ip_profile = net["ip-profile"]
                                myIPProfileDict = {}
                                myIPProfileDict["net_id"] = net_id
                                myIPProfileDict["ip_version"] = ip_profile.get('ip-version',"IPv4")
                                myIPProfileDict["subnet_address"] = ip_profile.get('subnet-address',None)
                                myIPProfileDict["gateway_address"] = ip_profile.get('gateway-address',None)
                                myIPProfileDict["dns_address"] = ip_profile.get('dns-address',None)
                                if ("dhcp" in ip_profile):
                                    myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled',"true")
                                    myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address',None)
                                    myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count',None)

                                created_time += 0.00001
                                ip_profile_id = self._new_row_internal('ip_profiles', myIPProfileDict)

                            for element in net['elements']:
                                ifaceItem = {}
                                #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
                                ifaceItem["internal_name"] = element['local_iface_name']
                                #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
                                ifaceItem["vm_id"] = vmDict[element['VNFC']]
                                ifaceItem["net_id"] = net_id
                                ifaceItem["type"] = net['type']
                                ifaceItem["ip_address"] = element.get('ip_address',None)
                                # data-plane ifaces come from the numas section; the rest are bridge ifaces
                                if ifaceItem ["type"] == "data":
                                    ifaceDict = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
                                    ifaceItem["vpci"] = ifaceDict['vpci']
                                    ifaceItem["bw"] = ifaceDict['bw']
                                    ifaceItem["model"] = ifaceDict['model']
                                else:
                                    ifaceDict = bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
                                    ifaceItem["vpci"] = ifaceDict['vpci']
                                    ifaceItem["mac"] = ifaceDict['mac']
                                    ifaceItem["bw"] = ifaceDict['bw']
                                    ifaceItem["model"] = ifaceDict['model']
                                    ifaceItem["port_security"] = ifaceDict['port_security']
                                    ifaceItem["floating_ip"] = ifaceDict['floating_ip']
                                created_time_iface = ifaceDict["created_time"]
                                #print "Iface name: %s" % iface['internal_name']
                                iface_id = self._new_row_internal('interfaces', ifaceItem, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
                                #print "Iface id in NFVO DB: %s" % iface_id

                    #print "Adding external interfaces to the NFVO database"
                    for iface in vnf_descriptor['vnf']['external-connections']:
                        myIfaceDict = {}
                        #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
                        myIfaceDict["internal_name"] = iface['local_iface_name']
                        #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
                        myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
                        myIfaceDict["external_name"] = iface['name']
                        myIfaceDict["type"] = iface['type']
                        if iface["type"] == "data":
                            myIfaceDict["vpci"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
                            myIfaceDict["bw"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
                            myIfaceDict["model"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
                            created_time_iface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['created_time']
                        else:
                            myIfaceDict["vpci"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
                            myIfaceDict["bw"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
                            myIfaceDict["model"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
                            myIfaceDict["mac"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['mac']
                            myIfaceDict["port_security"] = \
                                bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['port_security']
                            myIfaceDict["floating_ip"] = \
                                bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['floating_ip']
                            created_time_iface = bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['created_time']
                        #print "Iface name: %s" % iface['name']
                        iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
                        #print "Iface id in NFVO DB: %s" % iface_id

                    return vnf_id

            except (mdb.Error, AttributeError) as e:
                self._format_error(e, tries)
            # except KeyError as e2:
            #     exc_type, exc_obj, exc_tb = sys.exc_info()
            #     fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            #     self.logger.debug("Exception type: %s; Filename: %s; Line number: %s", exc_type, fname, exc_tb.tb_lineno)
            #     raise KeyError
            tries -= 1
+
    def new_scenario(self, scenario_dict):
        """Insert a scenario with its sce_nets, ip_profiles, sce_vnfs and sce_interfaces.

        Params:
            scenario_dict: dict with 'name', 'description', optional 'tenant_id'
                and 'public'; 'nets' and 'vnfs' sub-dicts are mutated in place
                (their 'uuid'/'scn_vnf_uuid' keys are filled with the new row ids).
        Returns the uuid of the new 'scenarios' row.
        Raises a db_base exception (via _format_error) on persistent DB errors.
        """
        tries = 2   # retry once on transient DB errors
        while tries:
            created_time = time.time()
            try:
                with self.con:   # transaction: commit on success, rollback on exception
                    self.cur = self.con.cursor()
                    tenant_id = scenario_dict.get('tenant_id')
                    #scenario
                    INSERT_={'tenant_id': tenant_id,
                             'name': scenario_dict['name'],
                             'description': scenario_dict['description'],
                             'public': scenario_dict.get('public', "false")}

                    scenario_uuid = self._new_row_internal('scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
                    #sce_nets
                    for net in scenario_dict['nets'].values():
                        net_dict={'scenario_id': scenario_uuid}
                        net_dict["name"] = net["name"]
                        net_dict["type"] = net["type"]
                        net_dict["description"] = net.get("description")
                        net_dict["external"] = net.get("external", False)
                        if "graph" in net:
                            #net["graph"]=yaml.safe_dump(net["graph"],default_flow_style=True,width=256)
                            #TODO, must be json because of the GUI, change to yaml
                            net_dict["graph"]=json.dumps(net["graph"])
                        # created_time is bumped per row so ORDER BY created_at preserves insertion order
                        created_time += 0.00001
                        net_uuid = self._new_row_internal('sce_nets', net_dict, add_uuid=True, root_uuid=scenario_uuid, created_time=created_time)
                        net['uuid']=net_uuid

                        # optional ip-profile attached to this sce_net
                        if net.get("ip-profile"):
                            ip_profile = net["ip-profile"]
                            myIPProfileDict = {
                                "sce_net_id": net_uuid,
                                "ip_version": ip_profile.get('ip-version', "IPv4"),
                                "subnet_address": ip_profile.get('subnet-address'),
                                "gateway_address": ip_profile.get('gateway-address'),
                                "dns_address": ip_profile.get('dns-address')}
                            if "dhcp" in ip_profile:
                                myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled', "true")
                                myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address')
                                myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count')
                            self._new_row_internal('ip_profiles', myIPProfileDict)

                    # sce_vnfs
                    for k, vnf in scenario_dict['vnfs'].items():
                        INSERT_ = {'scenario_id': scenario_uuid,
                                   'name': k,
                                   'vnf_id': vnf['uuid'],
                                   # 'description': scenario_dict['name']
                                   'description': vnf['description']}
                        if "graph" in vnf:
                            #INSERT_["graph"]=yaml.safe_dump(vnf["graph"],default_flow_style=True,width=256)
                            # TODO, must be json because of the GUI, change to yaml
                            INSERT_["graph"] = json.dumps(vnf["graph"])
                        created_time += 0.00001
                        scn_vnf_uuid = self._new_row_internal('sce_vnfs', INSERT_, add_uuid=True,
                                                              root_uuid=scenario_uuid, created_time=created_time)
                        vnf['scn_vnf_uuid']=scn_vnf_uuid
                        # sce_interfaces
                        for iface in vnf['ifaces'].values():
                            # print 'iface', iface
                            # ifaces without a net_key are not attached to any sce_net
                            if 'net_key' not in iface:
                                continue
                            iface['net_id'] = scenario_dict['nets'][ iface['net_key'] ]['uuid']
                            INSERT_={'sce_vnf_id': scn_vnf_uuid,
                                     'sce_net_id': iface['net_id'],
                                     'interface_id': iface['uuid'],
                                     'ip_address': iface.get('ip_address')}
                            created_time += 0.00001
                            iface_uuid = self._new_row_internal('sce_interfaces', INSERT_, add_uuid=True,
                                                                root_uuid=scenario_uuid, created_time=created_time)

                    return scenario_uuid

            except (mdb.Error, AttributeError) as e:
                self._format_error(e, tries)
            tries -= 1
+
+ def edit_scenario(self, scenario_dict):
+ tries = 2
+ while tries:
+ modified_time = time.time()
+ item_changed=0
+ try:
+ with self.con:
+ self.cur = self.con.cursor()
+ #check that scenario exist
+ tenant_id = scenario_dict.get('tenant_id')
+ scenario_uuid = scenario_dict['uuid']
+
+ where_text = "uuid='{}'".format(scenario_uuid)
+ if not tenant_id and tenant_id != "any":
+ where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
+ cmd = "SELECT * FROM scenarios WHERE "+ where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ self.cur.fetchall()
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, db_base.HTTP_Bad_Request)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, db_base.HTTP_Bad_Request)
+
+ #scenario
+ nodes = {}
+ topology = scenario_dict.pop("topology", None)
+ if topology != None and "nodes" in topology:
+ nodes = topology.get("nodes",{})
+ UPDATE_ = {}
+ if "name" in scenario_dict: UPDATE_["name"] = scenario_dict["name"]
+ if "description" in scenario_dict: UPDATE_["description"] = scenario_dict["description"]
+ if len(UPDATE_)>0:
+ WHERE_={'tenant_id': tenant_id, 'uuid': scenario_uuid}
+ item_changed += self._update_rows('scenarios', UPDATE_, WHERE_, modified_time=modified_time)
+ #sce_nets
+ for node_id, node in nodes.items():
+ if "graph" in node:
+ #node["graph"] = yaml.safe_dump(node["graph"],default_flow_style=True,width=256)
+ #TODO, must be json because of the GUI, change to yaml
+ node["graph"] = json.dumps(node["graph"])
+ WHERE_={'scenario_id': scenario_uuid, 'uuid': node_id}
+ #Try to change at sce_nets(version 0 API backward compatibility and sce_vnfs)
+ item_changed += self._update_rows('sce_nets', node, WHERE_)
+ item_changed += self._update_rows('sce_vnfs', node, WHERE_, modified_time=modified_time)
+ return item_changed
+
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+ # def get_instance_scenario(self, instance_scenario_id, tenant_id=None):
+ # '''Obtain the scenario instance information, filtering by one or serveral of the tenant, uuid or name
+ # instance_scenario_id is the uuid or the name if it is not a valid uuid format
+ # Only one scenario isntance must mutch the filtering or an error is returned
+ # '''
+ # print "1******************************************************************"
+ # try:
+ # with self.con:
+ # self.cur = self.con.cursor(mdb.cursors.DictCursor)
+ # #scenario table
+ # where_list=[]
+ # if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
+ # if db_base._check_valid_uuid(instance_scenario_id):
+ # where_list.append( "uuid='" + instance_scenario_id +"'" )
+ # else:
+ # where_list.append( "name='" + instance_scenario_id +"'" )
+ # where_text = " AND ".join(where_list)
+ # self.cur.execute("SELECT * FROM instance_scenarios WHERE "+ where_text)
+ # rows = self.cur.fetchall()
+ # if self.cur.rowcount==0:
+ # return -HTTP_Bad_Request, "No scenario instance found with this criteria " + where_text
+ # elif self.cur.rowcount>1:
+ # return -HTTP_Bad_Request, "More than one scenario instance found with this criteria " + where_text
+ # instance_scenario_dict = rows[0]
+ #
+ # #instance_vnfs
+ # self.cur.execute("SELECT uuid,vnf_id FROM instance_vnfs WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
+ # instance_scenario_dict['instance_vnfs'] = self.cur.fetchall()
+ # for vnf in instance_scenario_dict['instance_vnfs']:
+ # #instance_vms
+ # self.cur.execute("SELECT uuid, vim_vm_id "+
+ # "FROM instance_vms "+
+ # "WHERE instance_vnf_id='" + vnf['uuid'] +"'"
+ # )
+ # vnf['instance_vms'] = self.cur.fetchall()
+ # #instance_nets
+ # self.cur.execute("SELECT uuid, vim_net_id FROM instance_nets WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
+ # instance_scenario_dict['instance_nets'] = self.cur.fetchall()
+ #
+ # #instance_interfaces
+ # self.cur.execute("SELECT uuid, vim_interface_id, instance_vm_id, instance_net_id FROM instance_interfaces WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
+ # instance_scenario_dict['instance_interfaces'] = self.cur.fetchall()
+ #
+ # db_base._convert_datetime2str(instance_scenario_dict)
+ # db_base._convert_str2boolean(instance_scenario_dict, ('public','shared','external') )
+ # print "2******************************************************************"
+ # return 1, instance_scenario_dict
+ # except (mdb.Error, AttributeError) as e:
+ # print "nfvo_db.get_instance_scenario DB Exception %d: %s" % (e.args[0], e.args[1])
+ # return self._format_error(e)
+
+ def get_scenario(self, scenario_id, tenant_id=None, datacenter_id=None):
+ '''Obtain the scenario information, filtering by one or serveral of the tenant, uuid or name
+ scenario_id is the uuid or the name if it is not a valid uuid format
+ if datacenter_id is provided, it supply aditional vim_id fields with the matching vim uuid
+ Only one scenario must mutch the filtering or an error is returned
+ '''
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor(mdb.cursors.DictCursor)
+ where_text = "uuid='{}'".format(scenario_id)
+ if not tenant_id and tenant_id != "any":
+ where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
+ cmd = "SELECT * FROM scenarios WHERE " + where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, db_base.HTTP_Bad_Request)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, db_base.HTTP_Bad_Request)
+ scenario_dict = rows[0]
+ if scenario_dict["cloud_config"]:
+ scenario_dict["cloud-config"] = yaml.load(scenario_dict["cloud_config"])
+ del scenario_dict["cloud_config"]
+ #sce_vnfs
+ cmd = "SELECT uuid,name,vnf_id,description FROM sce_vnfs WHERE scenario_id='{}' ORDER BY created_at".format(scenario_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ scenario_dict['vnfs'] = self.cur.fetchall()
+ for vnf in scenario_dict['vnfs']:
+ #sce_interfaces
+ cmd = "SELECT scei.uuid,scei.sce_net_id,scei.interface_id,i.external_name,scei.ip_address FROM sce_interfaces as scei join interfaces as i on scei.interface_id=i.uuid WHERE scei.sce_vnf_id='{}' ORDER BY scei.created_at".format(vnf['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['interfaces'] = self.cur.fetchall()
+ #vms
+ cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, vms.name as name, vms.description as description, vms.boot_data as boot_data " \
+ " FROM vnfs join vms on vnfs.uuid=vms.vnf_id " \
+ " WHERE vnfs.uuid='" + vnf['vnf_id'] +"'" \
+ " ORDER BY vms.created_at"
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['vms'] = self.cur.fetchall()
+ for vm in vnf['vms']:
+ if vm["boot_data"]:
+ vm["boot_data"] = yaml.safe_load(vm["boot_data"])
+ else:
+ del vm["boot_data"]
+ if datacenter_id!=None:
+ cmd = "SELECT vim_id FROM datacenters_images WHERE image_id='{}' AND datacenter_id='{}'".format(vm['image_id'],datacenter_id)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ if self.cur.rowcount==1:
+ vim_image_dict = self.cur.fetchone()
+ vm['vim_image_id']=vim_image_dict['vim_id']
+ cmd = "SELECT vim_id FROM datacenters_flavors WHERE flavor_id='{}' AND datacenter_id='{}'".format(vm['flavor_id'],datacenter_id)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ if self.cur.rowcount==1:
+ vim_flavor_dict = self.cur.fetchone()
+ vm['vim_flavor_id']=vim_flavor_dict['vim_id']
+
+ #interfaces
+ cmd = "SELECT uuid,internal_name,external_name,net_id,type,vpci,mac,bw,model,ip_address," \
+ "floating_ip, port_security" \
+ " FROM interfaces" \
+ " WHERE vm_id='{}'" \
+ " ORDER BY created_at".format(vm['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vm['interfaces'] = self.cur.fetchall()
+ for index in range(0,len(vm['interfaces'])):
+ vm['interfaces'][index]['port-security'] = vm['interfaces'][index].pop("port_security")
+ vm['interfaces'][index]['floating-ip'] = vm['interfaces'][index].pop("floating_ip")
+ #nets every net of a vms
+ cmd = "SELECT uuid,name,type,description FROM nets WHERE vnf_id='{}'".format(vnf['vnf_id'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['nets'] = self.cur.fetchall()
+ for vnf_net in vnf['nets']:
+ SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
+ cmd = "SELECT {} FROM ip_profiles WHERE net_id='{}'".format(SELECT_,vnf_net['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ ipprofiles = self.cur.fetchall()
+ if self.cur.rowcount==1:
+ vnf_net["ip_profile"] = ipprofiles[0]
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one ip-profile found with this criteria: net_id='{}'".format(vnf_net['uuid']), db_base.HTTP_Bad_Request)
+
+ #sce_nets
+ cmd = "SELECT uuid,name,type,external,description" \
+ " FROM sce_nets WHERE scenario_id='{}'" \
+ " ORDER BY created_at ".format(scenario_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ scenario_dict['nets'] = self.cur.fetchall()
+ #datacenter_nets
+ for net in scenario_dict['nets']:
+ if str(net['external']) == 'false':
+ SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
+ cmd = "SELECT {} FROM ip_profiles WHERE sce_net_id='{}'".format(SELECT_,net['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ ipprofiles = self.cur.fetchall()
+ if self.cur.rowcount==1:
+ net["ip_profile"] = ipprofiles[0]
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one ip-profile found with this criteria: sce_net_id='{}'".format(net['uuid']), db_base.HTTP_Bad_Request)
+ continue
+ WHERE_=" WHERE name='{}'".format(net['name'])
+ if datacenter_id!=None:
+ WHERE_ += " AND datacenter_id='{}'".format(datacenter_id)
+ cmd = "SELECT vim_net_id FROM datacenter_nets" + WHERE_
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ d_net = self.cur.fetchone()
+ if d_net==None or datacenter_id==None:
+ #print "nfvo_db.get_scenario() WARNING external net %s not found" % net['name']
+ net['vim_id']=None
+ else:
+ net['vim_id']=d_net['vim_net_id']
+
+ db_base._convert_datetime2str(scenario_dict)
+ db_base._convert_str2boolean(scenario_dict, ('public','shared','external','port-security','floating-ip') )
+ return scenario_dict
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+ def delete_scenario(self, scenario_id, tenant_id=None):
+ '''Deletes a scenario, filtering by one or several of the tenant, uuid or name
+ scenario_id is the uuid or the name if it is not a valid uuid format
+ Only one scenario must mutch the filtering or an error is returned
+ '''
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor(mdb.cursors.DictCursor)
+
+ #scenario table
+ where_text = "uuid='{}'".format(scenario_id)
+ if not tenant_id and tenant_id != "any":
+ where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
+ cmd = "SELECT * FROM scenarios WHERE "+ where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No scenario found where " + where_text, db_base.HTTP_Bad_Request)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one scenario found where " + where_text, db_base.HTTP_Bad_Request)
+ scenario_uuid = rows[0]["uuid"]
+ scenario_name = rows[0]["name"]
+
+ #sce_vnfs
+ cmd = "DELETE FROM scenarios WHERE uuid='{}'".format(scenario_uuid)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+
+ return scenario_uuid + " " + scenario_name
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries, "delete", "instances running")
+ tries -= 1
+
+ def new_instance_scenario_as_a_whole(self,tenant_id,instance_scenario_name,instance_scenario_description,scenarioDict):
+ tries = 2
+ while tries:
+ created_time = time.time()
+ try:
+ with self.con:
+ self.cur = self.con.cursor()
+ #instance_scenarios
+ datacenter_id = scenarioDict['datacenter_id']
+ INSERT_={'tenant_id': tenant_id,
+ 'datacenter_tenant_id': scenarioDict["datacenter2tenant"][datacenter_id],
+ 'name': instance_scenario_name,
+ 'description': instance_scenario_description,
+ 'scenario_id' : scenarioDict['uuid'],
+ 'datacenter_id': datacenter_id
+ }
+ if scenarioDict.get("cloud-config"):
+ INSERT_["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"], default_flow_style=True, width=256)
+
+ instance_uuid = self._new_row_internal('instance_scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
+
+ net_scene2instance={}
+ #instance_nets #nets interVNF
+ for net in scenarioDict['nets']:
+ net_scene2instance[ net['uuid'] ] ={}
+ datacenter_site_id = net.get('datacenter_id', datacenter_id)
+ if not "vim_id_sites" in net:
+ net["vim_id_sites"] ={datacenter_site_id: net['vim_id']}
+ net["vim_id_sites"]["datacenter_site_id"] = {datacenter_site_id: net['vim_id']}
+ sce_net_id = net.get("uuid")
+
+ for datacenter_site_id,vim_id in net["vim_id_sites"].iteritems():
+ INSERT_={'vim_net_id': vim_id, 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #, 'type': net['type']
+ INSERT_['datacenter_id'] = datacenter_site_id
+ INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
++ if not net.get('created', False):
++ INSERT_['status'] = "ACTIVE"
+ if sce_net_id:
+ INSERT_['sce_net_id'] = sce_net_id
+ created_time += 0.00001
+ instance_net_uuid = self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
+ net_scene2instance[ sce_net_id ][datacenter_site_id] = instance_net_uuid
+ net['uuid'] = instance_net_uuid #overwrite scnario uuid by instance uuid
+
+ if 'ip_profile' in net:
+ net['ip_profile']['net_id'] = None
+ net['ip_profile']['sce_net_id'] = None
+ net['ip_profile']['instance_net_id'] = instance_net_uuid
+ created_time += 0.00001
+ ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
+
+ #instance_vnfs
+ for vnf in scenarioDict['vnfs']:
+ datacenter_site_id = vnf.get('datacenter_id', datacenter_id)
+ INSERT_={'instance_scenario_id': instance_uuid, 'vnf_id': vnf['vnf_id'] }
+ INSERT_['datacenter_id'] = datacenter_site_id
+ INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
+ if vnf.get("uuid"):
+ INSERT_['sce_vnf_id'] = vnf['uuid']
+ created_time += 0.00001
+ instance_vnf_uuid = self._new_row_internal('instance_vnfs', INSERT_, True, instance_uuid, created_time)
+ vnf['uuid'] = instance_vnf_uuid #overwrite scenario uuid by instance uuid
+
+ #instance_nets #nets intraVNF
+ for net in vnf['nets']:
+ net_scene2instance[ net['uuid'] ] = {}
+ INSERT_={'vim_net_id': net['vim_id'], 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #, 'type': net['type']
+ INSERT_['datacenter_id'] = net.get('datacenter_id', datacenter_site_id)
+ INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_id]
+ if net.get("uuid"):
+ INSERT_['net_id'] = net['uuid']
+ created_time += 0.00001
+ instance_net_uuid = self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
+ net_scene2instance[ net['uuid'] ][datacenter_site_id] = instance_net_uuid
+ net['uuid'] = instance_net_uuid #overwrite scenario uuid by instance uuid
+
+ if 'ip_profile' in net:
+ net['ip_profile']['net_id'] = None
+ net['ip_profile']['sce_net_id'] = None
+ net['ip_profile']['instance_net_id'] = instance_net_uuid
+ created_time += 0.00001
+ ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
+
+ #instance_vms
+ for vm in vnf['vms']:
+ INSERT_={'instance_vnf_id': instance_vnf_uuid, 'vm_id': vm['uuid'], 'vim_vm_id': vm['vim_id'] }
+ created_time += 0.00001
+ instance_vm_uuid = self._new_row_internal('instance_vms', INSERT_, True, instance_uuid, created_time)
+ vm['uuid'] = instance_vm_uuid #overwrite scenario uuid by instance uuid
+
+ #instance_interfaces
+ for interface in vm['interfaces']:
+ net_id = interface.get('net_id', None)
+ if net_id is None:
+ #check if it is connected to an inter-VNF net
+ for iface in vnf['interfaces']:
+ if iface['interface_id'] == interface['uuid']:
+ if 'ip_address' in iface:
+ interface['ip_address'] = iface['ip_address']
+ net_id = iface.get('sce_net_id', None)
+ break
+ if net_id is None:
+ continue
+ interface_type='external' if interface['external_name'] is not None else 'internal'
+ INSERT_={'instance_vm_id': instance_vm_uuid, 'instance_net_id': net_scene2instance[net_id][datacenter_site_id],
+ 'interface_id': interface['uuid'], 'vim_interface_id': interface.get('vim_id'), 'type': interface_type,
+ 'ip_address': interface.get('ip_address'), 'floating_ip': int(interface.get('floating-ip',False)),
+ 'port_security': int(interface.get('port-security',True))}
+ #created_time += 0.00001
+ interface_uuid = self._new_row_internal('instance_interfaces', INSERT_, True, instance_uuid) #, created_time)
+ interface['uuid'] = interface_uuid #overwrite scenario uuid by instance uuid
+ return instance_uuid
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+ def get_instance_scenario(self, instance_id, tenant_id=None, verbose=False):
+ '''Obtain the instance information, filtering by one or several of the tenant, uuid or name
+ instance_id is the uuid or the name if it is not a valid uuid format
+ Only one instance must match the filtering or an error is returned
+ '''
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor(mdb.cursors.DictCursor)
+ #instance table
+ where_list=[]
+ if tenant_id is not None: where_list.append( "inst.tenant_id='" + tenant_id +"'" )
+ if db_base._check_valid_uuid(instance_id):
+ where_list.append( "inst.uuid='" + instance_id +"'" )
+ else:
+ where_list.append( "inst.name='" + instance_id +"'" )
+ where_text = " AND ".join(where_list)
+ cmd = "SELECT inst.uuid as uuid,inst.name as name,inst.scenario_id as scenario_id, datacenter_id" +\
+ " ,datacenter_tenant_id, s.name as scenario_name,inst.tenant_id as tenant_id" + \
+ " ,inst.description as description,inst.created_at as created_at" +\
+ " ,inst.cloud_config as 'cloud_config'" +\
+ " FROM instance_scenarios as inst join scenarios as s on inst.scenario_id=s.uuid"+\
+ " WHERE " + where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No instance found where " + where_text, db_base.HTTP_Not_Found)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one instance found where " + where_text, db_base.HTTP_Bad_Request)
+ instance_dict = rows[0]
+ if instance_dict["cloud_config"]:
+ instance_dict["cloud-config"] = yaml.load(instance_dict["cloud_config"])
+ del instance_dict["cloud_config"]
+
+ #instance_vnfs
+ cmd = "SELECT iv.uuid as uuid,sv.vnf_id as vnf_id,sv.name as vnf_name, sce_vnf_id, datacenter_id, datacenter_tenant_id"\
+ " FROM instance_vnfs as iv join sce_vnfs as sv on iv.sce_vnf_id=sv.uuid" \
+ " WHERE iv.instance_scenario_id='{}'" \
+ " ORDER BY iv.created_at ".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['vnfs'] = self.cur.fetchall()
+ for vnf in instance_dict['vnfs']:
+ vnf_manage_iface_list=[]
+ #instance vms
+ cmd = "SELECT iv.uuid as uuid, vim_vm_id, status, error_msg, vim_info, iv.created_at as created_at, name "\
+ " FROM instance_vms as iv join vms on iv.vm_id=vms.uuid "\
+ " WHERE instance_vnf_id='{}' ORDER BY iv.created_at".format(vnf['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['vms'] = self.cur.fetchall()
+ for vm in vnf['vms']:
+ vm_manage_iface_list=[]
+ #instance_interfaces
+ cmd = "SELECT vim_interface_id, instance_net_id, internal_name,external_name, mac_address,"\
- cmd = "SELECT uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, net_id as vnf_net_id, datacenter_id, datacenter_tenant_id"\
++ " ii.ip_address as ip_address, vim_info, i.type as type, sdn_port_id"\
+ " FROM instance_interfaces as ii join interfaces as i on ii.interface_id=i.uuid"\
+ " WHERE instance_vm_id='{}' ORDER BY created_at".format(vm['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd )
+ vm['interfaces'] = self.cur.fetchall()
+ for iface in vm['interfaces']:
+ if iface["type"] == "mgmt" and iface["ip_address"]:
+ vnf_manage_iface_list.append(iface["ip_address"])
+ vm_manage_iface_list.append(iface["ip_address"])
+ if not verbose:
+ del iface["type"]
+ if vm_manage_iface_list: vm["ip_address"] = ",".join(vm_manage_iface_list)
+ if vnf_manage_iface_list: vnf["ip_address"] = ",".join(vnf_manage_iface_list)
+
+ #instance_nets
+ #select_text = "instance_nets.uuid as uuid,sce_nets.name as net_name,instance_nets.vim_net_id as net_id,instance_nets.status as status,instance_nets.external as external"
+ #from_text = "instance_nets join instance_scenarios on instance_nets.instance_scenario_id=instance_scenarios.uuid " + \
+ # "join sce_nets on instance_scenarios.scenario_id=sce_nets.scenario_id"
+ #where_text = "instance_nets.instance_scenario_id='"+ instance_dict['uuid'] + "'"
++ cmd = "SELECT uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, net_id as vnf_net_id, datacenter_id, datacenter_tenant_id, sdn_net_id"\
+ " FROM instance_nets" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['nets'] = self.cur.fetchall()
+
+ db_base._convert_datetime2str(instance_dict)
+ db_base._convert_str2boolean(instance_dict, ('public','shared','created') )
+ return instance_dict
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+ def delete_instance_scenario(self, instance_id, tenant_id=None):
+ '''Deletes an instance_scenario, filtering by one or several of the tenant, uuid or name
+ instance_id is the uuid or the name if it is not a valid uuid format
+ Only one instance_scenario must match the filtering or an error is returned
+ '''
+ tries = 2
+ while tries:
+ try:
+ with self.con:
+ self.cur = self.con.cursor(mdb.cursors.DictCursor)
+
+ #instance table
+ where_list=[]
+ if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
+ if db_base._check_valid_uuid(instance_id):
+ where_list.append( "uuid='" + instance_id +"'" )
+ else:
+ where_list.append( "name='" + instance_id +"'" )
+ where_text = " AND ".join(where_list)
+ cmd = "SELECT * FROM instance_scenarios WHERE "+ where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No instance found where " + where_text, db_base.HTTP_Bad_Request)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one instance found where " + where_text, db_base.HTTP_Bad_Request)
+ instance_uuid = rows[0]["uuid"]
+ instance_name = rows[0]["name"]
+
+ #sce_vnfs
+ cmd = "DELETE FROM instance_scenarios WHERE uuid='{}'".format(instance_uuid)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+
+ return instance_uuid + " " + instance_name
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries, "delete", "No dependences can avoid deleting!!!!")
+ tries -= 1
+
+ def new_instance_scenario(self, instance_scenario_dict, tenant_id):
+ #return self.new_row('vnfs', vnf_dict, None, tenant_id, True, True)
+ return self._new_row_internal('instance_scenarios', instance_scenario_dict, tenant_id, add_uuid=True, root_uuid=None, log=True)
+
+ def update_instance_scenario(self, instance_scenario_dict):
+ #TODO:
+ return
+
+ def new_instance_vnf(self, instance_vnf_dict, tenant_id, instance_scenario_id = None):
+ #return self.new_row('vms', vm_dict, tenant_id, True, True)
+ return self._new_row_internal('instance_vnfs', instance_vnf_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+ def update_instance_vnf(self, instance_vnf_dict):
+ #TODO:
+ return
+
+ def delete_instance_vnf(self, instance_vnf_id):
+ #TODO:
+ return
+
+ def new_instance_vm(self, instance_vm_dict, tenant_id, instance_scenario_id = None):
+ #return self.new_row('vms', vm_dict, tenant_id, True, True)
+ return self._new_row_internal('instance_vms', instance_vm_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+ def update_instance_vm(self, instance_vm_dict):
+ #TODO:
+ return
+
+ def delete_instance_vm(self, instance_vm_id):
+ #TODO:
+ return
+
+ def new_instance_net(self, instance_net_dict, tenant_id, instance_scenario_id = None):
+ return self._new_row_internal('instance_nets', instance_net_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+ def update_instance_net(self, instance_net_dict):
+ #TODO:
+ return
+
+ def delete_instance_net(self, instance_net_id):
+ #TODO:
+ return
+
+ def new_instance_interface(self, instance_interface_dict, tenant_id, instance_scenario_id = None):
+ return self._new_row_internal('instance_interfaces', instance_interface_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+ def update_instance_interface(self, instance_interface_dict):
+ #TODO:
+ return
+
+ def delete_instance_interface(self, instance_interface_dict):
+ #TODO:
+ return
+
+ def update_datacenter_nets(self, datacenter_id, new_net_list=[]):
+ ''' Removes the old and adds the new net list at datacenter list for one datacenter.
+ Attribute
+ datacenter_id: uuid of the datacenter to act upon
+ table: table where to insert
+ new_net_list: the new values to be inserted. If empty it only deletes the existing nets
+ Return: (Inserted items, Deleted items) if OK, (-Error, text) if error
+ '''
+ tries = 2
+ while tries:
+ created_time = time.time()
+ try:
+ with self.con:
+ self.cur = self.con.cursor()
+ cmd="DELETE FROM datacenter_nets WHERE datacenter_id='{}'".format(datacenter_id)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ deleted = self.cur.rowcount
+ inserted = 0
+ for new_net in new_net_list:
+ created_time += 0.00001
+ self._new_row_internal('datacenter_nets', new_net, add_uuid=True, created_time=created_time)
+ inserted += 1
+ return inserted, deleted
+ except (mdb.Error, AttributeError) as e:
+ self._format_error(e, tries)
+ tries -= 1
+
+
--- /dev/null
- "required": ['db_host', 'db_user', 'db_passwd', 'db_name'],
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ '''
+ JSON schemas used by openmano httpserver.py module to parse the different files and messages sent through the API
+ '''
+ __author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+ __date__ ="$09-oct-2014 09:09:48$"
+
+ #Basis schemas
+ patern_name="^[ -~]+$"
+ passwd_schema={"type" : "string", "minLength":1, "maxLength":60}
+ nameshort_schema={"type" : "string", "minLength":1, "maxLength":60, "pattern" : "^[^,;()'\"]+$"}
+ name_schema={"type" : "string", "minLength":1, "maxLength":255, "pattern" : "^[^,;()'\"]+$"}
+ xml_text_schema={"type" : "string", "minLength":1, "maxLength":1000, "pattern" : "^[^']+$"}
+ description_schema={"type" : ["string","null"], "maxLength":255, "pattern" : "^[^'\"]+$"}
+ id_schema_fake = {"type" : "string", "minLength":2, "maxLength":36 } #"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
+ id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+ pci_schema={"type":"string", "pattern":"^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\.[0-9a-fA-F]$"}
+ http_schema={"type":"string", "pattern":"^https?://[^'\"=]+$"}
+ bandwidth_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]bps)?$"}
+ memory_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]i?[Bb])?$"}
+ integer0_schema={"type":"integer","minimum":0}
+ integer1_schema={"type":"integer","minimum":1}
+ path_schema={"type":"string", "pattern":"^(\.){0,2}(/[^/\"':{}\(\)]+)+$"}
+ vlan_schema={"type":"integer","minimum":1,"maximum":4095}
+ vlan1000_schema={"type":"integer","minimum":1000,"maximum":4095}
+ mac_schema={"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"} #must be unicast LSB bit of MSB byte ==0
+ #mac_schema={"type":"string", "pattern":"^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$"}
+ ip_schema={"type":"string","pattern":"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"}
+ ip_prefix_schema={"type":"string","pattern":"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(30|[12]?[0-9])$"}
+ port_schema={"type":"integer","minimum":1,"maximum":65534}
+ object_schema={"type":"object"}
+ schema_version_2={"type":"integer","minimum":2,"maximum":2}
+ #schema_version_string={"type":"string","enum": ["0.1", "2", "0.2", "3", "0.3"]}
+ log_level_schema={"type":"string", "enum":["DEBUG", "INFO", "WARNING","ERROR","CRITICAL"]}
+ checksum_schema={"type":"string", "pattern":"^[0-9a-fA-F]{32}$"}
+ size_schema={"type":"integer","minimum":1,"maximum":100}
+
+ metadata_schema={
+ "type":"object",
+ "properties":{
+ "architecture": {"type":"string"},
+ "use_incremental": {"type":"string","enum":["yes","no"]},
+ "vpci": pci_schema,
+ "os_distro": {"type":"string"},
+ "os_type": {"type":"string"},
+ "os_version": {"type":"string"},
+ "bus": {"type":"string"},
+ "topology": {"type":"string", "enum": ["oneSocket"]}
+ }
+ }
+
+ #Schema for the configuration file
+ config_schema = {
+ "title":"configuration response information schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "http_port": port_schema,
+ "http_admin_port": port_schema,
+ "http_host": nameshort_schema,
+ "auto_push_VNF_to_VIMs": {"type":"boolean"},
+ "vnf_repository": path_schema,
+ "db_host": nameshort_schema,
+ "db_user": nameshort_schema,
+ "db_passwd": {"type":"string"},
+ "db_name": nameshort_schema,
++ "db_ovim_host": nameshort_schema,
++ "db_ovim_user": nameshort_schema,
++ "db_ovim_passwd": {"type":"string"},
++ "db_ovim_name": nameshort_schema,
+ # Next fields will disappear once the MANO API includes appropriate primitives
+ "vim_url": http_schema,
+ "vim_url_admin": http_schema,
+ "vim_name": nameshort_schema,
+ "vim_tenant_name": nameshort_schema,
+ "mano_tenant_name": nameshort_schema,
+ "mano_tenant_id": id_schema,
+ "http_console_proxy": {"type":"boolean"},
+ "http_console_host": nameshort_schema,
+ "http_console_ports": {
+ "type": "array",
+ "items": {"OneOf" : [
+ port_schema,
+ {"type":"object", "properties":{"from": port_schema, "to": port_schema}, "required": ["from","to"]}
+ ]}
+ },
+ "log_level": log_level_schema,
+ "log_socket_level": log_level_schema,
+ "log_level_db": log_level_schema,
+ "log_level_vim": log_level_schema,
+ "log_level_nfvo": log_level_schema,
+ "log_level_http": log_level_schema,
+ "log_level_console": log_level_schema,
++ "log_level_ovim": log_level_schema,
+ "log_file_db": path_schema,
+ "log_file_vim": path_schema,
+ "log_file_nfvo": path_schema,
+ "log_file_http": path_schema,
+ "log_file_console": path_schema,
++ "log_file_ovim": path_schema,
+ "log_socket_host": nameshort_schema,
+ "log_socket_port": port_schema,
+ "log_file": path_schema,
+ },
++ "required": ['db_user', 'db_passwd', 'db_name'],
+ "additionalProperties": False
+ }
+
+ tenant_schema = {
+ "title":"tenant information schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "tenant":{
+ "type":"object",
+ "properties":{
+ "name": nameshort_schema,
+ "description": description_schema,
+ },
+ "required": ["name"],
+ "additionalProperties": True
+ }
+ },
+ "required": ["tenant"],
+ "additionalProperties": False
+ }
+
+ tenant_edit_schema = {
+ "title":"tenant edit information schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "tenant":{
+ "type":"object",
+ "properties":{
+ "name": name_schema,
+ "description": description_schema,
+ },
+ "additionalProperties": False
+ }
+ },
+ "required": ["tenant"],
+ "additionalProperties": False
+ }
+
+ datacenter_schema_properties={
+ "name": name_schema,
+ "description": description_schema,
+ "type": nameshort_schema, #currently "openvim" or "openstack", can be enlarged with plugins
+ "vim_url": description_schema,
+ "vim_url_admin": description_schema,
+ "config": { "type":"object" }
+ }
+
+ datacenter_schema = {
+ "title":"datacenter information schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "datacenter":{
+ "type":"object",
+ "properties":datacenter_schema_properties,
+ "required": ["name", "vim_url"],
+ "additionalProperties": True
+ }
+ },
+ "required": ["datacenter"],
+ "additionalProperties": False
+ }
+
+
+ datacenter_edit_schema = {
+ "title":"datacenter edit nformation schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "datacenter":{
+ "type":"object",
+ "properties":datacenter_schema_properties,
+ "additionalProperties": False
+ }
+ },
+ "required": ["datacenter"],
+ "additionalProperties": False
+ }
+
+
+ netmap_new_schema = {
+ "title":"netmap new information schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "netmap":{ #delete from datacenter
+ "type":"object",
+ "properties":{
+ "name": name_schema, #name or uuid of net to change
+ "vim_id": id_schema,
+ "vim_name": name_schema
+ },
+ "minProperties": 1,
+ "additionalProperties": False
+ },
+ },
+ "required": ["netmap"],
+ "additionalProperties": False
+ }
+
+ netmap_edit_schema = {
+ "title":"netmap edit information schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "netmap":{ #delete from datacenter
+ "type":"object",
+ "properties":{
+ "name": name_schema, #name or uuid of net to change
+ },
+ "minProperties": 1,
+ "additionalProperties": False
+ },
+ },
+ "required": ["netmap"],
+ "additionalProperties": False
+ }
+
+ datacenter_action_schema = {
+ "title":"datacenter action information schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "net-update":{"type":"null",},
+ "net-edit":{
+ "type":"object",
+ "properties":{
+ "net": name_schema, #name or uuid of net to change
+ "name": name_schema,
+ "description": description_schema,
+ "shared": {"type": "boolean"}
+ },
+ "minProperties": 1,
+ "additionalProperties": False
+ },
+ "net-delete":{
+ "type":"object",
+ "properties":{
+ "net": name_schema, #name or uuid of net to change
+ },
+ "required": ["net"],
+ "additionalProperties": False
+ },
+ },
+ "minProperties": 1,
+ "maxProperties": 1,
+ "additionalProperties": False
+ }
+
+
+ datacenter_associate_schema={
+ "title":"datacenter associate information schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "datacenter":{
+ "type":"object",
+ "properties":{
+ "vim_tenant": name_schema,
+ "vim_tenant_name": name_schema,
+ "vim_username": nameshort_schema,
+ "vim_password": nameshort_schema,
+ "config": {"type": "object"}
+ },
+ # "required": ["vim_tenant"],
+ "additionalProperties": True
+ }
+ },
+ "required": ["datacenter"],
+ "additionalProperties": False
+ }
+
+ dhcp_schema = {
+ "title":"DHCP schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "enabled": {"type": "boolean"},
+ "start-address": ip_schema,
+ "count": integer1_schema
+ },
+ "required": ["enabled", "start-address", "count"],
+ }
+
+ ip_profile_schema = {
+ "title":"IP profile schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "ip-version": {"type":"string", "enum":["IPv4","IPv6"]},
+ "subnet-address": ip_prefix_schema,
+ "gateway-address": ip_schema,
+ "dns-address": ip_schema,
+ "dhcp": dhcp_schema
+ },
+ }
+
+ key_pair_schema = {
+ "title": "Key-pair schema for cloud-init configuration schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "name": name_schema,
+ "key": {"type":"string"}
+ },
+ "required": ["key"],
+ "additionalProperties": False
+ }
+
+ cloud_config_user_schema = {
+ "title": "User schema for cloud-init configuration schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "name": nameshort_schema,
+ "user-info": {"type":"string"},
+ #"key-pairs": {"type" : "array", "items": key_pair_schema}
+ "key-pairs": {"type" : "array", "items": {"type":"string"}}
+ },
+ "required": ["name"],
+ "additionalProperties": False
+ }
+
+ cloud_config_schema = {
+ "title": "Cloud-init configuration schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ #"key-pairs": {"type" : "array", "items": key_pair_schema},
+ "key-pairs": {"type" : "array", "items": {"type":"string"}},
+ "users": {"type" : "array", "items": cloud_config_user_schema}
+ },
+ "additionalProperties": False
+ }
+
+ internal_connection_element_schema = {
+ "type":"object",
+ "properties":{
+ "VNFC": name_schema,
+ "local_iface_name": name_schema
+ }
+ }
+
+ internal_connection_element_schema_v02 = {
+ "type":"object",
+ "properties":{
+ "VNFC": name_schema,
+ "local_iface_name": name_schema,
+ "ip_address": ip_schema
+ }
+ }
+
+ internal_connection_schema = {
+ "type":"object",
+ "properties":{
+ "name": name_schema,
+ "description":description_schema,
+ "type":{"type":"string", "enum":["bridge","data","ptp"]},
+ "elements": {"type" : "array", "items": internal_connection_element_schema, "minItems":2}
+ },
+ "required": ["name", "type", "elements"],
+ "additionalProperties": False
+ }
+
+ internal_connection_schema_v02 = {
+ "type":"object",
+ "properties":{
+ "name": name_schema,
+ "description":description_schema,
+ "type": {"type": "string", "enum":["e-line", "e-lan"]},
+ "implementation": {"type": "string", "enum":["overlay", "underlay"]},
+ "ip-profile": ip_profile_schema,
+ "elements": {"type" : "array", "items": internal_connection_element_schema_v02, "minItems":2}
+ },
+ "required": ["name", "type", "implementation", "elements"],
+ "additionalProperties": False
+ }
+
+ external_connection_schema = {
+ "type":"object",
+ "properties":{
+ "name": name_schema,
+ "type":{"type":"string", "enum":["mgmt","bridge","data"]},
+ "VNFC": name_schema,
+ "local_iface_name": name_schema ,
+ "description":description_schema
+ },
+ "required": ["name", "type", "VNFC", "local_iface_name"],
+ "additionalProperties": False
+ }
+
+ #Not yet used
+ external_connection_schema_v02 = {
+ "type":"object",
+ "properties":{
+ "name": name_schema,
+ "mgmt": {"type":"boolean"},
+ "type": {"type": "string", "enum":["e-line", "e-lan"]},
+ "implementation": {"type": "string", "enum":["overlay", "underlay"]},
+ "VNFC": name_schema,
+ "local_iface_name": name_schema ,
+ "description":description_schema
+ },
+ "required": ["name", "type", "VNFC", "local_iface_name"],
+ "additionalProperties": False
+ }
+
+ interfaces_schema={
+ "type":"array",
+ "items":{
+ "type":"object",
+ "properties":{
+ "name":name_schema,
+ "dedicated":{"type":"string","enum":["yes","no","yes:sriov"]},
+ "bandwidth":bandwidth_schema,
+ "vpci":pci_schema,
+ "mac_address": mac_schema
+ },
+ "additionalProperties": False,
+ "required": ["name","dedicated", "bandwidth"]
+ }
+ }
+
+ bridge_interfaces_schema={
+ "type":"array",
+ "items":{
+ "type":"object",
+ "properties":{
+ "name": name_schema,
+ "bandwidth":bandwidth_schema,
+ "vpci":pci_schema,
+ "mac_address": mac_schema,
+ "model": {"type":"string", "enum":["virtio","e1000","ne2k_pci","pcnet","rtl8139"]},
+ "port-security": {"type" : "boolean"},
+ "floating-ip": {"type" : "boolean"}
+ },
+ "additionalProperties": False,
+ "required": ["name"]
+ }
+ }
+
+ devices_schema={
+ "type":"array",
+ "items":{
+ "type":"object",
+ "properties":{
+ "type":{"type":"string", "enum":["disk","cdrom","xml"] },
+ "image": path_schema,
+ "image name": name_schema,
+ "image checksum": checksum_schema,
+ "image metadata": metadata_schema,
+ "size": size_schema,
+ "vpci":pci_schema,
+ "xml":xml_text_schema,
+ },
+ "additionalProperties": False,
+ "required": ["type"]
+ }
+ }
+
+
+ numa_schema = {
+ "type": "object",
+ "properties": {
+ "memory":integer1_schema,
+ "cores":integer1_schema,
+ "paired-threads":integer1_schema,
+ "threads":integer1_schema,
+ "cores-id":{"type":"array","items":integer0_schema},
+ "paired-threads-id":{"type":"array","items":{"type":"array","minItems":2,"maxItems":2,"items":integer0_schema}},
+ "threads-id":{"type":"array","items":integer0_schema},
+ "interfaces":interfaces_schema
+ },
+ "additionalProperties": False,
+ #"required": ["memory"]
+ }
+
+ config_files_schema = {
+ "title": "Config files for cloud init schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ "dest": path_schema,
+ "encoding": {"type": "string", "enum": ["b64", "base64", "gz", "gz+b64", "gz+base64", "gzip+b64", "gzip+base64"]}, #by default text
+ "content": {"type": "string"},
+ "permissions": {"type": "string"}, # typically octal notation '0644'
+ "owner": {"type": "string"}, # format: owner:group
+
+ },
+ "additionalProperties": False,
+ "required": ["dest", "content"],
+ }
+
+ boot_data_vdu_schema = {
+ "title": "Boot data (Cloud-init) configuration schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties":{
+ "key-pairs": {"type" : "array", "items": {"type":"string"}},
+ "users": {"type" : "array", "items": cloud_config_user_schema},
+ "user-data": {"type" : "string"}, # scrip to run
+ "config-files": {"type": "array", "items": config_files_schema},
+ # NOTE: "user-data" is mutually exclusive with "users" and "config-files", because users/files are injected using user-data
+ "boot-data-drive": {"type": "boolean"},
+ },
+ "additionalProperties": False,
+ }
+
+ vnfc_schema = {
+ "type":"object",
+ "properties":{
+ "name": name_schema,
+ "description": description_schema,
+ "VNFC image": {"oneOf": [path_schema, http_schema]},
+ "image name": name_schema,
+ "image checksum": checksum_schema,
+ "image metadata": metadata_schema,
+ #"cloud-config": cloud_config_schema, #common for all vnfs in the scenario
+ "processor": {
+ "type":"object",
+ "properties":{
+ "model":description_schema,
+ "features":{"type":"array","items":nameshort_schema}
+ },
+ "required": ["model"],
+ "additionalProperties": False
+ },
+ "hypervisor": {
+ "type":"object",
+ "properties":{
+ "type":nameshort_schema,
+ "version":description_schema
+ },
+ },
+ "ram":integer0_schema,
+ "vcpus":integer0_schema,
+ "disk": integer1_schema,
+ "numas": {
+ "type": "array",
+ "items": numa_schema
+ },
+ "bridge-ifaces": bridge_interfaces_schema,
+ "devices": devices_schema,
+ "boot-data" : boot_data_vdu_schema
+
+ },
+ "required": ["name"],
+ "oneOf": [
+ {"required": ["VNFC image"]},
+ {"required": ["image name"]}
+ ],
+ "additionalProperties": False
+ }
+
+ vnfd_schema_v01 = {
+ "title":"vnfd information schema v0.1",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "vnf":{
+ "type":"object",
+ "properties":{
+ "name": name_schema,
+ "description": description_schema,
+ "class": nameshort_schema,
+ "public": {"type" : "boolean"},
+ "physical": {"type" : "boolean"},
+ "tenant_id": id_schema, #only valid for admin
+ "external-connections": {"type" : "array", "items": external_connection_schema, "minItems":1},
+ "internal-connections": {"type" : "array", "items": internal_connection_schema, "minItems":1},
+ "VNFC":{"type" : "array", "items": vnfc_schema, "minItems":1}
+ },
+ "required": ["name","external-connections"],
+ "additionalProperties": True
+ }
+ },
+ "required": ["vnf"],
+ "additionalProperties": False
+ }
+
+ #VNFD schema for OSM R1
+ vnfd_schema_v02 = {
+ "title":"vnfd information schema v0.2",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "schema_version": {"type": "string", "enum": ["0.2"]},
+ "vnf":{
+ "type":"object",
+ "properties":{
+ "name": name_schema,
+ "description": description_schema,
+ "class": nameshort_schema,
+ "public": {"type" : "boolean"},
+ "physical": {"type" : "boolean"},
+ "tenant_id": id_schema, #only valid for admin
+ "external-connections": {"type" : "array", "items": external_connection_schema, "minItems":1},
+ "internal-connections": {"type" : "array", "items": internal_connection_schema_v02, "minItems":1},
+ # "cloud-config": cloud_config_schema, #common for all vnfcs
+ "VNFC":{"type" : "array", "items": vnfc_schema, "minItems":1}
+ },
+ "required": ["name"],
+ "additionalProperties": True
+ }
+ },
+ "required": ["vnf", "schema_version"],
+ "additionalProperties": False
+ }
+
+ #vnfd_schema = vnfd_schema_v01
+ #{
+ # "title":"vnfd information schema v0.2",
+ # "$schema": "http://json-schema.org/draft-04/schema#",
+ # "oneOf": [vnfd_schema_v01, vnfd_schema_v02]
+ #}
+
+ graph_schema = {
+ "title":"graphical scenario descriptor information schema",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type":"object",
+ "properties":{
+ "x": integer0_schema,
+ "y": integer0_schema,
+ "ifaces": {
+ "type":"object",
+ "properties":{
+ "left": {"type":"array"},
+ "right": {"type":"array"},
+ "bottom": {"type":"array"},
+ }
+ }
+ },
+ "required": ["x","y"]
+ }
+
+ # v0.1 network scenario descriptor: a "topology" of nodes (VNFs/networks) plus
+ # the connections between them. No schema_version field in this legacy format.
+ nsd_schema_v01 = {
+     "title":"network scenario descriptor information schema v0.1",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "name":name_schema,
+         "description": description_schema,
+         "tenant_id": id_schema, #only valid for admin
+         "public": {"type": "boolean"},
+         "topology":{
+             "type":"object",
+             "properties":{
+                 "nodes": {
+                     "type":"object",
+                     "patternProperties":{
+                         ".": {
+                             "type": "object",
+                             "properties":{
+                                 "type":{"type":"string", "enum":["VNF", "other_network", "network", "external_network"]},
+                                 "vnf_id": id_schema,
+                                 "graph": graph_schema,
+                             },
+                             "patternProperties":{
+                                 "^(VNF )?model$": {"type": "string"}
+                             },
+                             "required": ["type"]
+                         }
+                     }
+                 },
+                 "connections": {
+                     "type":"object",
+                     "patternProperties":{
+                         ".": {
+                             "type": "object",
+                             "properties":{
+                                 "nodes":{"oneOf":[{"type":"object", "minProperties":2}, {"type":"array", "minLength":1}]},
+                                 "type": {"type": "string", "enum":["link", "external_network", "dataplane_net", "bridge_net"]},
+                                 "graph": graph_schema
+                             },
+                             "required": ["nodes"]
+                         },
+                     }
+                 }
+             },
+             "required": ["nodes"],
+             "additionalProperties": False
+         }
+     },
+     "required": ["name","topology"],
+     "additionalProperties": False
+ }
+
+ # v0.2 network scenario descriptor: flat "vnfs"/"networks" maps under "scenario"
+ # instead of the v0.1 "topology" tree; requires an explicit schema_version.
+ nsd_schema_v02 = {
+     "title":"network scenario descriptor information schema v0.2",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "schema_version": schema_version_2,
+         "scenario":{
+             "type":"object",
+             "properties":{
+                 "name": name_schema,
+                 "description": description_schema,
+                 "tenant_id": id_schema, #only valid for admin
+                 "public": {"type": "boolean"},
+                 "vnfs": {
+                     "type":"object",
+                     "patternProperties":{
+                         ".": {
+                             "type": "object",
+                             "properties":{
+                                 "vnf_id": id_schema,
+                                 "graph": graph_schema,
+                                 "vnf_name": name_schema,
+                             },
+                         }
+                     },
+                     "minProperties": 1
+                 },
+                 "networks": {
+                     "type":"object",
+                     "patternProperties":{
+                         ".": {
+                             "type": "object",
+                             "properties":{
+                                 "interfaces":{"type":"array", "minLength":1},
+                                 "type": {"type": "string", "enum":["dataplane", "bridge"]},
+                                 "external" : {"type": "boolean"},
+                                 "graph": graph_schema
+                             },
+                             "required": ["interfaces"]
+                         },
+                     }
+                 },
+ 
+             },
+             "required": ["vnfs", "name"],
+             "additionalProperties": False
+         }
+     },
+     "required": ["scenario","schema_version"],
+     "additionalProperties": False
+ }
+
+ #NSD schema for OSM R1
+ # v0.3 network scenario descriptor: extends v0.2 with per-scenario cloud-config,
+ # per-vnf internal-connections overrides (ip-profile / fixed ip_address per VNFC
+ # interface) and richer network typing (e-line/e-lan, overlay/underlay).
+ nsd_schema_v03 = {
+     "title":"network scenario descriptor information schema v0.3",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "schema_version": {"type": "string", "enum": ["0.3"]},
+         "scenario":{
+             "type":"object",
+             "properties":{
+                 "name": name_schema,
+                 "description": description_schema,
+                 "tenant_id": id_schema, #only valid for admin
+                 "public": {"type": "boolean"},
+                 "cloud-config": cloud_config_schema, #common for all vnfs in the scenario
+                 #"datacenter": name_schema,
+                 "vnfs": {
+                     "type":"object",
+                     "patternProperties":{
+                         ".": {
+                             "type": "object",
+                             "properties":{
+                                 "vnf_id": id_schema,
+                                 "graph": graph_schema,
+                                 "vnf_name": name_schema,
+                                 #"cloud-config": cloud_config_schema, #particular for a vnf
+                                 #"datacenter": name_schema,
+                                 "internal-connections": {
+                                     "type": "object",
+                                     "patternProperties": {
+                                         ".": {
+                                             "type": "object",
+                                             "properties": {
+                                                 "ip-profile": ip_profile_schema,
+                                                 "elements": {
+                                                     "type" : "array",
+                                                     "items":{
+                                                         "type":"object",
+                                                         "properties":{
+                                                             "VNFC": name_schema,
+                                                             "local_iface_name": name_schema,
+                                                             "ip_address": ip_schema
+                                                         },
+                                                         "required": ["VNFC", "local_iface_name"],
+                                                     }
+                                                 }
+                                             }
+                                         }
+                                     }
+                                 }
+                             },
+                         }
+                     },
+                     "minProperties": 1
+                 },
+                 "networks": {
+                     "type":"object",
+                     "patternProperties":{
+                         ".": {
+                             "type": "object",
+                             "properties":{
+                                 "interfaces":{
+                                     "type":"array",
+                                     "minLength":1,
+                                     "items":{
+                                         "type":"object",
+                                         "properties":{
+                                             "vnf": name_schema,
+                                             "vnf_interface": name_schema,
+                                             "ip_address": ip_schema
+                                         },
+                                         "required": ["vnf", "vnf_interface"],
+                                     }
+                                 },
+                                 "type": {"type": "string", "enum":["e-line", "e-lan"]},
+                                 "implementation": {"type": "string", "enum":["overlay", "underlay"]},
+                                 "external" : {"type": "boolean"},
+                                 "graph": graph_schema,
+                                 "ip-profile": ip_profile_schema
+                             },
+                             "required": ["interfaces"]
+                         },
+                     }
+                 },
+ 
+             },
+             "required": ["vnfs", "networks","name"],
+             "additionalProperties": False
+         }
+     },
+     "required": ["scenario","schema_version"],
+     "additionalProperties": False
+ }
+
+ #scenario_new_schema = {
+ #    "title":"new scenario information schema",
+ #    "$schema": "http://json-schema.org/draft-04/schema#",
+ #    #"oneOf": [nsd_schema_v01, nsd_schema_v02]
+ #    "oneOf": [nsd_schema_v01]
+ #}
+ 
+ # Schema for PUT edits of a stored scenario: name/description plus per-node
+ # graph position updates. Node keys must be full UUIDs (see patternProperties).
+ scenario_edit_schema = {
+     "title":"edit scenario information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "name":name_schema,
+         "description": description_schema,
+         "topology":{
+             "type":"object",
+             "properties":{
+                 "nodes": {
+                     "type":"object",
+                     "patternProperties":{
+                         "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$": {
+                             "type":"object",
+                             "properties":{
+                                 "graph":{
+                                     "type": "object",
+                                     "properties":{
+                                         "x": integer0_schema,
+                                         "y": integer0_schema,
+                                         "ifaces":{ "type": "object"}
+                                     }
+                                 },
+                                 "description": description_schema,
+                                 "name": name_schema
+                             }
+                         }
+                     }
+                 }
+             },
+             "required": ["nodes"],
+             "additionalProperties": False
+         }
+     },
+     "additionalProperties": False
+ }
+
+ # Schema for a scenario action request. Exactly one action (start/deploy/
+ # reserve/verify) per request (min/maxProperties: 1); each needs instance_name.
+ scenario_action_schema = {
+     "title":"scenario action information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "start":{
+             "type": "object",
+             "properties": {
+                 "instance_name":name_schema,
+                 "description":description_schema,
+                 "datacenter": {"type": "string"}
+             },
+             "required": ["instance_name"]
+         },
+         "deploy":{
+             "type": "object",
+             "properties": {
+                 "instance_name":name_schema,
+                 "description":description_schema,
+                 "datacenter": {"type": "string"}
+             },
+             "required": ["instance_name"]
+         },
+         "reserve":{
+             "type": "object",
+             "properties": {
+                 "instance_name":name_schema,
+                 "description":description_schema,
+                 "datacenter": {"type": "string"}
+             },
+             "required": ["instance_name"]
+         },
+         "verify":{
+             "type": "object",
+             "properties": {
+                 "instance_name":name_schema,
+                 "description":description_schema,
+                 "datacenter": {"type": "string"}
+             },
+             "required": ["instance_name"]
+         }
+     },
+     "minProperties": 1,
+     "maxProperties": 1,
+     "additionalProperties": False
+ }
+
+ # Schema for creating a scenario instance: maps scenario vnfs/networks onto a
+ # datacenter, with optional per-vnf connection overrides and per-network VIM
+ # mapping (netmap-create / netmap-use) and multi-site support.
+ instance_scenario_create_schema_v01 = {
+     "title":"instance scenario create information schema v0.1",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "schema_version": {"type": "string", "enum": ["0.1"]},
+         "instance":{
+             "type":"object",
+             "properties":{
+                 "name":name_schema,
+                 "description":description_schema,
+                 "datacenter": name_schema,
+                 "scenario" : name_schema, #can be an UUID or name
+                 "action":{"enum": ["deploy","reserve","verify" ]},
+                 "connect_mgmt_interfaces": {"oneOf": [{"type":"boolean"}, {"type":"object"}]},# can be true or a dict with datacenter: net_name
+                 "cloud-config": cloud_config_schema, #common to all vnfs in the instance scenario
+                 "vnfs":{ #mapping from scenario to datacenter
+                     "type": "object",
+                     "patternProperties":{
+                         ".": {
+                             "type": "object",
+                             "properties":{
+                                 "name": name_schema, #override vnf name
+                                 "datacenter": name_schema,
+                                 #"metadata": {"type": "object"},
+                                 #"user_data": {"type": "string"}
+                                 #"cloud-config": cloud_config_schema, #particular for a vnf
+                                 "external-connections": {
+                                     "type": "object",
+                                     "patternProperties": {
+                                         ".": {
+                                             "type": "object",
+                                             "properties": {
+                                                 "vim-network-name": name_schema,
+                                                 "ip_address": ip_schema
+                                             }
+                                         }
+                                     }
+                                 },
+                                 "internal-connections": {
+                                     "type": "object",
+                                     "patternProperties": {
+                                         ".": {
+                                             "type": "object",
+                                             "properties": {
+                                                 "ip-profile": ip_profile_schema,
+                                                 "elements": {
+                                                     "type" : "array",
+                                                     "items":{
+                                                         "type":"object",
+                                                         "properties":{
+                                                             "VNFC": name_schema,
+                                                             "local_iface_name": name_schema,
+                                                             "ip_address": ip_schema
+                                                         },
+                                                         "required": ["VNFC", "local_iface_name"],
+                                                     }
+                                                 }
+                                             }
+                                         }
+                                     }
+                                 }
+                             }
+                         }
+                     },
+                 },
+                 "networks":{ #mapping from scenario to datacenter
+                     "type": "object",
+                     "patternProperties":{
+                         ".": {
+                             "type": "object",
+                             "properties":{
+                                 "interfaces":{
+                                     "type":"array",
+                                     "minLength":1,
+                                     "items":{
+                                         "type":"object",
+                                         "properties":{
+                                             "ip_address": ip_schema,
+                                             "datacenter": name_schema,
+                                             "vim-network-name": name_schema
+                                         },
+                                         "patternProperties":{
+                                             ".": {"type": "string"}
+                                         }
+                                     }
+                                 },
+                                 "ip-profile": ip_profile_schema,
+                                 #if the network connects VNFs deployed at different sites, you must specify one entry per site that this network connect to
+                                 "sites": {
+                                     "type":"array",
+                                     "minLength":1,
+                                     "items":{
+                                         "type":"object",
+                                         "properties":{
+                                             # By default for an scenario 'external' network openmano looks for an existing VIM network to map this external scenario network,
+                                             # for other networks openmano creates at VIM
+                                             # Use netmap-create to force to create an external scenario network
+                                             "netmap-create": {"oneOf":[name_schema,{"type": "null"}]}, #datacenter network to use. Null if must be created as an internal net
+                                             #netmap-use: Indicates an existing VIM network that must be used for this scenario network.
+                                             #Can use both the VIM network name (if it is not ambiguous) or the VIM net UUID
+                                             #If both 'netmap-create' and 'netmap-use' are supplied, netmap-use precedes, but if fails openmano follows the netmap-create
+                                             #In other words, it is the same as 'try to map to the VIM network (netmap-use) if it exists, and if not create the network (netmap-create)'
+                                             "netmap-use": name_schema, #
+                                             "vim-network-name": name_schema, #override network name
+                                             #"ip-profile": ip_profile_schema,
+                                             "datacenter": name_schema,
+                                         }
+                                     }
+                                 },
+ 
+ 
+ 
+                             }
+                         }
+                     },
+                 },
+             },
+             "additionalProperties": False,
+             "required": ["name"]
+         },
+     },
+     "required": ["instance"],
+     "additionalProperties": False
+ }
+
+ # Schema for actions on a running instance (lifecycle verbs take null payloads);
+ # "vnfs"/"vms" restrict the action to a subset of the instance.
+ instance_scenario_action_schema = {
+     "title":"instance scenario action information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "start":{"type": "null"},
+         "pause":{"type": "null"},
+         "resume":{"type": "null"},
+         "shutoff":{"type": "null"},
+         "shutdown":{"type": "null"},
+         "forceOff":{"type": "null"},
+         "rebuild":{"type": "null"},
+         "reboot":{
+             "type": ["object","null"],
+         },
+         "console": {"type": ["string", "null"], "enum": ["novnc", "xvpvnc", "rdp-html5", "spice-html5", None]},
+         "vnfs":{"type": "array", "items":{"type":"string"}},
+         "vms":{"type": "array", "items":{"type":"string"}}
+     },
+     "minProperties": 1,
+     #"maxProperties": 1,
+     "additionalProperties": False
+ }
++
++# Property set shared by the SDN controller create and edit schemas below.
++# "dpid" is an OpenFlow datapath id: 8 colon-separated hex byte pairs whose
++# second nibble of the first byte must be even.
++sdn_controller_properties={
++    "name": name_schema,
++    "dpid": {"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){7}$"},
++    "ip": ip_schema,
++    "port": port_schema,
++    "type": {"type": "string", "enum": ["opendaylight","floodlight","onos"]},
++    "version": {"type" : "string", "minLength":1, "maxLength":12},
++    "user": nameshort_schema,
++    "password": passwd_schema
++}
++# Creation schema: name/port/ip/dpid/type are mandatory.
++sdn_controller_schema = {
++    "title":"sdn controller information schema",
++    "$schema": "http://json-schema.org/draft-04/schema#",
++    "type":"object",
++    "properties":{
++        "sdn_controller":{
++            "type":"object",
++            "properties":sdn_controller_properties,
++            "required": ["name", "port", 'ip', 'dpid', 'type'],
++            "additionalProperties": False
++        }
++    },
++    "required": ["sdn_controller"],
++    "additionalProperties": False
++}
++
++# Edit schema: same properties but nothing inside sdn_controller is required,
++# so partial updates are allowed.
++sdn_controller_edit_schema = {
++    "title":"sdn controller update information schema",
++    "$schema": "http://json-schema.org/draft-04/schema#",
++    "type":"object",
++    "properties":{
++        "sdn_controller":{
++            "type":"object",
++            "properties":sdn_controller_properties,
++            "additionalProperties": False
++        }
++    },
++    "required": ["sdn_controller"],
++    "additionalProperties": False
++}
++
++# Mapping of compute-node PCI ports to SDN switch ports for dataplane underlay.
++sdn_port_mapping_schema  = {
++    "$schema": "http://json-schema.org/draft-04/schema#",
++    "title":"sdn port mapping information schema",
++    "type": "object",
++    "properties": {
++        "sdn_port_mapping": {
++            "type": "array",
++            "items": {
++                "type": "object",
++                "properties": {
++                    "compute_node": nameshort_schema,
++                    "ports": {
++                        "type": "array",
++                        "items": {
++                            "type": "object",
++                            "properties": {
++                                "pci": pci_schema,
++                                "switch_port": nameshort_schema,
++                                "switch_mac": mac_schema
++                            },
++                            "required": ["pci"]
++                        }
++                    }
++                },
++                "required": ["compute_node", "ports"]
++            }
++        }
++    },
++    "required": ["sdn_port_mapping"]
++}
--- /dev/null
-__author__="Alfonso Tierno"
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ '''
+ openmano python client used to interact with openmano-server
+ '''
-__version__="0.0.1-r467"
-version_date="Mar 2016"
++__author__="Alfonso Tierno, Pablo Montes"
+ __date__ ="$09-Mar-2016 09:09:48$"
++__version__="0.0.2-r468"
++version_date="Feb 2017"
+
+ import requests
+ import json
+ import yaml
+ import logging
+ import sys
+ if sys.version_info.major == 3:
+ from urllib.parse import quote
+ elif sys.version_info.major == 2:
+ from urllib import quote
+
+ # Exception hierarchy of the client: all errors derive from OpenmanoException
+ # so callers can catch the base class.
+ class OpenmanoException(Exception):
+     '''Common Exception for all openmano client exceptions'''
+ 
+ class OpenmanoBadParamsException(OpenmanoException):
+     '''Bad or missing input parameters'''
+ 
+ class OpenmanoResponseException(OpenmanoException):
+     '''Unexpected response from openmano server'''
+ 
+ class OpenmanoNotFoundException(OpenmanoException):
+     '''Not found at server'''
+
+ # class vnf():
+ # def __init__(self, message):
+ # print "Error: %s" %message
+ # print
+ # self.print_usage()
+ # #self.print_help()
+ # print
+ # print "Type 'openmano -h' for help"
+
+ class openmanoclient():
+     # Default HTTP headers: the openmano API speaks yaml on both directions.
+     headers_req = {'Accept': 'application/yaml', 'content-type': 'application/yaml'}
+     
+     def __init__(self, **kwargs):
+         '''Create a client. Recognized kwargs: username, password, endpoint_url,
+         tenant_id/tenant_name, datacenter_id/datacenter_name, logger (logger name),
+         debug (bool, raises log level to DEBUG). Tenant/datacenter uuids are
+         resolved lazily on first use and cached in self.tenant / self.datacenter.
+         '''
+         self.username = kwargs.get("username")
+         self.password = kwargs.get("password")
+         self.endpoint_url = kwargs.get("endpoint_url")
+         self.tenant_id = kwargs.get("tenant_id")
+         self.tenant_name = kwargs.get("tenant_name")
+         self.tenant = None    # cached resolved tenant uuid
+         self.datacenter_id = kwargs.get("datacenter_id")
+         self.datacenter_name = kwargs.get("datacenter_name")
+         self.datacenter = None    # cached resolved datacenter uuid
+         self.logger = logging.getLogger(kwargs.get('logger','manoclient'))
+         if kwargs.get("debug"):
+             self.logger.setLevel(logging.DEBUG)
+
+     def __getitem__(self, index):
+         '''Dict-style read access to the connection settings; raises KeyError for unknown keys.'''
+         if index=='tenant_name':
+             return self.tenant_name
+         elif index=='tenant_id':
+             return self.tenant_id
+         elif index=='datacenter_name':
+             return self.datacenter_name
+         elif index=='datacenter_id':
+             return self.datacenter_id
+         elif index=='username':
+             return self.username
+         elif index=='password':
+             return self.password
+         elif index=='endpoint_url':
+             return self.endpoint_url
+         else:
+             raise KeyError("Invalid key '%s'" %str(index))
+         
+     def __setitem__(self,index, value):
+         '''Dict-style write access to the connection settings. Any change drops the
+         cached tenant/datacenter uuids so they are re-resolved with the new values.'''
+         if index=='tenant_name':
+             self.tenant_name = value
+         elif index=='tenant_id':
+             self.tenant_id = value
+         elif index=='datacenter_name':
+             self.datacenter_name = value
+         elif index=='datacenter_id':
+             self.datacenter_id = value
+         elif index=='username':
+             self.username = value
+         elif index=='password':
+             self.password = value
+         elif index=='endpoint_url':
+             self.endpoint_url = value
+         else:
+             raise KeyError("Invalid key '%s'" %str(index))
+         self.tenant = None # force to reload tenant with different credentials
+         self.datacenter = None # force to reload datacenter with different credentials
+
+ def _parse(self, descriptor, descriptor_format, response=False):
+ #try yaml
+ if descriptor_format and descriptor_format != "json" and descriptor_format != "yaml":
+ raise OpenmanoBadParamsException("'descriptor_format' must be a 'json' or 'yaml' text")
+ if descriptor_format != "json":
+ try:
+ return yaml.load(descriptor)
+ except yaml.YAMLError as exc:
+ error_pos = ""
+ if hasattr(exc, 'problem_mark'):
+ mark = exc.problem_mark
+ error_pos = " at line:{} column:{}s".format(mark.line+1, mark.column+1)
+ error_text = "yaml format error" + error_pos
+ elif descriptor_format != "yaml":
+ try:
+ return json.loads(descriptor)
+ except Exception as e:
+ if response:
+ error_text = "json format error" + str(e)
+
+ if response:
+ raise OpenmanoResponseException(error_text)
+ raise OpenmanoBadParamsException(error_text)
+
+     def _parse_yaml(self, descriptor, response=False):
+         '''Parse yaml text; on error raise OpenmanoResponseException (response=True,
+         i.e. server reply) or OpenmanoBadParamsException (client-supplied text).'''
+         try:
+             return yaml.load(descriptor)
+         except yaml.YAMLError as exc:
+             error_pos = ""
+             if hasattr(exc, 'problem_mark'):
+                 mark = exc.problem_mark
+                 # +1: yaml marks are 0-based, report 1-based positions
+                 error_pos = " at line:{} column:{}s".format(mark.line+1, mark.column+1)
+             error_text = "yaml format error" + error_pos
+             if response:
+                 raise OpenmanoResponseException(error_text)
+             raise OpenmanoBadParamsException(error_text)
+
+
+     def _get_item_uuid(self, item, item_id=None, item_name=None, all_tenants=False):
+         '''Resolve the uuid of an item ("tenants", "datacenters", ...) by uuid or name.
+         all_tenants: None -> no tenant in URL; False -> current tenant; other -> "/any".
+         Raises OpenmanoNotFoundException when not found or when the name is ambiguous.
+         '''
+         if all_tenants == None:
+             tenant_text = ""
+         elif all_tenants == False:
+             tenant_text = "/" + self.tenant    # NOTE(review): uses self.tenant directly; presumably _get_tenant() ran before — confirm callers
+         else:
+             tenant_text = "/any"
+         URLrequest = "{}{}/{}".format(self.endpoint_url, tenant_text, item)
+         self.logger.debug("GET %s", URLrequest )
+         mano_response = requests.get(URLrequest, headers=self.headers_req)
+         self.logger.debug("openmano response: %s", mano_response.text )
+         content = self._parse_yaml(mano_response.text, response=True)
+         #print content
+         found = 0
+         if not item_id and not item_name:
+             # item[:-1] strips the plural 's' for the message ("tenants" -> "tenant")
+             raise OpenmanoResponseException("Missing either {0}_name or {0}_id".format(item[:-1]))
+         for i in content[item]:
+             if item_id and i["uuid"] == item_id:
+                 return item_id
+             elif item_name and i["name"] == item_name:
+                 uuid = i["uuid"]
+                 found += 1
+             
+         if found == 0:
+             if item_id:
+                 raise OpenmanoNotFoundException("No {} found with id '{}'".format(item[:-1], item_id))
+             else:
+                 #print(item, item_name)
+                 raise OpenmanoNotFoundException("No {} found with name '{}'".format(item[:-1], item_name) )
+         elif found > 1:
+             raise OpenmanoNotFoundException("{} {} found with name '{}'. uuid must be used".format(found, item, item_name))
+         return uuid
+
+     def _get_item(self, item, uuid=None, name=None, all_tenants=False):
+         '''GET one item by uuid (resolving from name if needed) and return the parsed
+         body; raises OpenmanoResponseException on non-200 status.'''
+         if all_tenants:
+             tenant_text = "/any"
+         elif all_tenants==None:
+             tenant_text = ""
+         else:
+             tenant_text = "/"+self._get_tenant()
+         if not uuid:
+             #check that exist
+             uuid = self._get_item_uuid(item, uuid, name, all_tenants)
+         
+         URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
+         self.logger.debug("GET %s", URLrequest )
+         mano_response = requests.get(URLrequest, headers=self.headers_req)
+         self.logger.debug("openmano response: %s", mano_response.text )
+ 
+         content = self._parse_yaml(mano_response.text, response=True)
+         if mano_response.status_code==200:
+             return content
+         else:
+             raise OpenmanoResponseException(str(content))        
+
+     def _get_tenant(self):
+         '''Return the tenant uuid, resolving and caching it on first call.'''
+         if not self.tenant:
+             self.tenant = self._get_item_uuid("tenants", self.tenant_id, self.tenant_name, None)
+         return self.tenant
+     
+     def _get_datacenter(self):
+         '''Return the datacenter uuid, resolving and caching it on first call
+         (the tenant must be resolvable first, as the lookup is tenant-scoped).'''
+         if not self.tenant:
+             self._get_tenant()
+         if not self.datacenter:
+             self.datacenter = self._get_item_uuid("datacenters", self.datacenter_id, self.datacenter_name, False)
+         return self.datacenter
+
+     def _create_item(self, item, descriptor, all_tenants=False):
+         '''POST a yaml-dumped descriptor to create an item; return the parsed server
+         body or raise OpenmanoResponseException on non-200 status.'''
+         if all_tenants:
+             tenant_text = "/any"
+         elif all_tenants==None:
+             tenant_text = ""
+         else:
+             tenant_text = "/"+self._get_tenant()
+         payload_req = yaml.safe_dump(descriptor)
+             
+         #print payload_req
+             
+         URLrequest = "{}{}/{}".format(self.endpoint_url, tenant_text, item)
+         self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+         mano_response = requests.post(URLrequest, headers = self.headers_req, data=payload_req)
+         self.logger.debug("openmano response: %s", mano_response.text )
+  
+         content = self._parse_yaml(mano_response.text, response=True)
+         if mano_response.status_code==200:
+             return content
+         else:
+             raise OpenmanoResponseException(str(content))        
+
+     def _del_item(self, item, uuid=None, name=None, all_tenants=False):
+         '''DELETE one item by uuid (resolving from name if needed); return the parsed
+         server body or raise OpenmanoResponseException on non-200 status.'''
+         if all_tenants:
+             tenant_text = "/any"
+         elif all_tenants==None:
+             tenant_text = ""
+         else:
+             tenant_text = "/"+self._get_tenant()
+         if not uuid:
+             #check that exist
+             uuid = self._get_item_uuid(item, uuid, name, all_tenants)
+         
+         URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
+         self.logger.debug("DELETE %s", URLrequest )
+         mano_response = requests.delete(URLrequest, headers = self.headers_req)
+         self.logger.debug("openmano response: %s", mano_response.text )
+  
+         content = self._parse_yaml(mano_response.text, response=True)
+         if mano_response.status_code==200:
+             return content
+         else:
+             raise OpenmanoResponseException(str(content))        
+
+     def _list_item(self, item, all_tenants=False, filter_dict=None):
+         '''GET the collection of an item type, optionally filtered; filter keys and
+         values are url-quoted into the query string. Raises on non-200 status.'''
+         if all_tenants:
+             tenant_text = "/any"
+         elif all_tenants==None:
+             tenant_text = ""
+         else:
+             tenant_text = "/"+self._get_tenant()
+         
+         URLrequest = "{}{}/{}".format(self.endpoint_url, tenant_text, item)
+         separator="?"
+         if filter_dict:
+             for k in filter_dict:
+                 URLrequest += separator + quote(str(k)) + "=" + quote(str(filter_dict[k])) 
+                 separator = "&"
+         self.logger.debug("openmano GET %s", URLrequest)
+         mano_response = requests.get(URLrequest, headers=self.headers_req)
+         self.logger.debug("openmano response: %s", mano_response.text )
+  
+         content = self._parse_yaml(mano_response.text, response=True)
+         if mano_response.status_code==200:
+             return content
+         else:
+             raise OpenmanoResponseException(str(content))        
+
+ def _edit_item(self, item, descriptor, uuid=None, name=None, all_tenants=False):
+ if all_tenants:
+ tenant_text = "/any"
+ elif all_tenants==None:
+ tenant_text = ""
+ else:
+ tenant_text = "/"+self._get_tenant()
+
+ if not uuid:
+ #check that exist
+ uuid = self._get_item_uuid("tenants", uuid, name, all_tenants)
+
+ payload_req = yaml.safe_dump(descriptor)
+
+ #print payload_req
+
+ URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
+ self.logger.debug("openmano PUT %s %s", URLrequest, payload_req)
+ mano_response = requests.put(URLrequest, headers = self.headers_req, data=payload_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+
+     #TENANTS
+     def list_tenants(self, **kwargs):
+         '''Obtain a list of tenants
+         Params: can be filtered by 'uuid','name','description'
+         Return: Raises an exception on error
+                 Obtain a dictionary with format {'tenants':[{tenant1_info},{tenant2_info},...]}}
+         '''
+         # all_tenants=None: tenant collection URLs carry no tenant segment
+         return self._list_item("tenants", all_tenants=None, filter_dict=kwargs)
+ 
+     def get_tenant(self, uuid=None, name=None):
+         '''Obtain the information of a tenant
+         Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+         Return: Raises an exception on error, not found, found several
+                 Obtain a dictionary with format {'tenant':{tenant_info}}
+         '''
+         return self._get_item("tenants", uuid, name, all_tenants=None)
+ 
+     def delete_tenant(self, uuid=None, name=None):
+         '''Delete a tenant
+         Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+         Return: Raises an exception on error, not found, found several
+                 Obtain a dictionary with format {'result': text indicating deleted}
+         '''
+         return self._del_item("tenants", uuid, name, all_tenants=None)
+
+     def create_tenant(self, descriptor=None, descriptor_format=None, name=None, description=None):
+         '''Creates a tenant
+         Params: must supply a descriptor or/and just a name
+                 descriptor: with format {'tenant':{new_tenant_info}}
+                         newtenant_info must contain 'name', and optionally 'description'
+                         must be a dictionary or a json/yaml text.
+                 name: the tenant name. Overwrite descriptor name if any
+                 description: tenant descriptor.. Overwrite descriptor description if any
+         Return: Raises an exception on error
+                 Obtain a dictionary with format {'tenant':{new_tenant_info}}
+         '''
+         if isinstance(descriptor, str):
+             descriptor = self._parse(descriptor, descriptor_format)
+         elif descriptor:
+             pass
+         elif name:
+             # minimal descriptor built from the explicit name parameter
+             descriptor={"tenant": {"name": name}}
+         else:
+             raise OpenmanoBadParamsException("Missing descriptor")
+ 
+         if 'tenant' not in descriptor or len(descriptor)!=1:
+             raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
+         if name:
+             descriptor['tenant']['name'] = name
+         if description:
+             descriptor['tenant']['description'] = description
+ 
+         return self._create_item("tenants", descriptor, all_tenants=None)
+
+ def edit_tenant(self, uuid=None, name=None, descriptor=None, descriptor_format=None, new_name=None, new_description=None):
+ '''Edit the parameters of a tenant
+ Params: must supply a descriptor or/and a new_name or new_description
+ uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ descriptor: with format {'tenant':{params to change info}}
+ must be a dictionary or a json/yaml text.
+ name: the tenant name. Overwrite descriptor name if any
+ description: tenant descriptor.. Overwrite descriptor description if any
+ Return: Raises an exception on error, not found or found several
+ Obtain a dictionary with format {'tenant':{newtenant_info}}
+ '''
+
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif new_name or new_description:
+ descriptor={"tenant": {}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'tenant' not in descriptor or len(descriptor)!=1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
+ if new_name:
+ descriptor['tenant']['name'] = new_name
+ if new_description:
+ descriptor['tenant']['description'] = new_description
+
+ return self._edit_item("tenants", descriptor, uuid, name, all_tenants=None)
+
+     #DATACENTERS
+ 
+     def list_datacenters(self, all_tenants=False, **kwargs):
+         '''Obtain a list of datacenters, that are the VIM information at openmano
+         Params: can be filtered by 'uuid','name','vim_url','type'
+         Return: Raises an exception on error
+                 Obtain a dictionary with format {'datacenters':[{datacenter1_info},{datacenter2_info},...]}}
+         '''
+         return self._list_item("datacenters", all_tenants, filter_dict=kwargs)
+ 
+     def get_datacenter(self, uuid=None, name=None, all_tenants=False):
+         '''Obtain the information of a datacenter
+         Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+         Return: Raises an exception on error, not found, found several
+                 Obtain a dictionary with format {'datacenter':{datacenter_info}}
+         '''
+         return self._get_item("datacenters", uuid, name, all_tenants)
+ 
+     def delete_datacenter(self, uuid=None, name=None):
+         '''Delete a datacenter
+         Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+         Return: Raises an exception on error, not found, found several, not free
+                 Obtain a dictionary with format {'result': text indicating deleted}
+         '''
+         if not uuid:
+             # check that exist
+             # name lookup across all tenants; the delete itself is tenant-less
+             uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=True)
+         return self._del_item("datacenters", uuid, name, all_tenants=None)
+
+ def create_datacenter(self, descriptor=None, descriptor_format=None, name=None, vim_url=None, **kwargs):
+ #, type="openvim", public=False, description=None):
+ '''Creates a datacenter
+ Params: must supply a descriptor or/and just a name and vim_url
+ descriptor: with format {'datacenter':{new_datacenter_info}}
+ newdatacenter_info must contain 'name', 'vim_url', and optionally 'description'
+ must be a dictionary or a json/yaml text.
+ name: the datacenter name. Overwrite descriptor name if any
+ vim_url: the datacenter URL. Overwrite descriptor vim_url if any
+ vim_url_admin: the datacenter URL for administrative issues. Overwrite descriptor vim_url if any
+ vim_type: the datacenter type, can be openstack or openvim. Overwrite descriptor type if any
+ public: boolean, by default not public
+ description: datacenter description. Overwrite descriptor description if any
+ config: dictionary with extra configuration for the concrete datacenter
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'datacenter':{new_datacenter_info}}
+ '''
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif name and vim_url:
+ descriptor={"datacenter": {"name": name, "vim_url": vim_url}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor, or name and vim_url")
+
+ if 'datacenter' not in descriptor or len(descriptor)!=1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'datacenter' field")
+ if name:
+ descriptor['datacenter']['name'] = name
+ if vim_url:
+ descriptor['datacenter']['vim_url'] = vim_url
+ for param in kwargs:
+ descriptor['datacenter'][param] = kwargs[param]
+
+ return self._create_item("datacenters", descriptor, all_tenants=None)
+
+ def edit_datacenter(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
+ '''Edit the parameters of a datacenter
+ Params: must supply a descriptor or/and a parameter to change
+ uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ descriptor: with format {'datacenter':{params to change info}}
+ must be a dictionary or a json/yaml text.
+ parameters to change can be supplyied by the descriptor or as parameters:
+ new_name: the datacenter name
+ vim_url: the datacenter URL
+ vim_url_admin: the datacenter URL for administrative issues
+ vim_type: the datacenter type, can be openstack or openvim.
+ public: boolean, available to other tenants
+ description: datacenter description
+ Return: Raises an exception on error, not found or found several
+ Obtain a dictionary with format {'datacenter':{new_datacenter_info}}
+ '''
+
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif kwargs:
+ descriptor={"datacenter": {}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'datacenter' not in descriptor or len(descriptor)!=1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'datacenter' field")
+ for param in kwargs:
+ if param=='new_name':
+ descriptor['datacenter']['name'] = kwargs[param]
+ else:
+ descriptor['datacenter'][param] = kwargs[param]
+ return self._edit_item("datacenters", descriptor, uuid, name, all_tenants=None)
+
+ def attach_datacenter(self, uuid=None, name=None, descriptor=None, descriptor_format=None, vim_user=None, vim_password=None, vim_tenant_name=None, vim_tenant_id=None):
+ #check that exist
+ uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=True)
+ tenant_text = "/"+self._get_tenant()
+
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif vim_user or vim_password or vim_tenant_name or vim_tenant_id:
+ descriptor={"datacenter": {}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor or params")
+
+ if vim_user or vim_password or vim_tenant_name or vim_tenant_id:
+ #print args.name
+ try:
+ if vim_user:
+ descriptor['datacenter']['vim_user'] = vim_user
+ if vim_password:
+ descriptor['datacenter']['vim_password'] = vim_password
+ if vim_tenant_name:
+ descriptor['datacenter']['vim_tenant_name'] = vim_tenant_name
+ if vim_tenant_id:
+ descriptor['datacenter']['vim_tenant'] = vim_tenant_id
+ except (KeyError, TypeError) as e:
+ if str(e)=='datacenter': error_pos= "missing field 'datacenter'"
+ else: error_pos="wrong format"
+ raise OpenmanoBadParamsException("Wrong datacenter descriptor: " + error_pos)
+
+ payload_req = yaml.safe_dump(descriptor)
+ #print payload_req
+ URLrequest = "{}{}/datacenters/{}".format(self.endpoint_url, tenant_text, uuid)
+ self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+ mano_response = requests.post(URLrequest, headers = self.headers_req, data=payload_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+
+ def detach_datacenter(self, uuid=None, name=None):
+ if not uuid:
+ #check that exist
+ uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=False)
+ tenant_text = "/"+self._get_tenant()
+ URLrequest = "{}{}/datacenters/{}".format(self.endpoint_url, tenant_text, uuid)
+ self.logger.debug("openmano DELETE %s", URLrequest)
+ mano_response = requests.delete(URLrequest, headers = self.headers_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+
+ #VNFS
+ def list_vnfs(self, all_tenants=False, **kwargs):
+ '''Obtain a list of vnfs
+ Params: can be filtered by 'uuid','name','description','public', "tenant_id"
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'vnfs':[{vnf1_info},{vnf2_info},...]}}
+ '''
+ return self._list_item("vnfs", all_tenants, kwargs)
+
+ def get_vnf(self, uuid=None, name=None, all_tenants=False):
+ '''Obtain the information of a vnf
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several
+ Obtain a dictionary with format {'vnf':{vnf_info}}
+ '''
+ return self._get_item("vnfs", uuid, name, all_tenants)
+
+ def delete_vnf(self, uuid=None, name=None, all_tenants=False):
+ '''Delete a vnf
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several, not free
+ Obtain a dictionary with format {'result': text indicating deleted}
+ '''
+ return self._del_item("vnfs", uuid, name, all_tenants)
+
+ def create_vnf(self, descriptor=None, descriptor_format=None, **kwargs):
+ '''Creates a vnf
+ Params: must supply a descriptor
+ descriptor: with format {'vnf':{new_vnf_info}}
+ must be a dictionary or a json/yaml text.
+ must be a dictionary or a json/yaml text.
+ Other parameters can be:
+ name: the vnf name. Overwrite descriptor name if any
+ image_path: Can be a string or a string list. Overwrite the image_path at descriptor
+ description: vnf descriptor.. Overwrite descriptor description if any
+ public: boolean, available to other tenants
+ class: user text for vnf classification
+ tenant_id: Propietary tenant
+ ...
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'vnf':{new_vnf_info}}
+ '''
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'vnf' not in descriptor or len(descriptor)>2:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'vnf' field, and an optional version")
+ for param in kwargs:
+ if param == 'image_path':
+ #print args.name
+ try:
+ if isinstance(kwargs[param], str):
+ descriptor['vnf']['VNFC'][0]['VNFC image']=kwargs[param]
+ elif isinstance(kwargs[param], tuple) or isinstance(kwargs[param], list):
+ index=0
+ for image_path_ in kwargs[param]:
+ #print "image-path", image_path_
+ descriptor['vnf']['VNFC'][index]['VNFC image']=image_path_
+ index=index+1
+ else:
+ raise OpenmanoBadParamsException("Wrong image_path type. Expected text or a text list")
+ except (KeyError, TypeError) as e:
+ if str(e)=='vnf': error_pos= "missing field 'vnf'"
+ elif str(e)=='VNFC': error_pos= "missing field 'vnf':'VNFC'"
+ elif str(e)==str(index): error_pos= "field 'vnf':'VNFC' must be an array"
+ elif str(e)=='VNFC image': error_pos= "missing field 'vnf':'VNFC'['VNFC image']"
+ else: error_pos="wrong format"
+ raise OpenmanoBadParamsException("Wrong VNF descriptor: " + error_pos)
+ else:
+ descriptor['vnf'][param] = kwargs[param]
+ return self._create_item("vnfs", descriptor)
+
+ # def edit_vnf(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
+ # '''Edit the parameters of a vnf
+ # Params: must supply a descriptor or/and a parameters to change
+ # uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ # descriptor: with format {'vnf':{params to change info}}
    # parameters to change can be supplied by the descriptor or as parameters:
+ # new_name: the vnf name
+ # vim_url: the vnf URL
+ # vim_url_admin: the vnf URL for administrative issues
+ # vim_type: the vnf type, can be openstack or openvim.
+ # public: boolean, available to other tenants
+ # description: vnf description
+ # Return: Raises an exception on error, not found or found several
+ # Obtain a dictionary with format {'vnf':{new_vnf_info}}
+ # '''
+ #
+ # if isinstance(descriptor, str):
+ # descriptor = self.parse(descriptor, descriptor_format)
+ # elif descriptor:
+ # pass
+ # elif kwargs:
+ # descriptor={"vnf": {}}
+ # else:
+ # raise OpenmanoBadParamsException("Missing descriptor")
+ #
+ # if 'vnf' not in descriptor or len(descriptor)>2:
+ # raise OpenmanoBadParamsException("Descriptor must contain only one 'vnf' field")
+ # for param in kwargs:
+ # if param=='new_name':
+ # descriptor['vnf']['name'] = kwargs[param]
+ # else:
+ # descriptor['vnf'][param] = kwargs[param]
+ # return self._edit_item("vnfs", descriptor, uuid, name, all_tenants=None)
+
+ #SCENARIOS
+ def list_scenarios(self, all_tenants=False, **kwargs):
+ '''Obtain a list of scenarios
+ Params: can be filtered by 'uuid','name','description','public', "tenant_id"
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'scenarios':[{scenario1_info},{scenario2_info},...]}}
+ '''
+ return self._list_item("scenarios", all_tenants, kwargs)
+
+ def get_scenario(self, uuid=None, name=None, all_tenants=False):
+ '''Obtain the information of a scenario
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several
+ Obtain a dictionary with format {'scenario':{scenario_info}}
+ '''
+ return self._get_item("scenarios", uuid, name, all_tenants)
+
+ def delete_scenario(self, uuid=None, name=None, all_tenants=False):
+ '''Delete a scenario
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several, not free
+ Obtain a dictionary with format {'result': text indicating deleted}
+ '''
+ return self._del_item("scenarios", uuid, name, all_tenants)
+
+ def create_scenario(self, descriptor=None, descriptor_format=None, **kwargs):
+ '''Creates a scenario
+ Params: must supply a descriptor
+ descriptor: with format {'scenario':{new_scenario_info}}
+ must be a dictionary or a json/yaml text.
+ Other parameters can be:
+ name: the scenario name. Overwrite descriptor name if any
+ description: scenario descriptor.. Overwrite descriptor description if any
+ public: boolean, available to other tenants
+ tenant_id. Propietary tenant
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'scenario':{new_scenario_info}}
+ '''
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'scenario' not in descriptor or len(descriptor)>2:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'scenario' field, and an optional version")
+ for param in kwargs:
+ descriptor['scenario'][param] = kwargs[param]
+ return self._create_item("scenarios", descriptor)
+
+ def edit_scenario(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
+ '''Edit the parameters of a scenario
+ Params: must supply a descriptor or/and a parameters to change
+ uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ descriptor: with format {'scenario':{params to change info}}
+ must be a dictionary or a json/yaml text.
+ parameters to change can be supplyied by the descriptor or as parameters:
+ new_name: the scenario name
+ public: boolean, available to other tenants
+ description: scenario description
+ tenant_id. Propietary tenant
+ Return: Raises an exception on error, not found or found several
+ Obtain a dictionary with format {'scenario':{new_scenario_info}}
+ '''
+
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif kwargs:
+ descriptor={"scenario": {}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'scenario' not in descriptor or len(descriptor)>2:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'scenario' field")
+ for param in kwargs:
+ if param=='new_name':
+ descriptor['scenario']['name'] = kwargs[param]
+ else:
+ descriptor['scenario'][param] = kwargs[param]
+ return self._edit_item("scenarios", descriptor, uuid, name, all_tenants=None)
+
+
+ #INSTANCE-SCENARIOS
+ def list_instances(self, all_tenants=False, **kwargs):
+ '''Obtain a list of instances
+ Params: can be filtered by 'uuid','name','description','scenario_id', "tenant_id"
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'instances':[{instance1_info},{instance2_info},...]}}
+ '''
+ return self._list_item("instances", all_tenants, kwargs)
+
+ def get_instance(self, uuid=None, name=None, all_tenants=False):
+ '''Obtain the information of a instance
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several
+ Obtain a dictionary with format {'instance':{instance_info}}
+ '''
+ return self._get_item("instances", uuid, name, all_tenants)
+
+ def delete_instance(self, uuid=None, name=None, all_tenants=False):
+ '''Delete a instance
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several, not free
+ Obtain a dictionary with format {'result': text indicating deleted}
+ '''
+ return self._del_item("instances", uuid, name, all_tenants)
+
+ def create_instance(self, descriptor=None, descriptor_format=None, name=None, **kwargs):
+ '''Creates a instance
+ Params: must supply a descriptor or/and a name and scenario
+ descriptor: with format {'instance':{new_instance_info}}
+ must be a dictionary or a json/yaml text.
+ name: the instance name. Overwrite descriptor name if any
+ Other parameters can be:
+ description: instance descriptor.. Overwrite descriptor description if any
+ datacenter_name, datacenter_id: datacenter where to be deployed
+ scenario_name, scenario_id: Scenario this instance is based on
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'instance':{new_instance_info}}
+ '''
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif name and ("scenario_name" in kwargs or "scenario_id" in kwargs):
+ descriptor = {"instance":{"name": name}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'instance' not in descriptor or len(descriptor)>2:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'instance' field, and an optional version")
+ if name:
+ descriptor['instance']["name"] = name
+ if "scenario_name" in kwargs or "scenario_id" in kwargs:
+ descriptor['instance']["scenario"] = self._get_item_uuid("scenarios", kwargs.get("scenario_id"), kwargs.get("scenario_name"))
+ if "datacenter_name" in kwargs or "datacenter_id" in kwargs:
+ descriptor['instance']["datacenter"] = self._get_item_uuid("datacenters", kwargs.get("datacenter_id"), kwargs.get("datacenter_name"))
+ if "description" in kwargs:
+ descriptor['instance']["description"] = kwargs.get("description")
+ #for param in kwargs:
+ # descriptor['instance'][param] = kwargs[param]
+ if "datacenter" not in descriptor['instance']:
+ descriptor['instance']["datacenter"] = self._get_datacenter()
+ return self._create_item("instances", descriptor)
+
+ #VIM ACTIONS
+ def vim_action(self, action, item, uuid=None, all_tenants=False, **kwargs):
+ '''Perform an action over a vim
+ Params:
+ action: can be 'list', 'get'/'show', 'delete' or 'create'
+ item: can be 'tenants' or 'networks'
+ uuid: uuid of the tenant/net to show or to delete. Ignore otherwise
+ other parameters:
+ datacenter_name, datacenter_id: datacenters to act on, if missing uses classes store datacenter
+ descriptor, descriptor_format: descriptor needed on creation, can be a dict or a yaml/json str
+ must be a dictionary or a json/yaml text.
+ name: for created tenant/net Overwrite descriptor name if any
+ description: tenant descriptor. Overwrite descriptor description if any
+
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'tenant':{new_tenant_info}}
+ '''
+ if item not in ("tenants", "networks", "images"):
+ raise OpenmanoBadParamsException("Unknown value for item '{}', must be 'tenants', 'nets' or "
+ "images".format(str(item)))
+
+ image_actions = ['list','get','show','delete']
+ if item == "images" and action not in image_actions:
+ raise OpenmanoBadParamsException("Only available actions for item '{}' are {}\n"
+ "Requested action was '{}'".format(item, ', '.join(image_actions), action))
+ if all_tenants:
+ tenant_text = "/any"
+ else:
+ tenant_text = "/"+self._get_tenant()
+
+ if "datacenter_id" in kwargs or "datacenter_name" in kwargs:
+ datacenter = self._get_item_uuid("datacenters", kwargs.get("datacenter_id"), kwargs.get("datacenter_name"), all_tenants=all_tenants)
+ else:
+ datacenter = self._get_datacenter()
+
+ if action=="list":
+ URLrequest = "{}{}/vim/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item)
+ self.logger.debug("GET %s", URLrequest )
+ mano_response = requests.get(URLrequest, headers=self.headers_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+ elif action=="get" or action=="show":
+ URLrequest = "{}{}/vim/{}/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item, uuid)
+ self.logger.debug("GET %s", URLrequest )
+ mano_response = requests.get(URLrequest, headers=self.headers_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+ elif action=="delete":
+ URLrequest = "{}{}/vim/{}/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item, uuid)
+ self.logger.debug("DELETE %s", URLrequest )
+ mano_response = requests.delete(URLrequest, headers=self.headers_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+ elif action=="create":
+ if "descriptor" in kwargs:
+ if isinstance(kwargs["descriptor"], str):
+ descriptor = self._parse(kwargs["descriptor"], kwargs.get("descriptor_format") )
+ else:
+ descriptor = kwargs["descriptor"]
+ elif "name" in kwargs:
+ descriptor={item[:-1]: {"name": kwargs["name"]}}
+ else:
+ raise OpenmanoResponseException("Missing descriptor")
+
+ if item[:-1] not in descriptor or len(descriptor)!=1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
+ if "name" in kwargs:
+ descriptor[ item[:-1] ]['name'] = kwargs["name"]
+ if "description" in kwargs:
+ descriptor[ item[:-1] ]['description'] = kwargs["description"]
+ payload_req = yaml.safe_dump(descriptor)
+ #print payload_req
+ URLrequest = "{}{}/vim/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item)
+ self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+ mano_response = requests.post(URLrequest, headers = self.headers_req, data=payload_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+ else:
+ raise OpenmanoBadParamsException("Unknown value for action '{}".format(str(action)))
+
--- /dev/null
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ #HTTP Server parameters (MANO API). IP address and port where openmanod listens
+ # IPtables/firewalld must allow this port
+ # for CentOS/Redhad firewalld is configured at '/etc/firewalld/services/openmanod.xml'
+ # edit this file and reload firewalld with 'firewall-cmd --reload' if port is changed
+ http_host: 0.0.0.0 # IP address, (by default, 0.0.0.0 means that it will listen in all interfaces)
+ http_port: 9090 # General port (by default, 9090)
+ #http_admin_port: 9095 # Admin port where openmano is listening (when missing, no administration server is launched)
+ # Not used in current version!
+
+ #Parameters for a VIM console access. Can be directly the VIM URL or a proxy to offer the openmano IP address
+ http_console_proxy: False #by default True. If False proxy is not implemented and VIM URL is offered. It is
+ #assumed then, that client can access directly to the VIMs
#http_console_host: <ip> #by default the same as 'http_host'. However, if the openmano server is behind a NAT/proxy
#you should specify the public IP used to access the server. Also, when 'http_host' is
#0.0.0.0 you should specify the concrete IP address (or name) through which the server is accessed
+ # Ports to be used. Comma separated list. Can contain a {"from":<port>, "to":<port>} entry
+ #e.g. from 9000 to 9005: [{"from":9000, "to":9005}], or also [9000,9001,9002,9003,9004,9005]
+ #e.g. from 9000 to 9100 apart from 9050,9053: [{"from":9000, "to":9049},9051,9052,{"from":9054, "to":9099}]
+ http_console_ports: [{"from":9096, "to":9110}]
+
+ #Database parameters
+ db_host: localhost # by default localhost
+ db_user: mano # DB user
+ db_passwd: manopw # DB password
+ db_name: mano_db # Name of the MANO DB
++# Database ovim parameters
++db_ovim_host: localhost # by default localhost
++db_ovim_user: mano # DB user
++db_ovim_passwd: manopw # DB password
++db_ovim_name: mano_vim_db # Name of the OVIM MANO DB
++
+
+ #other MANO parameters
+ # Folder where the VNF descriptors will be stored
+ # The folder will be created in the execution folder if it does not exist
+ #vnf_repository: "./vnfrepo" # Use an absolute path to avoid misunderstandings
+
+ # Indicates if at VNF onboarding, flavors and images are loaded at all related VIMs,
+ # in order to speed up the later instantiation.
+ auto_push_VNF_to_VIMs: False # by default True
+
+ #general logging parameters
+ #choose among: DEBUG, INFO, WARNING, ERROR, CRITICAL
+ log_level: DEBUG #general log levels for internal logging
+ #standard output is used unless 'log_file' is specify
+ #log_file: /var/log/openmano/openmano.log
+
+ #individual logging settings
+ #log_level_db: DEBUG #database log levels
+ #log_file_db: /opt/openmano/logs/openmano_db.log
+ #log_level_vim: DEBUG #VIM connection log levels
+ #log_file_vim: /opt/openmano/logs/openmano_vimconn.log
+ #log_level_nfvo: DEBUG #Main engine log levels
+ #log_file_nfvo: /opt/openmano/logs/openmano_nfvo.log
+ #log_level_http: DEBUG #Main engine log levels
+ #log_file_http: /opt/openmano/logs/openmano_http.log
+ #log_level_console: DEBUG #proxy console log levels
+ #log_file_console: /opt/openmano/logs/openmano_console.log
++#log_level_ovim: DEBUG #ovim library log levels
++#log_file_ovim: /opt/openmano/logs/openmano_ovim.log
+
+ #Uncomment to send logs via IP to an external host
+ #log_socket_host: localhost
+ log_socket_port: 9022
+ log_socket_level: DEBUG #general log levels for socket logging
--- /dev/null
-__author__ = "Alfonso Tierno"
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openvim
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
'''
This is a thread that interacts with a VIM through its connector, processing queued tasks
(VM/network creation, deletion and status refreshing) sequentially.
One thread is launched per VIM datacenter tenant.
'''
- def __init__(self, vimconn, task_lock, name=None, datacenter_name=None, datacenter_tenant_id=None, db=None, db_lock=None):
++__author__ = "Alfonso Tierno, Pablo Montes"
+ __date__ = "$10-feb-2017 12:07:15$"
+
+ import threading
+ import time
+ import Queue
+ import logging
+ import vimconn
+ from db_base import db_base_Exception
++from ovim import ovimException
+
+
+ # from logging import Logger
+ # import auxiliary_functions as af
+
+
def is_task_id(id):
    """Return True when 'id' is an internal task identifier (prefixed 'TASK.')
    instead of a real VIM uuid.
    """
    # startswith never raises on short strings and returns a plain bool,
    # so the 'True if ... else False' wrapper is unnecessary
    return id.startswith("TASK.")
+
+
+ class vim_thread(threading.Thread):
+
- if not self.task_queue.empty():
- task = self.task_queue.get()
- self.task_lock.acquire()
- if task["status"] == "deleted":
    def __init__(self, vimconn, task_lock, name=None, datacenter_name=None, datacenter_tenant_id=None, db=None, db_lock=None, ovim=None):
        """Init a thread.
        Arguments:
            'vimconn': VIM connector object; also indexed like a dict below for
                'id' and 'config' — TODO confirm the connector exposes __getitem__
            'task_lock': lock shared among threads to update task status in exclusion
            'name': name of thread; when missing it is derived from vimconn fields
            'datacenter_name', 'datacenter_tenant_id': datacenter identification
            'db', 'db_lock': database class and lock to use it in exclusion
            'ovim': ovim library instance used to manage SDN ports
        """
        self.tasksResult = {}
        """ It will contain a dictionary with
            task_id:
                status: enqueued,done,error,deleted,processing
                result: VIM result,
        """
        threading.Thread.__init__(self)
        self.vim = vimconn
        self.datacenter_name = datacenter_name
        self.datacenter_tenant_id = datacenter_tenant_id
        self.ovim = ovim
        if not name:
            # default thread name: "<vim id>.<datacenter tenant id>"
            self.name = vimconn["id"] + "." + vimconn["config"]["datacenter_tenant_id"]
        else:
            self.name = name

        self.logger = logging.getLogger('openmano.vim.'+self.name)
        self.db = db
        self.db_lock = db_lock

        self.task_lock = task_lock
        # bounded queue: insert_task raises when more than 2000 tasks are pending
        self.task_queue = Queue.Queue(2000)
        self.refresh_list = []
        """Contains time ordered task list for refreshing the status of VIM VMs and nets"""
++
    def _refres_elements(self):
        """Call VIM to get VMs and networks status until 10 elements.
        Pops due refresh tasks from self.refresh_list, queries the VIM in batch
        (refresh_vms_status / refresh_nets_status), reconciles SDN ports through
        self.ovim, persists changes to the database, and re-schedules each task
        (5 s when still BUILDing, 5 min otherwise). Sleeps 1 s when nothing was due.
        """
        now = time.time()
        vm_to_refresh_list = []
        net_to_refresh_list = []
        vm_to_refresh_dict = {}
        net_to_refresh_dict = {}
        items_to_refresh = 0
        # collect up to 10 due tasks from the time-ordered refresh list
        while self.refresh_list:
            task = self.refresh_list[0]
            with self.task_lock:
                if task['status'] == 'deleted':
                    self.refresh_list.pop(0)
                    continue
                if task['time'] > now:
                    # list is time ordered, so nothing else is due yet
                    break
                task["status"] = "processing"
            self.refresh_list.pop(0)
            if task["name"] == 'get-vm':
                vm_to_refresh_list.append(task["vim_id"])
                vm_to_refresh_dict[task["vim_id"]] = task
            elif task["name"] == 'get-net':
                net_to_refresh_list.append(task["vim_id"])
                net_to_refresh_dict[task["vim_id"]] = task
            else:
                error_text = "unknown task {}".format(task["name"])
                self.logger.error(error_text)
            items_to_refresh += 1
            if items_to_refresh == 10:
                break

        if vm_to_refresh_list:
            try:
                vim_dict = self.vim.refresh_vms_status(vm_to_refresh_list)
                for vim_id, vim_info in vim_dict.items():
                    #look for task
                    task = vm_to_refresh_dict[vim_id]
                    self.logger.debug("get-vm vm_id=%s result=%s", task["vim_id"], str(vim_info))

                    # update database
                    if vim_info.get("error_msg"):
                        vim_info["error_msg"] = self._format_vim_error_msg(vim_info["error_msg"])
                    # write only when status/error/vim_info actually changed since last refresh
                    if task["vim_info"].get("status") != vim_info["status"] or \
                        task["vim_info"].get("error_msg") != vim_info.get("error_msg") or \
                        task["vim_info"].get("vim_info") != vim_info["vim_info"]:
                        with self.db_lock:
                            temp_dict = {"status": vim_info["status"],
                                         "error_msg": vim_info.get("error_msg"),
                                         "vim_info": vim_info["vim_info"]}
                            self.db.update_rows('instance_vms', UPDATE=temp_dict, WHERE={"vim_vm_id": vim_id})
                    # reconcile each reported interface with the one cached in the task
                    for interface in vim_info["interfaces"]:
                        for task_interface in task["vim_info"]["interfaces"]:
                            if task_interface["vim_net_id"] == interface["vim_net_id"]:
                                break
                        else:
                            # interface not cached yet; start tracking it
                            task_interface = {"vim_net_id": interface["vim_net_id"]}
                            task["vim_info"]["interfaces"].append(task_interface)
                        if task_interface != interface:
                            #delete old port
                            if task_interface.get("sdn_port_id"):
                                try:
                                    self.ovim.delete_port(task_interface["sdn_port_id"])
                                    task_interface["sdn_port_id"] = None
                                except ovimException as e:
                                    self.logger.error("ovimException deleting external_port={} ".format(
                                        task_interface["sdn_port_id"]) + str(e), exc_info=True)
                                    # TODO Set error_msg at instance_nets
                            # vim_net_id is popped so UPDATE below does not overwrite that column;
                            # it is restored at the end of this branch
                            vim_net_id = interface.pop("vim_net_id")
                            sdn_net_id = None
                            sdn_port_name = None
                            with self.db_lock:
                                where_ = {'iv.vim_vm_id': vim_id, "ine.vim_net_id": vim_net_id,
                                          'ine.datacenter_tenant_id': self.datacenter_tenant_id}
                                # TODO check why vim_interface_id is not present at database
                                # if interface.get("vim_interface_id"):
                                #     where_["vim_interface_id"] = interface["vim_interface_id"]
                                db_ifaces = self.db.get_rows(
                                    FROM="instance_interfaces as ii left join instance_nets as ine on "
                                         "ii.instance_net_id=ine.uuid left join instance_vms as iv on "
                                         "ii.instance_vm_id=iv.uuid",
                                    SELECT=("ii.uuid as iface_id", "ine.uuid as net_id", "iv.uuid as vm_id", "sdn_net_id"),
                                    WHERE=where_)
                            if len(db_ifaces)>1:
                                self.logger.critical("Refresing interfaces. "
                                                     "Found more than one interface at database for '{}'".format(where_))
                            elif len(db_ifaces)==0:
                                self.logger.critical("Refresing interfaces. "
                                                     "Not found any interface at database for '{}'".format(where_))
                                continue
                            else:
                                db_iface = db_ifaces[0]
                                # only create an SDN port when the net is SDN-managed and the VIM
                                # reported the physical placement (compute node + PCI address)
                                if db_iface.get("sdn_net_id") and interface.get("compute_node") and interface.get("pci"):
                                    sdn_net_id = db_iface["sdn_net_id"]
                                    # port name limited to 63 chars — presumably an ovim/DB column
                                    # limit; TODO confirm
                                    sdn_port_name = sdn_net_id + "." + db_iface["vm_id"]
                                    sdn_port_name = sdn_port_name[:63]
                                    try:
                                        sdn_port_id = self.ovim.new_external_port(
                                            {"compute_node": interface["compute_node"],
                                             "pci": interface["pci"],
                                             "vlan": interface.get("vlan"),
                                             "net_id": sdn_net_id,
                                             "region": self.vim["config"]["datacenter_id"],
                                             "name": sdn_port_name,
                                             "mac": interface.get("mac_address")})
                                        interface["sdn_port_id"] = sdn_port_id
                                    except (ovimException, Exception) as e:
                                        # NOTE(review): catching Exception makes the ovimException
                                        # alternative redundant — confirm intent
                                        self.logger.error(
                                            "ovimException creating new_external_port compute_node={} " \
                                            "pci={} vlan={} ".format(
                                                interface["compute_node"],
                                                interface["pci"],
                                                interface.get("vlan")) + str(e),
                                            exc_info=True)
                                        # TODO Set error_msg at instance_nets
                                with self.db_lock:
                                    self.db.update_rows('instance_interfaces', UPDATE=interface,
                                                        WHERE={'uuid': db_iface["iface_id"]})
                                # TODO insert instance_id
                            interface["vim_net_id"] = vim_net_id

                    task["vim_info"] = vim_info
                    if task["vim_info"]["status"] == "BUILD":
                        self._insert_refresh(task, now+5)  # 5seconds
                    else:
                        self._insert_refresh(task, now+300)  # 5minutes
            except vimconn.vimconnException as e:
                # on a VIM failure re-schedule the last handled task only — the other popped
                # tasks are not re-inserted here
                self.logger.error("vimconnException Exception when trying to refresh vms " + str(e))
                self._insert_refresh(task, now + 300)  # 5minutes

        if net_to_refresh_list:
            try:
                vim_dict = self.vim.refresh_nets_status(net_to_refresh_list)
                for vim_id, vim_info in vim_dict.items():
                    #look for task
                    task = net_to_refresh_dict[vim_id]
                    self.logger.debug("get-net net_id=%s result=%s", task["vim_id"], str(vim_info))

                    #get database info
                    where_ = {"vim_net_id": vim_id, 'datacenter_tenant_id': self.datacenter_tenant_id}
                    with self.db_lock:
                        db_nets = self.db.get_rows(
                            FROM="instance_nets",
                            SELECT=("uuid as net_id", "sdn_net_id"),
                            WHERE=where_)
                    if len(db_nets) > 1:
                        self.logger.critical("Refresing networks. "
                                             "Found more than one instance-networks at database for '{}'".format(where_))
                    elif len(db_nets) == 0:
                        self.logger.critical("Refresing networks. "
                                             "Not found any instance-network at database for '{}'".format(where_))
                        continue
                    else:
                        db_net = db_nets[0]
                        if db_net.get("sdn_net_id"):
                            # get ovim status and merge it with the VIM-reported status
                            try:
                                sdn_net = self.ovim.show_network(db_net["sdn_net_id"])
                                if sdn_net["status"] == "ERROR":
                                    if not vim_info.get("error_msg"):
                                        vim_info["error_msg"] = sdn_net["error_msg"]
                                    else:
                                        # combined message halved so both parts fit in the
                                        # error_msg column — presumably 1024 chars; TODO confirm
                                        vim_info["error_msg"] = "VIM_ERROR: {} && SDN_ERROR: {}".format(
                                            self._format_vim_error_msg(vim_info["error_msg"], 1024//2-14),
                                            self._format_vim_error_msg(sdn_net["error_msg"], 1024//2-14))
                                    if vim_info["status"] == "VIM_ERROR":
                                        vim_info["status"] = "VIM_SDN_ERROR"
                                    else:
                                        vim_info["status"] = "SDN_ERROR"

                            except (ovimException, Exception) as e:
                                # NOTE(review): catching Exception makes the ovimException
                                # alternative redundant — confirm intent
                                self.logger.error(
                                    "ovimException getting network infor snd_net_id={}".format(db_net["sdn_net_id"]),
                                    exc_info=True)
                                # TODO Set error_msg at instance_nets

                    # update database
                    if vim_info.get("error_msg"):
                        vim_info["error_msg"] = self._format_vim_error_msg(vim_info["error_msg"])
                    # write only when status/error/vim_info actually changed since last refresh
                    if task["vim_info"].get("status") != vim_info["status"] or \
                        task["vim_info"].get("error_msg") != vim_info.get("error_msg") or \
                        task["vim_info"].get("vim_info") != vim_info["vim_info"]:
                        with self.db_lock:
                            temp_dict = {"status": vim_info["status"],
                                         "error_msg": vim_info.get("error_msg"),
                                         "vim_info": vim_info["vim_info"]}
                            self.db.update_rows('instance_nets', UPDATE=temp_dict, WHERE={"vim_net_id": vim_id})

                    task["vim_info"] = vim_info
                    if task["vim_info"]["status"] == "BUILD":
                        self._insert_refresh(task, now+5)  # 5seconds
                    else:
                        self._insert_refresh(task, now+300)  # 5minutes
            except vimconn.vimconnException as e:
                # on a VIM failure re-schedule the last handled task only — the other popped
                # tasks are not re-inserted here
                self.logger.error("vimconnException Exception when trying to refresh nets " + str(e))
                self._insert_refresh(task, now + 300)  # 5minutes

        if not items_to_refresh:
            # nothing was due; avoid busy-waiting in the caller's loop
            time.sleep(1)
++
++ def _insert_refresh(self, task, threshold_time):
++ """Insert a task at list of refreshing elements. The refreshing list is ordered by threshold_time (task['time']
++ It is assumed that this is called inside this thread
++ """
++ task["time"] = threshold_time
++ for index in range(0, len(self.refresh_list)):
++ if self.refresh_list[index]["time"] > threshold_time:
++ self.refresh_list.insert(index, task)
++ break
++ else:
++ index = len(self.refresh_list)
++ self.refresh_list.append(task)
++ self.logger.debug("new refresh task={} name={}, time={} index={}".format(
++ task["id"], task["name"], task["time"], index))
++
++ def _remove_refresh(self, task_name, vim_id):
++ """Remove a task with this name and vim_id from the list of refreshing elements.
++ It is assumed that this is called inside this thread outside _refres_elements method
++ Return True if self.refresh_list is modified, task is found
++ Return False if not found
++ """
++ index_to_delete = None
++ for index in range(0, len(self.refresh_list)):
++ if self.refresh_list[index]["name"] == task_name and self.refresh_list[index]["vim_id"] == vim_id:
++ index_to_delete = index
++ break
++ else:
++ return False
++ if index_to_delete is not None:
++ del self.refresh_list[index_to_delete]
++ return True
+
+ def insert_task(self, task):
+ try:
+ self.task_queue.put(task, False)
+ return task["id"]
+ except Queue.Full:
+ raise vimconn.vimconnException(self.name + ": timeout inserting a task")
+
+ def del_task(self, task):
+ with self.task_lock:
+ if task["status"] == "enqueued":
+ task["status"] = "deleted"
+ return True
+ else: # task["status"] == "processing"
+ # lock is released automatically on 'with' block exit; an explicit release here would double-release
+ return False
+
+ def run(self):
+ self.logger.debug("Starting")
+ while True:
+ #TODO reload service
+ while True:
- task["status"] == "processing"
- self.task_lock.release()
- else:
- now=time.time()
- time.sleep(1)
- continue
- self.logger.debug("processing task id={} name={} params={}".format(task["id"], task["name"],
- str(task["params"])))
- if task["name"] == 'exit' or task["name"] == 'reload':
- result, content = self.terminate(task)
- elif task["name"] == 'new-vm':
- result, content = self.new_vm(task)
- elif task["name"] == 'del-vm':
- result, content = self.del_vm(task)
- elif task["name"] == 'new-net':
- result, content = self.new_net(task)
- elif task["name"] == 'del-net':
- result, content = self.del_net(task)
- else:
- error_text = "unknown task {}".format(task["name"])
- self.logger.error(error_text)
- result = False
- content = error_text
++ try:
++ if not self.task_queue.empty():
++ task = self.task_queue.get()
++ self.task_lock.acquire()
++ if task["status"] == "deleted":
++ self.task_lock.release()
++ continue
++ task["status"] = "processing"
+ self.task_lock.release()
++ else:
++ self._refres_elements()
+ continue
- with self.task_lock:
- task["status"] = "done" if result else "error"
- task["result"] = content
- self.task_queue.task_done()
++ self.logger.debug("processing task id={} name={} params={}".format(task["id"], task["name"],
++ str(task["params"])))
++ if task["name"] == 'exit' or task["name"] == 'reload':
++ result, content = self.terminate(task)
++ elif task["name"] == 'new-vm':
++ result, content = self.new_vm(task)
++ elif task["name"] == 'del-vm':
++ result, content = self.del_vm(task)
++ elif task["name"] == 'new-net':
++ result, content = self.new_net(task)
++ elif task["name"] == 'del-net':
++ result, content = self.del_net(task)
++ else:
++ error_text = "unknown task {}".format(task["name"])
++ self.logger.error(error_text)
++ result = False
++ content = error_text
++ self.logger.debug("task id={} name={} result={}:{} params={}".format(task["id"], task["name"],
++ result, content,
++ str(task["params"])))
+
- if task["name"] == 'exit':
- return 0
- elif task["name"] == 'reload':
- break
++ with self.task_lock:
++ task["status"] = "done" if result else "error"
++ task["result"] = content
++ self.task_queue.task_done()
+
- self.db.update_rows("instance_nets", UPDATE={"vim_net_id": net_id}, WHERE={"vim_net_id": task_id})
++ if task["name"] == 'exit':
++ return 0
++ elif task["name"] == 'reload':
++ break
++ except Exception as e:
++ self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
+
+ self.logger.debug("Finishing")
+
+ def terminate(self, task):
+ return True, None
+
++ def _format_vim_error_msg(self, error_text, max_length=1024):
++ if error_text and len(error_text) >= max_length:
++ return error_text[:max_length//2-3] + " ... " + error_text[-max_length//2+3:]
++ return error_text
++
+ def new_net(self, task):
+ try:
+ task_id = task["id"]
+ params = task["params"]
+ net_id = self.vim.new_network(*params)
++
++ net_name = params[0]
++ net_type = params[1]
++
++ network = None
++ sdn_net_id = None
++ sdn_controller = self.vim.config.get('sdn-controller')
++ if sdn_controller and (net_type == "data" or net_type == "ptp"):
++ network = {"name": net_name, "type": net_type}
++
++ vim_net = self.vim.get_network(net_id)
++ if vim_net.get('encapsulation') != 'vlan':
++ raise vimconn.vimconnException(
++ "net '{}' defined as type '{}' has not vlan encapsulation '{}'".format(
++ net_name, net_type, vim_net['encapsulation']))
++ network["vlan"] = vim_net.get('segmentation_id')
++ try:
++ sdn_net_id = self.ovim.new_network(network)
++ except (ovimException, Exception) as e:
++ self.logger.error("task=%s cannot create SDN network vim_net_id=%s input='%s' ovimException='%s'",
++ str(task_id), net_id, str(network), str(e))
+ with self.db_lock:
- if is_task_id(net["net_id"]): # change task_id into network_id
++ self.db.update_rows("instance_nets", UPDATE={"vim_net_id": net_id, "sdn_net_id": sdn_net_id},
++ WHERE={"vim_net_id": task_id})
++ new_refresh_task = {"status": "enqueued",
++ "id": task_id,
++ "name": "get-net",
++ "vim_id": net_id,
++ "vim_info": {} }
++ self._insert_refresh(new_refresh_task, time.time())
+ return True, net_id
+ except db_base_Exception as e:
+ self.logger.error("Error updating database %s", str(e))
+ return True, net_id
+ except vimconn.vimconnException as e:
++ self.logger.error("Error creating NET, task=%s: %s", str(task_id), str(e))
++ try:
++ with self.db_lock:
++ self.db.update_rows("instance_nets",
++ UPDATE={"error_msg": self._format_vim_error_msg(str(e)), "status": "VIM_ERROR"},
++ WHERE={"vim_net_id": task_id})
++ except db_base_Exception as e:
++ self.logger.error("Error updating database %s", str(e))
+ return False, str(e)
++ #except ovimException as e:
++ # self.logger.error("Error creating NET in ovim, task=%s: %s", str(task_id), str(e))
++ # return False, str(e)
+
+ def new_vm(self, task):
+ try:
+ params = task["params"]
+ task_id = task["id"]
+ depends = task.get("depends")
+ net_list = params[5]
++ vm_id, error_text = None, ""
+ for net in net_list:
- return False, "Cannot create VM because depends on a network that cannot be created: " + \
++ if "net_id" in net and is_task_id(net["net_id"]): # change task_id into network_id
+ try:
+ task_net = depends[net["net_id"]]
+ with self.task_lock:
+ if task_net["status"] == "error":
- return False, "Cannot create VM because depends on a network still not created"
++ error_text = "Cannot create VM because depends on a network that cannot be created: " +\
+ str(task_net["result"])
++ break
+ elif task_net["status"] == "enqueued" or task_net["status"] == "processing":
- return False, "Error trying to map from task_id={} to task result: {}".format(net["net_id"],
- str(e))
- vm_id = self.vim.new_vminstance(*params)
- with self.db_lock:
- self.db.update_rows("instance_vms", UPDATE={"vim_vm_id": vm_id}, WHERE={"vim_vm_id": task_id})
- return True, vm_id
- except db_base_Exception as e:
- self.logger.error("Error updtaing database %s", str(e))
++ error_text = "Cannot create VM because depends on a network still not created"
++ break
+ network_id = task_net["result"]
+ net["net_id"] = network_id
+ except Exception as e:
- vm_id = task["params"]
++ error_text = "Error trying to map from task_id={} to task result: {}".format(
++ net["net_id"],str(e))
++ break
++ if not error_text:
++ vm_id = self.vim.new_vminstance(*params)
++ try:
++ with self.db_lock:
++ if error_text:
++ update = self.db.update_rows("instance_vms",
++ UPDATE={"status": "VIM_ERROR", "error_msg": error_text},
++ WHERE={"vim_vm_id": task_id})
++ else:
++ update = self.db.update_rows("instance_vms", UPDATE={"vim_vm_id": vm_id}, WHERE={"vim_vm_id": task_id})
++ if not update:
++ self.logger.error("task id={} name={} database not updated vim_vm_id={}".format(
++ task["id"], task["name"], vm_id))
++ except db_base_Exception as e:
++ self.logger.error("Error updating database %s", str(e))
++ if error_text:
++ return False, error_text
++ new_refresh_task = {"status": "enqueued",
++ "id": task_id,
++ "name": "get-vm",
++ "vim_id": vm_id,
++ "vim_info": {"interfaces":[]} }
++ self._insert_refresh(new_refresh_task, time.time())
+ return True, vm_id
+ except vimconn.vimconnException as e:
++ self.logger.error("Error creating VM, task=%s: %s", str(task_id), str(e))
++ try:
++ with self.db_lock:
++ self.db.update_rows("instance_vms",
++ UPDATE={"error_msg": self._format_vim_error_msg(str(e)), "status": "VIM_ERROR"},
++ WHERE={"vim_vm_id": task_id})
++ except db_base_Exception as edb:
++ self.logger.error("Error updating database %s", str(edb))
+ return False, str(e)
+
+ def del_vm(self, task):
- return False, "Cannot delete VM because still creating"
++ vm_id = task["params"][0]
++ interfaces = task["params"][1]
+ if is_task_id(vm_id):
+ try:
+ task_create = task["depends"][vm_id]
+ with self.task_lock:
+ if task_create["status"] == "error":
+ return True, "VM was not created. It has error: " + str(task_create["result"])
+ elif task_create["status"] == "enqueued" or task_create["status"] == "processing":
- net_id = task["params"]
++ return False, "Cannot delete VM vim_id={} because still creating".format(vm_id)
+ vm_id = task_create["result"]
+ except Exception as e:
+ return False, "Error trying to get task_id='{}': {}".format(vm_id, str(e))
+ try:
++ self._remove_refresh("get-vm", vm_id)
++ for iface in interfaces:
++ if iface.get("sdn_port_id"):
++ try:
++ self.ovim.delete_port(iface["sdn_port_id"])
++ except ovimException as e:
++ self.logger.error("ovimException deleting external_port={} at VM vim_id={} deletion ".format(
++ iface["sdn_port_id"], vm_id) + str(e), exc_info=True)
++ # TODO Set error_msg at instance_nets
++
+ return True, self.vim.delete_vminstance(vm_id)
+ except vimconn.vimconnException as e:
+ return False, str(e)
+
+ def del_net(self, task):
- return True, self.vim.delete_network(net_id)
++ net_id = task["params"][0]
++ sdn_net_id = task["params"][1]
+ if is_task_id(net_id):
+ try:
+ task_create = task["depends"][net_id]
+ with self.task_lock:
+ if task_create["status"] == "error":
+ return True, "net was not created. It has error: " + str(task_create["result"])
+ elif task_create["status"] == "enqueued" or task_create["status"] == "processing":
+ return False, "Cannot delete net because still creating"
+ net_id = task_create["result"]
+ except Exception as e:
+ return False, "Error trying to get task_id='{}': {}".format(net_id, str(e))
+ try:
++ self._remove_refresh("get-net", net_id)
++ result = self.vim.delete_network(net_id)
++ if sdn_net_id:
++ with self.db_lock:
++ self.ovim.delete_network(sdn_net_id)
++ return True, result
+ except vimconn.vimconnException as e:
+ return False, str(e)
++ except ovimException as e:
++ self.logger.error("Error deleting network from ovim. net_id: {}, sdn_net_id: {}".format(net_id, sdn_net_id))
++ return False, str(e)
+
+
--- /dev/null
- physical_compute: #identification of compute node where PF,VF interface is allocated
- physical_pci: #PCI address of the NIC that hosts the PF,VF
- physical_vlan: #physical VLAN used for VF
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ """
+ vimconn implement an Abstract class for the vim connector plugins
+ with the definition of the method to be implemented.
+ """
+ __author__="Alfonso Tierno"
+ __date__ ="$16-oct-2015 11:09:29$"
+
+ import logging
+
+ #Error variables
+ HTTP_Bad_Request = 400
+ HTTP_Unauthorized = 401
+ HTTP_Not_Found = 404
+ HTTP_Method_Not_Allowed = 405
+ HTTP_Request_Timeout = 408
+ HTTP_Conflict = 409
+ HTTP_Not_Implemented = 501
+ HTTP_Service_Unavailable = 503
+ HTTP_Internal_Server_Error = 500
+
+ class vimconnException(Exception):
+ """Common and base class Exception for all vimconnector exceptions"""
+ def __init__(self, message, http_code=HTTP_Bad_Request):
+ Exception.__init__(self, message)
+ self.http_code = http_code
+
+ class vimconnConnectionException(vimconnException):
+ """Connectivity error with the VIM"""
+ def __init__(self, message, http_code=HTTP_Service_Unavailable):
+ vimconnException.__init__(self, message, http_code)
+
+ class vimconnUnexpectedResponse(vimconnException):
+ """Get an wrong response from VIM"""
+ def __init__(self, message, http_code=HTTP_Service_Unavailable):
+ vimconnException.__init__(self, message, http_code)
+
+ class vimconnAuthException(vimconnException):
+ """Invalid credentials or authorization to perform this action over the VIM"""
+ def __init__(self, message, http_code=HTTP_Unauthorized):
+ vimconnException.__init__(self, message, http_code)
+
+ class vimconnNotFoundException(vimconnException):
+ """The item is not found at VIM"""
+ def __init__(self, message, http_code=HTTP_Not_Found):
+ vimconnException.__init__(self, message, http_code)
+
+ class vimconnConflictException(vimconnException):
+ """There is a conflict, e.g. more item found than one"""
+ def __init__(self, message, http_code=HTTP_Conflict):
+ vimconnException.__init__(self, message, http_code)
+
+ class vimconnNotSupportedException(vimconnException):
+ """The request is not supported by connector"""
+ def __init__(self, message, http_code=HTTP_Service_Unavailable):
+ vimconnException.__init__(self, message, http_code)
+
+ class vimconnNotImplemented(vimconnException):
+ """The method is not implemented by the connected"""
+ def __init__(self, message, http_code=HTTP_Not_Implemented):
+ vimconnException.__init__(self, message, http_code)
+
+ class vimconnector():
+ """Abstract base class for all the VIM connector plugins
+ These plugins must implement a vimconnector class derived from this
+ and all these privated methods
+ """
+ def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
+ config={}, persitent_info={}):
+ """Constructor of VIM
+ Params:
+ 'uuid': id asigned to this VIM
+ 'name': name assigned to this VIM, can be used for logging
+ 'tenant_id', 'tenant_name': (only one of them is mandatory) VIM tenant to be used
+ 'url_admin': (optional), url used for administrative tasks
+ 'user', 'passwd': credentials of the VIM user
+ 'log_level': provider if it should use a different log_level than the general one
+ 'config': dictionary with extra VIM information. This contains a consolidate version of general VIM config
+ at creation and particular VIM config at teh attachment
+ 'persistent_info': dict where the class can store information that will be available among class
+ destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
+ empty dict. Useful to store login/tokens information for speed up communication
+
+ Returns: Raise an exception is some needed parameter is missing, but it must not do any connectivity
+ check against the VIM
+ """
+ self.id = uuid
+ self.name = name
+ self.url = url
+ self.url_admin = url_admin
+ self.tenant_id = tenant_id
+ self.tenant_name = tenant_name
+ self.user = user
+ self.passwd = passwd
+ self.config = config
+ self.logger = logging.getLogger('openmano.vim')
+ if log_level:
+ self.logger.setLevel( getattr(logging, log_level) )
+ if not self.url_admin: #try to use normal url
+ self.url_admin = self.url
+
+ def __getitem__(self,index):
+ if index=='tenant_id':
+ return self.tenant_id
+ if index=='tenant_name':
+ return self.tenant_name
+ elif index=='id':
+ return self.id
+ elif index=='name':
+ return self.name
+ elif index=='user':
+ return self.user
+ elif index=='passwd':
+ return self.passwd
+ elif index=='url':
+ return self.url
+ elif index=='url_admin':
+ return self.url_admin
+ elif index=="config":
+ return self.config
+ else:
+ raise KeyError("Invalid key '%s'" %str(index))
+
+ def __setitem__(self,index, value):
+ if index=='tenant_id':
+ self.tenant_id = value
+ if index=='tenant_name':
+ self.tenant_name = value
+ elif index=='id':
+ self.id = value
+ elif index=='name':
+ self.name = value
+ elif index=='user':
+ self.user = value
+ elif index=='passwd':
+ self.passwd = value
+ elif index=='url':
+ self.url = value
+ elif index=='url_admin':
+ self.url_admin = value
+ else:
+ raise KeyError("Invalid key '%s'" %str(index))
+
+ def check_vim_connectivity(self):
+ """Checks VIM can be reached and user credentials are ok.
+ Returns None if success or raised vimconnConnectionException, vimconnAuthException, ...
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def new_tenant(self,tenant_name,tenant_description):
+ """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
+ "tenant_name": string max lenght 64
+ "tenant_description": string max length 256
+ returns the tenant identifier or raise exception
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def delete_tenant(self,tenant_id,):
+ """Delete a tenant from VIM
+ tenant_id: returned VIM tenant_id on "new_tenant"
+ Returns None on success. Raises and exception of failure. If tenant is not found raises vimconnNotFoundException
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_tenant_list(self, filter_dict={}):
+ """Obtain tenants of VIM
+ filter_dict dictionary that can contain the following keys:
+ name: filter by tenant name
+ id: filter by tenant uuid/id
+ <other VIM specific>
+ Returns the tenant list of dictionaries, and empty list if no tenant match all the filers:
+ [{'name':'<name>, 'id':'<id>, ...}, ...]
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
+ """Adds a tenant network to VIM
+ Params:
+ 'net_name': name of the network
+ 'net_type': one of:
+ 'bridge': overlay isolated network
+ 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
+ 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
+ 'ip_profile': is a dict containing the IP parameters of the network (Currently only IPv4 is implemented)
+ 'ip-version': can be one of ["IPv4","IPv6"]
+ 'subnet-address': ip_prefix_schema, that is X.X.X.X/Y
+ 'gateway-address': (Optional) ip_schema, that is X.X.X.X
+ 'dns-address': (Optional) ip_schema,
+ 'dhcp': (Optional) dict containing
+ 'enabled': {"type": "boolean"},
+ 'start-address': ip_schema, first IP to grant
+ 'count': number of IPs to grant.
+ 'shared': if this network can be seen/use by other tenants/organization
+ 'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+ Returns the network identifier on success or raises and exception on failure
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_network_list(self, filter_dict={}):
+ """Obtain tenant networks of VIM
+ Params:
+ 'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
+ name: string => returns only networks with this name
+ id: string => returns networks with this VIM id, this imply returns one network at most
+ shared: boolean >= returns only networks that are (or are not) shared
+ tenant_id: sting => returns only networks that belong to this tenant/project
+ ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
+ #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
+ Returns the network list of dictionaries. each dictionary contains:
+ 'id': (mandatory) VIM network id
+ 'name': (mandatory) VIM network name
+ 'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
++ 'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
++ 'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
+ 'error_msg': (optional) text that explains the ERROR status
+ other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+ List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity,
+ authorization, or some other unspecific error
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_network(self, net_id):
+ """Obtain network details from the 'net_id' VIM network
+ Return a dict that contains:
+ 'id': (mandatory) VIM network id, that is, net_id
+ 'name': (mandatory) VIM network name
+ 'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+ 'error_msg': (optional) text that explains the ERROR status
+ other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+ Raises an exception upon error or when network is not found
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def delete_network(self, net_id):
+ """Deletes a tenant network from VIM
+ Returns the network identifier or raises an exception upon error or when network is not found
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def refresh_nets_status(self, net_list):
+ """Get the status of the networks
+ Params:
+ 'net_list': a list with the VIM network id to be get the status
+ Returns a dictionary with:
+ 'net_id': #VIM id of this network
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, INACTIVE, DOWN (admin down),
+ # BUILD (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ 'net_id2': ...
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_flavor(self, flavor_id):
+ """Obtain flavor details from the VIM
+ Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
+ Raises an exception upon error or if not found
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_flavor_id_from_data(self, flavor_dict):
+ """Obtain flavor id that match the flavor description
+ Params:
+ 'flavor_dict': dictionary that contains:
+ 'disk': main hard disk in GB
+ 'ram': meomry in MB
+ 'vcpus': number of virtual cpus
+ #TODO: complete parameters for EPA
+ Returns the flavor_id or raises a vimconnNotFoundException
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def new_flavor(self, flavor_data):
+ """Adds a tenant flavor to VIM
+ flavor_data contains a dictionary with information, keys:
+ name: flavor name
+ ram: memory (cloud type) in MBytes
+ vpcus: cpus (cloud type)
+ extended: EPA parameters
+ - numas: #items requested in same NUMA
+ memory: number of 1G huge pages memory
+ paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+ interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+ - name: interface name
+ dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
+ bandwidth: X Gbps; requested guarantee bandwidth
+ vpci: requested virtual PCI address
+ disk: disk size
+ is_public:
+ #TODO to concrete
+ Returns the flavor identifier"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def delete_flavor(self, flavor_id):
+ """Deletes a tenant flavor from VIM identify by its id
+ Returns the used id or raise an exception"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def new_image(self, image_dict):
+ """ Adds a tenant image to VIM
+ Returns the image id or raises an exception if failed
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def delete_image(self, image_id):
+ """Deletes a tenant image from VIM
+ Returns the image_id if image is deleted or raises an exception on error"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_image_id_from_path(self, path):
+ """Get the image id from image path in the VIM database.
+ Returns the image_id or raises a vimconnNotFoundException
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_image_list(self, filter_dict={}):
+ """Obtain tenant images from VIM
+ Filter_dict can be:
+ name: image name
+ id: image uuid
+ checksum: image checksum
+ location: image path
+ Returns the image list of dictionaries:
+ [{<the fields at Filter_dict plus some VIM specific>}, ...]
+ List can be empty
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None,
+ disk_list=None):
+ """Adds a VM instance to VIM
+ Params:
+ 'start': (boolean) indicates if VM must start or created in pause mode.
+ 'image_id','flavor_id': image and flavor VIM id to use for the VM
+ 'net_list': list of interfaces, each one is a dictionary with:
+ 'name': (optional) name for the interface.
+ 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
+ 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+ 'model': (optional and only have sense for type==virtual) interface model: virtio, e2000, ...
+ 'mac_address': (optional) mac address to assign to this interface
+ #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
+ the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
+ 'type': (mandatory) can be one of:
+ 'virtual', in this case always connected to a network of type 'net_type=bridge'
+ 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
+ can created unconnected
+ 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+ 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+ are allocated on the same physical NIC
+ 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+ 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+ or True, it must apply the default VIM behaviour
+ After execution the method will add the key:
+ 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+ interface. 'net_list' is modified
+ 'cloud_config': (optional) dictionary with:
+ 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+ 'users': (optional) list of users to be inserted, each item is a dict with:
+ 'name': (mandatory) user name,
+ 'key-pairs': (optional) list of strings with the public key to be inserted to the user
+ 'user-data': (optional) string is a text script to be passed directly to cloud-init
+ 'config-files': (optional). List of files to be transferred. Each item is a dict with:
+ 'dest': (mandatory) string with the destination absolute path
+ 'encoding': (optional, by default text). Can be one of:
+ 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+ 'content' (mandatory): string with the content of the file
+ 'permissions': (optional) string with file permissions, typically octal notation '0644'
+ 'owner': (optional) file owner, string with the format 'owner:group'
+ 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+ 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+ 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+ 'size': (mandatory) string with the size of the disk in GB
+ Returns the instance identifier or raises an exception on error
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_vminstance(self,vm_id):
+ """Returns the VM instance information from VIM"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def delete_vminstance(self, vm_id):
+ """Removes a VM instance from VIM
+ Returns the instance identifier"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def refresh_vms_status(self, vm_list):
+ """Get the status of the virtual machines and their interfaces/ports
+ Params: the list of VM identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this Virtual Machine
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+ # BUILD (on building process), ERROR
+ # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ interfaces: list with interface info. Each item a dictionary with:
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ mac_address: #Text format XX:XX:XX:XX:XX:XX
+ vim_net_id: #network id where this interface is connected, if provided at creation
+ vim_interface_id: #interface/port VIM id
+ ip_address: #null, or text with IPv4, IPv6 address
++ compute_node: #identification of compute node where PF,VF interface is allocated
++ pci: #PCI address of the NIC that hosts the PF,VF
++ vlan: #physical VLAN used for VF
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def action_vminstance(self, vm_id, action_dict):
+ """Send and action over a VM instance from VIM
+ Returns the vm_id if the action was successfully sent to the VIM"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_vminstance_console(self, vm_id, console_type="vnc"):
+ """
+ Get a console for the virtual machine
+ Params:
+ vm_id: uuid of the VM
+ console_type, can be:
+ "novnc" (by default), "xvpvnc" for VNC types,
+ "rdp-html5" for RDP types, "spice-html5" for SPICE types
+ Returns dict with the console parameters:
+ protocol: ssh, ftp, http, https, ...
+ server: usually ip address
+ port: the http, ssh, ... port
+ suffix: extra text, e.g. the http path and query string
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ #NOT USED METHODS in current version
+
+ def host_vim2gui(self, host, server_dict):
+ """Transform host dictionary from VIM format to GUI format,
+ and append to the server_dict
+ """
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_hosts_info(self):
+ """Get the information of deployed hosts
+ Returns the hosts content"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_hosts(self, vim_tenant):
+ """Get the hosts and deployed instances
+ Returns the hosts content"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def get_processor_rankings(self):
+ """Get the processor rankings in the VIM database"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def new_host(self, host_data):
+ """Adds a new host to VIM"""
+ """Returns status code of the VIM response"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def new_external_port(self, port_data):
+ """Adds a external port to VIM"""
+ """Returns the port identifier"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def new_external_network(self,net_name,net_type):
+ """Adds a external network to VIM (shared)"""
+ """Returns the network identifier"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def connect_port_network(self, port_id, network_id, admin=False):
+ """Connects a external port to a network"""
+ """Returns status code of the VIM response"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
+ def new_vminstancefromJSON(self, vm_data):
+ """Adds a VM instance to VIM"""
+ """Returns the instance identifier"""
+ raise vimconnNotImplemented( "Should have implemented this" )
+
--- /dev/null
- for interface in numa.get("interfaces",() ):
- if interface["dedicated"]=="yes":
- raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
- #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ '''
+ osconnector implements all the methods to interact with openstack using the python-client.
+ '''
+ __author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research"
+ __date__ ="$22-jun-2014 11:19:29$"
+
+ import vimconn
+ import json
+ import yaml
+ import logging
+ import netaddr
+ import time
+ import yaml
+ import random
+
+ from novaclient import client as nClient_v2, exceptions as nvExceptions
+ from novaclient import api_versions
+ import keystoneclient.v2_0.client as ksClient_v2
+ from novaclient.v2.client import Client as nClient
+ import keystoneclient.v3.client as ksClient
+ import keystoneclient.exceptions as ksExceptions
+ import glanceclient.v2.client as glClient
+ import glanceclient.client as gl1Client
+ import glanceclient.exc as gl1Exceptions
+ import cinderclient.v2.client as cClient_v2
+ from httplib import HTTPException
+ from neutronclient.neutron import client as neClient_v2
+ from neutronclient.v2_0 import client as neClient
+ from neutronclient.common import exceptions as neExceptions
+ from requests.exceptions import ConnectionError
+
# Mapping from openstack virtual machine status to openmano status
vmStatus2manoFormat = {
    'ACTIVE': 'ACTIVE',
    'PAUSED': 'PAUSED',
    'SUSPENDED': 'SUSPENDED',
    'SHUTOFF': 'INACTIVE',
    'BUILD': 'BUILD',
    'ERROR': 'ERROR',
    'DELETED': 'DELETED',
}

# Mapping from openstack network status to openmano status
netStatus2manoFormat = {
    'ACTIVE': 'ACTIVE',
    'PAUSED': 'PAUSED',
    'INACTIVE': 'INACTIVE',
    'BUILD': 'BUILD',
    'ERROR': 'ERROR',
    'DELETED': 'DELETED',
}

# global timeouts (seconds) used when creating and deleting volumes/servers
volume_timeout = 60
server_timeout = 60
+
+ class vimconnector(vimconn.vimconnector):
+ def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
+ log_level=None, config={}, persistent_info={}):
+ '''using common constructor parameters. In this case
+ 'url' is the keystone authorization url,
+ 'url_admin' is not use
+ '''
+ self.osc_api_version = 'v2.0'
+ if config.get('APIversion') == 'v3.3':
+ self.osc_api_version = 'v3.3'
+ vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
+
+ self.persistent_info = persistent_info
+ self.k_creds={}
+ self.n_creds={}
+ if self.config.get("insecure"):
+ self.k_creds["insecure"] = True
+ self.n_creds["insecure"] = True
+ if not url:
+ raise TypeError, 'url param can not be NoneType'
+ self.k_creds['auth_url'] = url
+ self.n_creds['auth_url'] = url
+ if tenant_name:
+ self.k_creds['tenant_name'] = tenant_name
+ self.n_creds['project_id'] = tenant_name
+ if tenant_id:
+ self.k_creds['tenant_id'] = tenant_id
+ self.n_creds['tenant_id'] = tenant_id
+ if user:
+ self.k_creds['username'] = user
+ self.n_creds['username'] = user
+ if passwd:
+ self.k_creds['password'] = passwd
+ self.n_creds['api_key'] = passwd
+ if self.osc_api_version == 'v3.3':
+ self.k_creds['project_name'] = tenant_name
+ self.k_creds['project_id'] = tenant_id
+ if config.get('region_name'):
+ self.k_creds['region_name'] = config.get('region_name')
+ self.n_creds['region_name'] = config.get('region_name')
+
+ self.reload_client = True
+ self.logger = logging.getLogger('openmano.vim.openstack')
+ if log_level:
+ self.logger.setLevel( getattr(logging, log_level) )
+
+ def __setitem__(self,index, value):
+ '''Set individuals parameters
+ Throw TypeError, KeyError
+ '''
+ if index=='tenant_id':
+ self.reload_client=True
+ self.tenant_id = value
+ if self.osc_api_version == 'v3.3':
+ if value:
+ self.k_creds['project_id'] = value
+ self.n_creds['project_id'] = value
+ else:
+ del self.k_creds['project_id']
+ del self.n_creds['project_id']
+ else:
+ if value:
+ self.k_creds['tenant_id'] = value
+ self.n_creds['tenant_id'] = value
+ else:
+ del self.k_creds['tenant_id']
+ del self.n_creds['tenant_id']
+ elif index=='tenant_name':
+ self.reload_client=True
+ self.tenant_name = value
+ if self.osc_api_version == 'v3.3':
+ if value:
+ self.k_creds['project_name'] = value
+ self.n_creds['project_name'] = value
+ else:
+ del self.k_creds['project_name']
+ del self.n_creds['project_name']
+ else:
+ if value:
+ self.k_creds['tenant_name'] = value
+ self.n_creds['project_id'] = value
+ else:
+ del self.k_creds['tenant_name']
+ del self.n_creds['project_id']
+ elif index=='user':
+ self.reload_client=True
+ self.user = value
+ if value:
+ self.k_creds['username'] = value
+ self.n_creds['username'] = value
+ else:
+ del self.k_creds['username']
+ del self.n_creds['username']
+ elif index=='passwd':
+ self.reload_client=True
+ self.passwd = value
+ if value:
+ self.k_creds['password'] = value
+ self.n_creds['api_key'] = value
+ else:
+ del self.k_creds['password']
+ del self.n_creds['api_key']
+ elif index=='url':
+ self.reload_client=True
+ self.url = value
+ if value:
+ self.k_creds['auth_url'] = value
+ self.n_creds['auth_url'] = value
+ else:
+ raise TypeError, 'url param can not be NoneType'
+ else:
+ vimconn.vimconnector.__setitem__(self,index, value)
+
    def _reload_connection(self):
        '''Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        '''
        #TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.reload_client:
            #test valid params
            # nova needs at least auth_url, username, api_key and a tenant/project
            if len(self.n_creds) <4:
                raise ksExceptions.ClientException("Not enough parameters to connect to openstack")
            if self.osc_api_version == 'v3.3':
                # keystone v3: nova/neutron built with an explicit API microversion
                self.nova = nClient(api_version=api_versions.APIVersion(version_str='2.0'), **self.n_creds)
                #TODO To be updated for v3
                #self.cinder = cClient.Client(**self.n_creds)
                self.keystone = ksClient.Client(**self.k_creds)
                # neutron has no catalog lookup of its own: resolve its endpoint
                # from the keystone service catalog and reuse the keystone token
                self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
                self.neutron = neClient.Client(api_version=api_versions.APIVersion(version_str='2.0'), endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
            else:
                # keystone v2 clients
                self.nova = nClient_v2.Client(version='2', **self.n_creds)
                self.cinder = cClient_v2.Client(**self.n_creds)
                self.keystone = ksClient_v2.Client(**self.k_creds)
                self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
                self.neutron = neClient_v2.Client('2.0', endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
            # glance endpoint comes from the keystone catalog in both API versions
            self.glance_endpoint = self.keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
            self.glance = glClient.Client(self.glance_endpoint, token=self.keystone.auth_token, **self.k_creds) #TODO check k_creds vs n_creds
            self.reload_client = False
+
+ def __net_os2mano(self, net_list_dict):
+ '''Transform the net openstack format to mano format
+ net_list_dict can be a list of dict or a single dict'''
+ if type(net_list_dict) is dict:
+ net_list_=(net_list_dict,)
+ elif type(net_list_dict) is list:
+ net_list_=net_list_dict
+ else:
+ raise TypeError("param net_list_dict must be a list or a dictionary")
+ for net in net_list_:
+ if net.get('provider:network_type') == "vlan":
+ net['type']='data'
+ else:
+ net['type']='bridge'
+
+
+
+ def _format_exception(self, exception):
+ '''Transform a keystone, nova, neutron exception into a vimconn exception'''
+ if isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
+ ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed
+ )):
+ raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
+ elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
+ neExceptions.NeutronException, nvExceptions.BadRequest)):
+ raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + str(exception))
+ elif isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound)):
+ raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception))
+ elif isinstance(exception, nvExceptions.Conflict):
+ raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception))
+ else: # ()
+ raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
+
+ def get_tenant_list(self, filter_dict={}):
+ '''Obtain tenants of VIM
+ filter_dict can contain the following keys:
+ name: filter by tenant name
+ id: filter by tenant uuid/id
+ <other VIM specific>
+ Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
+ '''
+ self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
+ try:
+ self._reload_connection()
+ if self.osc_api_version == 'v3.3':
+ project_class_list=self.keystone.projects.findall(**filter_dict)
+ else:
+ project_class_list=self.keystone.tenants.findall(**filter_dict)
+ project_list=[]
+ for project in project_class_list:
+ project_list.append(project.to_dict())
+ return project_list
+ except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def new_tenant(self, tenant_name, tenant_description):
+ '''Adds a new tenant to openstack VIM. Returns the tenant identifier'''
+ self.logger.debug("Adding a new tenant name: %s", tenant_name)
+ try:
+ self._reload_connection()
+ if self.osc_api_version == 'v3.3':
+ project=self.keystone.projects.create(tenant_name, tenant_description)
+ else:
+ project=self.keystone.tenants.create(tenant_name, tenant_description)
+ return project.id
+ except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def delete_tenant(self, tenant_id):
+ '''Delete a tenant from openstack VIM. Returns the old tenant identifier'''
+ self.logger.debug("Deleting tenant %s from VIM", tenant_id)
+ try:
+ self._reload_connection()
+ if self.osc_api_version == 'v3.3':
+ self.keystone.projects.delete(tenant_id)
+ else:
+ self.keystone.tenants.delete(tenant_id)
+ return tenant_id
+ except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None):
+ '''Adds a tenant network to VIM. Returns the network identifier'''
+ self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type)
+ #self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
+ try:
+ new_net = None
+ self._reload_connection()
+ network_dict = {'name': net_name, 'admin_state_up': True}
+ if net_type=="data" or net_type=="ptp":
+ if self.config.get('dataplane_physical_net') == None:
+ raise vimconn.vimconnConflictException("You must provide a 'dataplane_physical_net' at config value before creating sriov network")
+ network_dict["provider:physical_network"] = self.config['dataplane_physical_net'] #"physnet_sriov" #TODO physical
+ network_dict["provider:network_type"] = "vlan"
+ if vlan!=None:
+ network_dict["provider:network_type"] = vlan
+ network_dict["shared"]=shared
+ new_net=self.neutron.create_network({'network':network_dict})
+ #print new_net
+ #create subnetwork, even if there is no profile
+ if not ip_profile:
+ ip_profile = {}
+ if 'subnet_address' not in ip_profile:
+ #Fake subnet is required
+ subnet_rand = random.randint(0, 255)
+ ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
+ if 'ip_version' not in ip_profile:
+ ip_profile['ip_version'] = "IPv4"
+ subnet={"name":net_name+"-subnet",
+ "network_id": new_net["network"]["id"],
+ "ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6,
+ "cidr": ip_profile['subnet_address']
+ }
+ if 'gateway_address' in ip_profile:
+ subnet['gateway_ip'] = ip_profile['gateway_address']
+ if ip_profile.get('dns_address'):
+ #TODO: manage dns_address as a list of addresses separated by commas
+ subnet['dns_nameservers'] = []
+ subnet['dns_nameservers'].append(ip_profile['dns_address'])
+ if 'dhcp_enabled' in ip_profile:
+ subnet['enable_dhcp'] = False if ip_profile['dhcp_enabled']=="false" else True
+ if 'dhcp_start_address' in ip_profile:
+ subnet['allocation_pools']=[]
+ subnet['allocation_pools'].append(dict())
+ subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address']
+ if 'dhcp_count' in ip_profile:
+ #parts = ip_profile['dhcp_start_address'].split('.')
+ #ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
+ ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address']))
+ ip_int += ip_profile['dhcp_count'] - 1
+ ip_str = str(netaddr.IPAddress(ip_int))
+ subnet['allocation_pools'][0]['end'] = ip_str
+ #self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
+ self.neutron.create_subnet({"subnet": subnet} )
+ return new_net["network"]["id"]
+ except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+ if new_net:
+ self.neutron.delete_network(new_net['network']['id'])
+ self._format_exception(e)
+
+ def get_network_list(self, filter_dict={}):
+ '''Obtain tenant networks of VIM
+ Filter_dict can be:
+ name: network name
+ id: network uuid
+ shared: boolean
+ tenant_id: tenant
+ admin_state_up: boolean
+ status: 'ACTIVE'
+ Returns the network list of dictionaries
+ '''
+ self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
+ try:
+ self._reload_connection()
+ if self.osc_api_version == 'v3.3' and "tenant_id" in filter_dict:
+ filter_dict['project_id'] = filter_dict.pop('tenant_id')
+ net_dict=self.neutron.list_networks(**filter_dict)
+ net_list=net_dict["networks"]
+ self.__net_os2mano(net_list)
+ return net_list
+ except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def get_network(self, net_id):
+ '''Obtain details of network from VIM
+ Returns the network information from a network id'''
+ self.logger.debug(" Getting tenant network %s from VIM", net_id)
+ filter_dict={"id": net_id}
+ net_list = self.get_network_list(filter_dict)
+ if len(net_list)==0:
+ raise vimconn.vimconnNotFoundException("Network '{}' not found".format(net_id))
+ elif len(net_list)>1:
+ raise vimconn.vimconnConflictException("Found more than one network with this criteria")
+ net = net_list[0]
+ subnets=[]
+ for subnet_id in net.get("subnets", () ):
+ try:
+ subnet = self.neutron.show_subnet(subnet_id)
+ except Exception as e:
+ self.logger.error("osconnector.get_network(): Error getting subnet %s %s" % (net_id, str(e)))
+ subnet = {"id": subnet_id, "fault": str(e)}
+ subnets.append(subnet)
+ net["subnets"] = subnets
++ net["encapsulation"] = net.get('provider:network_type')
++ net["segmentation_id"] = net.get('provider:segmentation_id')
+ return net
+
+ def delete_network(self, net_id):
+ '''Deletes a tenant network from VIM. Returns the old network identifier'''
+ self.logger.debug("Deleting network '%s' from VIM", net_id)
+ try:
+ self._reload_connection()
+ #delete VM ports attached to this networks before the network
+ ports = self.neutron.list_ports(network_id=net_id)
+ for p in ports['ports']:
+ try:
+ self.neutron.delete_port(p["id"])
+ except Exception as e:
+ self.logger.error("Error deleting port %s: %s", p["id"], str(e))
+ self.neutron.delete_network(net_id)
+ return net_id
+ except (neExceptions.ConnectionFailed, neExceptions.NetworkNotFoundClient, neExceptions.NeutronException,
+ ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def refresh_nets_status(self, net_list):
+ '''Get the status of the networks
+ Params: the list of network identifiers
+ Returns a dictionary with:
+ net_id: #VIM id of this network
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, INACTIVE, DOWN (admin down),
+ # BUILD (on building process)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+
+ '''
+ net_dict={}
+ for net_id in net_list:
+ net = {}
+ try:
+ net_vim = self.get_network(net_id)
+ if net_vim['status'] in netStatus2manoFormat:
+ net["status"] = netStatus2manoFormat[ net_vim['status'] ]
+ else:
+ net["status"] = "OTHER"
+ net["error_msg"] = "VIM status reported " + net_vim['status']
+
+ if net['status'] == "ACTIVE" and not net_vim['admin_state_up']:
+ net['status'] = 'DOWN'
+ try:
+ net['vim_info'] = yaml.safe_dump(net_vim, default_flow_style=True, width=256)
+ except yaml.representer.RepresenterError:
+ net['vim_info'] = str(net_vim)
+ if net_vim.get('fault'): #TODO
+ net['error_msg'] = str(net_vim['fault'])
+ except vimconn.vimconnNotFoundException as e:
+ self.logger.error("Exception getting net status: %s", str(e))
+ net['status'] = "DELETED"
+ net['error_msg'] = str(e)
+ except vimconn.vimconnException as e:
+ self.logger.error("Exception getting net status: %s", str(e))
+ net['status'] = "VIM_ERROR"
+ net['error_msg'] = str(e)
+ net_dict[net_id] = net
+ return net_dict
+
+ def get_flavor(self, flavor_id):
+ '''Obtain flavor details from the VIM. Returns the flavor dict details'''
+ self.logger.debug("Getting flavor '%s'", flavor_id)
+ try:
+ self._reload_connection()
+ flavor = self.nova.flavors.find(id=flavor_id)
+ #TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+ return flavor.to_dict()
+ except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def get_flavor_id_from_data(self, flavor_dict):
+ """Obtain flavor id that match the flavor description
+ Returns the flavor_id or raises a vimconnNotFoundException
+ """
+ try:
+ self._reload_connection()
+ numa=None
+ numas = flavor_dict.get("extended",{}).get("numas")
+ if numas:
+ #TODO
+ raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemted")
+ # if len(numas) > 1:
+ # raise vimconn.vimconnNotFoundException("Cannot find any flavor with more than one numa")
+ # numa=numas[0]
+ # numas = extended.get("numas")
+ for flavor in self.nova.flavors.list():
+ epa = flavor.get_keys()
+ if epa:
+ continue
+ #TODO
+ if flavor.ram != flavor_dict["ram"]:
+ continue
+ if flavor.vcpus != flavor_dict["vcpus"]:
+ continue
+ if flavor.disk != flavor_dict["disk"]:
+ continue
+ return flavor.id
+ raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
+ except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
+ self._format_exception(e)
+
+
+ def new_flavor(self, flavor_data, change_name_if_used=True):
+ '''Adds a tenant flavor to openstack VIM
+ if change_name_if_used is True, it will change name in case of conflict, because it is not supported name repetition
+ Returns the flavor identifier
+ '''
+ self.logger.debug("Adding flavor '%s'", str(flavor_data))
+ retry=0
+ max_retries=3
+ name_suffix = 0
+ name=flavor_data['name']
+ while retry<max_retries:
+ retry+=1
+ try:
+ self._reload_connection()
+ if change_name_if_used:
+ #get used names
+ fl_names=[]
+ fl=self.nova.flavors.list()
+ for f in fl:
+ fl_names.append(f.name)
+ while name in fl_names:
+ name_suffix += 1
+ name = flavor_data['name']+"-" + str(name_suffix)
+
+ ram = flavor_data.get('ram',64)
+ vcpus = flavor_data.get('vcpus',1)
+ numa_properties=None
+
+ extended = flavor_data.get("extended")
+ if extended:
+ numas=extended.get("numas")
+ if numas:
+ numa_nodes = len(numas)
+ if numa_nodes > 1:
+ return -1, "Can not add flavor with more than one numa"
+ numa_properties = {"hw:numa_nodes":str(numa_nodes)}
+ numa_properties["hw:mem_page_size"] = "large"
+ numa_properties["hw:cpu_policy"] = "dedicated"
+ numa_properties["hw:numa_mempolicy"] = "strict"
+ for numa in numas:
+ #overwrite ram and vcpus
+ ram = numa['memory']*1024
+ if 'paired-threads' in numa:
+ vcpus = numa['paired-threads']*2
+ numa_properties["hw:cpu_threads_policy"] = "prefer"
+ elif 'cores' in numa:
+ vcpus = numa['cores']
+ #numa_properties["hw:cpu_threads_policy"] = "prefer"
+ elif 'threads' in numa:
+ vcpus = numa['threads']
+ numa_properties["hw:cpu_policy"] = "isolated"
- if net["type"]=="virtual" or net["type"]=="VF":
- port_dict={
- "network_id": net["net_id"],
- "name": net.get("name"),
- "admin_state_up": True
- }
- if net["type"]=="virtual":
- if "vpci" in net:
- metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
- else: # for VF
- if "vpci" in net:
- if "VF" not in metadata_vpci:
- metadata_vpci["VF"]=[]
- metadata_vpci["VF"].append([ net["vpci"], "" ])
- port_dict["binding:vnic_type"]="direct"
- if not port_dict["name"]:
- port_dict["name"]=name
- if net.get("mac_address"):
- port_dict["mac_address"]=net["mac_address"]
- if net.get("port_security") == False:
- port_dict["port_security_enabled"]=net["port_security"]
- new_port = self.neutron.create_port({"port": port_dict })
- net["mac_adress"] = new_port["port"]["mac_address"]
- net["vim_id"] = new_port["port"]["id"]
- net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address")
- net_list_vim.append({"port-id": new_port["port"]["id"]})
- else: # for PF
- self.logger.warn("new_vminstance: Warning, can not connect a passthrough interface ")
- #TODO insert this when openstack consider passthrough ports as openstack neutron ports
++ # for interface in numa.get("interfaces",() ):
++ # if interface["dedicated"]=="yes":
++ # raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
++ # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
+
+ #create flavor
+ new_flavor=self.nova.flavors.create(name,
+ ram,
+ vcpus,
+ flavor_data.get('disk',1),
+ is_public=flavor_data.get('is_public', True)
+ )
+ #add metadata
+ if numa_properties:
+ new_flavor.set_keys(numa_properties)
+ return new_flavor.id
+ except nvExceptions.Conflict as e:
+ if change_name_if_used and retry < max_retries:
+ continue
+ self._format_exception(e)
+ #except nvExceptions.BadRequest as e:
+ except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def delete_flavor(self,flavor_id):
+ '''Deletes a tenant flavor from openstack VIM. Returns the old flavor_id
+ '''
+ try:
+ self._reload_connection()
+ self.nova.flavors.delete(flavor_id)
+ return flavor_id
+ #except nvExceptions.BadRequest as e:
+ except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def new_image(self,image_dict):
+ '''
+ Adds a tenant image to VIM. imge_dict is a dictionary with:
+ name: name
+ disk_format: qcow2, vhd, vmdk, raw (by default), ...
+ location: path or URI
+ public: "yes" or "no"
+ metadata: metadata of the image
+ Returns the image_id
+ '''
+ #using version 1 of glance client
+ glancev1 = gl1Client.Client('1',self.glance_endpoint, token=self.keystone.auth_token, **self.k_creds) #TODO check k_creds vs n_creds
+ retry=0
+ max_retries=3
+ while retry<max_retries:
+ retry+=1
+ try:
+ self._reload_connection()
+ #determine format http://docs.openstack.org/developer/glance/formats.html
+ if "disk_format" in image_dict:
+ disk_format=image_dict["disk_format"]
+ else: #autodiscover based on extension
+ if image_dict['location'][-6:]==".qcow2":
+ disk_format="qcow2"
+ elif image_dict['location'][-4:]==".vhd":
+ disk_format="vhd"
+ elif image_dict['location'][-5:]==".vmdk":
+ disk_format="vmdk"
+ elif image_dict['location'][-4:]==".vdi":
+ disk_format="vdi"
+ elif image_dict['location'][-4:]==".iso":
+ disk_format="iso"
+ elif image_dict['location'][-4:]==".aki":
+ disk_format="aki"
+ elif image_dict['location'][-4:]==".ari":
+ disk_format="ari"
+ elif image_dict['location'][-4:]==".ami":
+ disk_format="ami"
+ else:
+ disk_format="raw"
+ self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
+ if image_dict['location'][0:4]=="http":
+ new_image = glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
+ container_format="bare", location=image_dict['location'], disk_format=disk_format)
+ else: #local path
+ with open(image_dict['location']) as fimage:
+ new_image = glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
+ container_format="bare", data=fimage, disk_format=disk_format)
+ #insert metadata. We cannot use 'new_image.properties.setdefault'
+ #because nova and glance are "INDEPENDENT" and we are using nova for reading metadata
+ new_image_nova=self.nova.images.find(id=new_image.id)
+ new_image_nova.metadata.setdefault('location',image_dict['location'])
+ metadata_to_load = image_dict.get('metadata')
+ if metadata_to_load:
+ for k,v in yaml.load(metadata_to_load).iteritems():
+ new_image_nova.metadata.setdefault(k,v)
+ return new_image.id
+ except (nvExceptions.Conflict, ksExceptions.ClientException, nvExceptions.ClientException) as e:
+ self._format_exception(e)
+ except (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError, ConnectionError) as e:
+ if retry==max_retries:
+ continue
+ self._format_exception(e)
+ except IOError as e: #can not open the file
+ raise vimconn.vimconnConnectionException(type(e).__name__ + ": " + str(e)+ " for " + image_dict['location'],
+ http_code=vimconn.HTTP_Bad_Request)
+
+ def delete_image(self, image_id):
+ '''Deletes a tenant image from openstack VIM. Returns the old id
+ '''
+ try:
+ self._reload_connection()
+ self.nova.images.delete(image_id)
+ return image_id
+ except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e: #TODO remove
+ self._format_exception(e)
+
+ def get_image_id_from_path(self, path):
+ '''Get the image id from image path in the VIM database. Returns the image_id'''
+ try:
+ self._reload_connection()
+ images = self.nova.images.list()
+ for image in images:
+ if image.metadata.get("location")==path:
+ return image.id
+ raise vimconn.vimconnNotFoundException("image with location '{}' not found".format( path))
+ except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
+ self._format_exception(e)
+
+ def get_image_list(self, filter_dict={}):
+ '''Obtain tenant images from VIM
+ Filter_dict can be:
+ id: image id
+ name: image name
+ checksum: image checksum
+ Returns the image list of dictionaries:
+ [{<the fields at Filter_dict plus some VIM specific>}, ...]
+ List can be empty
+ '''
+ self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
+ try:
+ self._reload_connection()
+ filter_dict_os=filter_dict.copy()
+ #First we filter by the available filter fields: name, id. The others are removed.
+ filter_dict_os.pop('checksum',None)
+ image_list=self.nova.images.findall(**filter_dict_os)
+ if len(image_list)==0:
+ return []
+ #Then we filter by the rest of filter fields: checksum
+ filtered_list = []
+ for image in image_list:
+ image_class=self.glance.images.get(image.id)
+ if 'checksum' not in filter_dict or image_class['checksum']==filter_dict.get('checksum'):
+ filtered_list.append(image_class.copy())
+ return filtered_list
+ except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
+ self._format_exception(e)
+
    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None,disk_list=None):
        '''Adds a VM instance to VIM.
        Params:
            name: VM name; also used as fallback port name and as volume-name prefix
            description: VM description (currently NOT forwarded to nova, see commented kwarg below)
            start: indicates if VM must start or boot in pause mode. Ignored
            image_id,flavor_id: image and flavor uuid
            net_list: list of interfaces, each one is a dictionary with:
                name:
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge', 'mgmt'
                type: 'virtual', 'PF', 'VF', 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
                #TODO ip, security groups
            cloud_config: dict with 'user-data'/'users'/'key-pairs'/'config-files'/'boot-data-drive'
                used to build cloud-init userdata, or a raw string taken verbatim as userdata
            disk_list: optional list of extra disks; each item has 'size' and optionally 'image_id'
        Returns the instance identifier
        '''
        self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list))
        try:
            metadata={}
            net_list_vim=[]     # nova "nics" argument: list of {"port-id": ...} of pre-created neutron ports
            external_network=[] #list of external networks to be connected to instance, later on used to create floating_ip
            self._reload_connection()
            metadata_vpci={} #For a specific neutron plugin
            for net in net_list:
                if not net.get("net_id"): #skip non connected iface
                    continue

                # a neutron port is created per interface so vnic_type/mac can be controlled
                port_dict={
                    "network_id": net["net_id"],
                    "name": net.get("name"),
                    "admin_state_up": True
                }
                if net["type"]=="virtual":
                    if "vpci" in net:
                        metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
                elif net["type"]=="VF": # for VF (SR-IOV virtual function)
                    if "vpci" in net:
                        if "VF" not in metadata_vpci:
                            metadata_vpci["VF"]=[]
                        metadata_vpci["VF"].append([ net["vpci"], "" ])
                    port_dict["binding:vnic_type"]="direct"
                else: #For PT (PCI passthrough / physical function)
                    if "vpci" in net:
                        if "PF" not in metadata_vpci:
                            metadata_vpci["PF"]=[]
                        metadata_vpci["PF"].append([ net["vpci"], "" ])
                    port_dict["binding:vnic_type"]="direct-physical"
                if not port_dict["name"]:
                    port_dict["name"]=name  # fall back to the VM name
                if net.get("mac_address"):
                    port_dict["mac_address"]=net["mac_address"]
                if net.get("port_security") == False:
                    port_dict["port_security_enabled"]=net["port_security"]
                new_port = self.neutron.create_port({"port": port_dict })
                # NOTE(review): key below is misspelled ("mac_adress"); kept as-is because callers may rely on it
                net["mac_adress"] = new_port["port"]["mac_address"]
                net["vim_id"] = new_port["port"]["id"]
                # first fixed ip assigned to the port, if any
                net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address")
                net_list_vim.append({"port-id": new_port["port"]["id"]})

                # remember interfaces that need a floating ip; only explicitly requested ones abort on failure
                if net.get('floating_ip', False):
                    net['exit_on_floating_ip_error'] = True
                    external_network.append(net)
                elif net['use'] == 'mgmt' and self.config.get('use_floating_ip'):
                    net['exit_on_floating_ip_error'] = False
                    external_network.append(net)

            if metadata_vpci:
                metadata = {"pci_assignement": json.dumps(metadata_vpci)}
                if len(metadata["pci_assignement"]) >255:
                    #limit the metadata size
                    #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
                    self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
                    metadata = {}

            self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s' metadata %s",
                              name, image_id, flavor_id, str(net_list_vim), description, str(metadata))

            security_groups = self.config.get('security_groups')
            if type(security_groups) is str:
                # nova expects a sequence of group names
                security_groups = ( security_groups, )
            #cloud config
            userdata=None
            config_drive = None
            if isinstance(cloud_config, dict):
                if cloud_config.get("user-data"):
                    userdata=cloud_config["user-data"]
                if cloud_config.get("boot-data-drive") != None:
                    config_drive = cloud_config["boot-data-drive"]
                if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
                    if userdata:
                        raise vimconn.vimconnConflictException("Cloud-config cannot contain both 'userdata' and 'config-files'/'users'/'key-pairs'")
                    # build a #cloud-config document from the structured fields
                    userdata_dict={}
                    #default user
                    if cloud_config.get("key-pairs"):
                        userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"]
                        userdata_dict["users"] = [{"default": None, "ssh-authorized-keys": cloud_config["key-pairs"] }]
                    if cloud_config.get("users"):
                        if "users" not in userdata_dict:
                            userdata_dict["users"] = [ "default" ]
                        for user in cloud_config["users"]:
                            user_info = {
                                "name" : user["name"],
                                "sudo": "ALL = (ALL)NOPASSWD:ALL"
                            }
                            if "user-info" in user:
                                user_info["gecos"] = user["user-info"]
                            if user.get("key-pairs"):
                                user_info["ssh-authorized-keys"] = user["key-pairs"]
                            userdata_dict["users"].append(user_info)

                    if cloud_config.get("config-files"):
                        userdata_dict["write_files"] = []
                        for file in cloud_config["config-files"]:
                            file_info = {
                                "path" : file["dest"],
                                "content": file["content"]
                            }
                            if file.get("encoding"):
                                file_info["encoding"] = file["encoding"]
                            if file.get("permissions"):
                                file_info["permissions"] = file["permissions"]
                            if file.get("owner"):
                                file_info["owner"] = file["owner"]
                            userdata_dict["write_files"].append(file_info)
                    userdata = "#cloud-config\n"
                    userdata += yaml.safe_dump(userdata_dict, indent=4, default_flow_style=False)
                self.logger.debug("userdata: %s", userdata)
            elif isinstance(cloud_config, str):
                # a plain string is taken as raw userdata
                userdata = cloud_config

            #Create additional volumes in case these are present in disk_list
            block_device_mapping = None
            base_disk_index = ord('b')  # extra disks are named vdb, vdc, ... (vda is the boot disk)
            if disk_list != None:
                block_device_mapping = dict()
                for disk in disk_list:
                    if 'image_id' in disk:
                        volume = self.cinder.volumes.create(size = disk['size'],name = name + '_vd' +
                                                            chr(base_disk_index), imageRef = disk['image_id'])
                    else:
                        volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
                                                            chr(base_disk_index))
                    block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
                    base_disk_index += 1

                #wait until volumes are with status available
                # NOTE: volume_timeout is a module-level constant not visible in this chunk
                keep_waiting = True
                elapsed_time = 0
                while keep_waiting and elapsed_time < volume_timeout:
                    keep_waiting = False
                    for volume_id in block_device_mapping.itervalues():
                        if self.cinder.volumes.get(volume_id).status != 'available':
                            keep_waiting = True
                    if keep_waiting:
                        time.sleep(1)
                        elapsed_time += 1

                #if we exceeded the timeout rollback
                if elapsed_time >= volume_timeout:
                    #delete the volumes we just created
                    for volume_id in block_device_mapping.itervalues():
                        self.cinder.volumes.delete(volume_id)

                    #delete ports we just created
                    for net_item in net_list_vim:
                        if 'port-id' in net_item:
                            self.neutron.delete_port(net_item['port-id'])

                    raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
                                                   http_code=vimconn.HTTP_Request_Timeout)

            server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata,
                                              security_groups=security_groups,
                                              availability_zone=self.config.get('availability_zone'),
                                              key_name=self.config.get('keypair'),
                                              userdata=userdata,
                                              config_drive = config_drive,
                                              block_device_mapping = block_device_mapping
                                              )  # , description=description)
            #print "DONE :-)", server
            pool_id = None
            floating_ips = self.neutron.list_floatingips().get("floatingips", ())
            for floating_network in external_network:
                try:
                    # wait until vm is active; floating ips can only be attached to an ACTIVE server
                    # NOTE: server_timeout is a module-level constant not visible in this chunk
                    elapsed_time = 0
                    while elapsed_time < server_timeout:
                        status = self.nova.servers.get(server.id).status
                        if status == 'ACTIVE':
                            break
                        time.sleep(1)
                        elapsed_time += 1

                    #if we exceeded the timeout rollback
                    if elapsed_time >= server_timeout:
                        raise vimconn.vimconnException('Timeout creating instance ' + name,
                                                       http_code=vimconn.HTTP_Request_Timeout)

                    assigned = False
                    while(assigned == False):
                        # first try to reuse an existing unattached floating ip of this tenant
                        if floating_ips:
                            ip = floating_ips.pop(0)
                            if not ip.get("port_id", False) and ip.get('tenant_id') == server.tenant_id:
                                free_floating_ip = ip.get("floating_ip_address")
                                try:
                                    fix_ip = floating_network.get('ip')
                                    server.add_floating_ip(free_floating_ip, fix_ip)
                                    assigned = True
                                except Exception as e:
                                    raise vimconn.vimconnException(type(e).__name__ + ": Cannot create floating_ip "+ str(e), http_code=vimconn.HTTP_Conflict)
                        else:
                            # no free floating ip: create one, which requires a single external network
                            #Find the external network
                            external_nets = list()
                            for net in self.neutron.list_networks()['networks']:
                                if net['router:external']:
                                    external_nets.append(net)

                            if len(external_nets) == 0:
                                raise vimconn.vimconnException("Cannot create floating_ip automatically since no external "
                                                               "network is present",
                                                               http_code=vimconn.HTTP_Conflict)
                            if len(external_nets) > 1:
                                raise vimconn.vimconnException("Cannot create floating_ip automatically since multiple "
                                                               "external networks are present",
                                                               http_code=vimconn.HTTP_Conflict)

                            pool_id = external_nets[0].get('id')
                            param = {'floatingip': {'floating_network_id': pool_id, 'tenant_id': server.tenant_id}}
                            try:
                                #self.logger.debug("Creating floating IP")
                                new_floating_ip = self.neutron.create_floatingip(param)
                                free_floating_ip = new_floating_ip['floatingip']['floating_ip_address']
                                fix_ip = floating_network.get('ip')
                                server.add_floating_ip(free_floating_ip, fix_ip)
                                assigned=True
                            except Exception as e:
                                raise vimconn.vimconnException(type(e).__name__ + ": Cannot assign floating_ip "+ str(e), http_code=vimconn.HTTP_Conflict)
                except Exception as e:
                    # best-effort (mgmt) interfaces only log the failure; requested floating ips abort the VM
                    if not floating_network['exit_on_floating_ip_error']:
                        self.logger.warn("Cannot create floating_ip. %s", str(e))
                        continue
                    self.delete_vminstance(server.id)
                    raise

            return server.id
#        except nvExceptions.NotFound as e:
#            error_value=-vimconn.HTTP_Not_Found
#            error_text= "vm instance %s not found" % vm_id
        except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
            # rollback: delete the volumes we just created
            if block_device_mapping != None:
                for volume_id in block_device_mapping.itervalues():
                    self.cinder.volumes.delete(volume_id)

            # rollback: delete ports we just created
            for net_item in net_list_vim:
                if 'port-id' in net_item:
                    self.neutron.delete_port(net_item['port-id'])
            self._format_exception(e)
        except TypeError as e:
            raise vimconn.vimconnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
+
+ def get_vminstance(self,vm_id):
+ '''Returns the VM instance information from VIM'''
+ #self.logger.debug("Getting VM from VIM")
+ try:
+ self._reload_connection()
+ server = self.nova.servers.find(id=vm_id)
+ #TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+ return server.to_dict()
+ except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e:
+ self._format_exception(e)
+
+ def get_vminstance_console(self,vm_id, console_type="vnc"):
+ '''
+ Get a console for the virtual machine
+ Params:
+ vm_id: uuid of the VM
+ console_type, can be:
+ "novnc" (by default), "xvpvnc" for VNC types,
+ "rdp-html5" for RDP types, "spice-html5" for SPICE types
+ Returns dict with the console parameters:
+ protocol: ssh, ftp, http, https, ...
+ server: usually ip address
+ port: the http, ssh, ... port
+ suffix: extra text, e.g. the http path and query string
+ '''
+ self.logger.debug("Getting VM CONSOLE from VIM")
+ try:
+ self._reload_connection()
+ server = self.nova.servers.find(id=vm_id)
+ if console_type == None or console_type == "novnc":
+ console_dict = server.get_vnc_console("novnc")
+ elif console_type == "xvpvnc":
+ console_dict = server.get_vnc_console(console_type)
+ elif console_type == "rdp-html5":
+ console_dict = server.get_rdp_console(console_type)
+ elif console_type == "spice-html5":
+ console_dict = server.get_spice_console(console_type)
+ else:
+ raise vimconn.vimconnException("console type '{}' not allowed".format(console_type), http_code=vimconn.HTTP_Bad_Request)
+
+ console_dict1 = console_dict.get("console")
+ if console_dict1:
+ console_url = console_dict1.get("url")
+ if console_url:
+ #parse console_url
+ protocol_index = console_url.find("//")
+ suffix_index = console_url[protocol_index+2:].find("/") + protocol_index+2
+ port_index = console_url[protocol_index+2:suffix_index].find(":") + protocol_index+2
+ if protocol_index < 0 or port_index<0 or suffix_index<0:
+ return -vimconn.HTTP_Internal_Server_Error, "Unexpected response from VIM"
+ console_dict={"protocol": console_url[0:protocol_index],
+ "server": console_url[protocol_index+2:port_index],
+ "port": console_url[port_index:suffix_index],
+ "suffix": console_url[suffix_index+1:]
+ }
+ protocol_index += 2
+ return console_dict
+ raise vimconn.vimconnUnexpectedResponse("Unexpected response from VIM")
+
+ except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.BadRequest, ConnectionError) as e:
+ self._format_exception(e)
+
    def delete_vminstance(self, vm_id):
        '''Removes a VM instance from VIM. Returns the old identifier.
        Also deletes the neutron ports attached to the VM and the cinder
        volumes that were attached to it, waiting for them to become available.
        '''
        #print "osconnector: Getting VM from VIM"
        try:
            self._reload_connection()
            #delete VM ports attached to this networks before the virtual machine
            ports = self.neutron.list_ports(device_id=vm_id)
            for p in ports['ports']:
                try:
                    self.neutron.delete_port(p["id"])
                except Exception as e:
                    # best-effort: a failed port delete does not abort the VM deletion
                    self.logger.error("Error deleting port: " + type(e).__name__ + ": "+ str(e))

            #commented because detaching the volumes makes the servers.delete not work properly ?!?
            #dettach volumes attached
            server = self.nova.servers.get(vm_id)
            volumes_attached_dict = server._info['os-extended-volumes:volumes_attached']
            #for volume in volumes_attached_dict:
            #    self.cinder.volumes.detach(volume['id'])

            self.nova.servers.delete(vm_id)

            #delete volumes.
            #Although having detached them should have them in active status
            #we ensure in this loop
            # NOTE: volume_timeout is a module-level constant not visible in this chunk
            keep_waiting = True
            elapsed_time = 0
            while keep_waiting and elapsed_time < volume_timeout:
                keep_waiting = False
                for volume in volumes_attached_dict:
                    # a volume can only be deleted once nova releases it ('available')
                    if self.cinder.volumes.get(volume['id']).status != 'available':
                        keep_waiting = True
                    else:
                        self.cinder.volumes.delete(volume['id'])
                if keep_waiting:
                    time.sleep(1)
                    elapsed_time += 1

            return vm_id
        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
            self._format_exception(e)
        #TODO insert exception vimconn.HTTP_Unauthorized
        #if reaching here is because an exception
+
    def refresh_vms_status(self, vm_list):
        '''Get the status of the virtual machines and their interfaces/ports
           Params: the list of VM identifiers
           Returns a dictionary with:
                vm_id:          #VIM id of this Virtual Machine
                    status:     #Mandatory. Text with one of:
                                #  DELETED (not found at vim)
                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                #  OTHER (Vim reported other status not understood)
                                #  ERROR (VIM indicates an ERROR status)
                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                                #  CREATING (on building process), ERROR
                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                                #
                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                    interfaces:
                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
                        vim_net_id:       #network id where this interface is connected
                        vim_interface_id: #interface/port VIM id
                        ip_address:       #null, or text with IPv4, IPv6 address
                        compute_node:     #identification of compute node where PF,VF interface is allocated
                        pci:              #PCI address of the NIC that hosts the PF,VF
                        vlan:             #physical VLAN used for VF
        '''
        vm_dict={}
        self.logger.debug("refresh_vms status: Getting tenant VM instance information from VIM")
        for vm_id in vm_list:
            vm={}
            try:
                vm_vim = self.get_vminstance(vm_id)
                # translate the VIM status to the openmano one; unknown values reported as OTHER
                if vm_vim['status'] in vmStatus2manoFormat:
                    vm['status'] = vmStatus2manoFormat[ vm_vim['status'] ]
                else:
                    vm['status'] = "OTHER"
                    vm['error_msg'] = "VIM status reported " + vm_vim['status']
                try:
                    vm['vim_info'] = yaml.safe_dump(vm_vim, default_flow_style=True, width=256)
                except yaml.representer.RepresenterError:
                    # fall back to plain str when the dict contains non-serializable objects
                    vm['vim_info'] = str(vm_vim)
                vm["interfaces"] = []
                if vm_vim.get('fault'):
                    vm['error_msg'] = str(vm_vim['fault'])
                #get interfaces
                try:
                    self._reload_connection()
                    port_dict=self.neutron.list_ports(device_id=vm_id)
                    for port in port_dict["ports"]:
                        interface={}
                        try:
                            interface['vim_info'] = yaml.safe_dump(port, default_flow_style=True, width=256)
                        except yaml.representer.RepresenterError:
                            interface['vim_info'] = str(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
                        interface["pci"] = None
                        if port['binding:profile'].get('pci_slot'):
                            # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting the slot to 0x00
                            # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                            #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                            pci = port['binding:profile']['pci_slot']
                            # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                            interface["pci"] = pci
                        interface["vlan"] = None
                        #if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                        network = self.neutron.show_network(port["network_id"])
                        if network['network'].get('provider:network_type') == 'vlan' and \
                            port.get("binding:vnic_type") == "direct":
                            interface["vlan"] = network['network'].get('provider:segmentation_id')
                        ips=[]
                        #look for floating ip address
                        floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"])
                        if floating_ip_dict.get("floatingips"):
                            ips.append(floating_ip_dict["floatingips"][0].get("floating_ip_address") )

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])
                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # interface details are optional: log and keep the VM status entry
                    self.logger.error("Error getting vm interface information " + type(e).__name__ + ": "+ str(e))
            except vimconn.vimconnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "DELETED"
                vm['error_msg'] = str(e)
            except vimconn.vimconnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "VIM_ERROR"
                vm['error_msg'] = str(e)
            vm_dict[vm_id] = vm
        return vm_dict
+
    def action_vminstance(self, vm_id, action_dict):
        '''Send and action over a VM instance from VIM.
        Params:
            vm_id: VIM identifier of the VM
            action_dict: dict whose keys select the action: start, pause, resume,
                shutoff/shutdown, forceOff, terminate, createImage, rebuild, reboot, console
        Returns the vm_id if the action was successfully sent to the VIM
        (for "console" a dict with protocol/server/port/suffix is returned instead)'''
        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)
            if "start" in action_dict:
                if action_dict["start"]=="rebuild":
                    server.rebuild()
                else:
                    # resume from whatever stopped-like state the server is in
                    if server.status=="PAUSED":
                        server.unpause()
                    elif server.status=="SUSPENDED":
                        server.resume()
                    elif server.status=="SHUTOFF":
                        server.start()
            elif "pause" in action_dict:
                server.pause()
            elif "resume" in action_dict:
                server.resume()
            elif "shutoff" in action_dict or "shutdown" in action_dict:
                server.stop()
            elif "forceOff" in action_dict:
                server.stop() #TODO
            elif "terminate" in action_dict:
                server.delete()
            elif "createImage" in action_dict:
                server.create_image()
                #"path":path_schema,
                #"description":description_schema,
                #"name":name_schema,
                #"metadata":metadata_schema,
                #"imageRef": id_schema,
                #"disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
            elif "rebuild" in action_dict:
                server.rebuild(server.image['id'])
            elif "reboot" in action_dict:
                server.reboot() #reboot_type='SOFT'
            elif "console" in action_dict:
                console_type = action_dict["console"]
                if console_type == None or console_type == "novnc":
                    console_dict = server.get_vnc_console("novnc")
                elif console_type == "xvpvnc":
                    console_dict = server.get_vnc_console(console_type)
                elif console_type == "rdp-html5":
                    console_dict = server.get_rdp_console(console_type)
                elif console_type == "spice-html5":
                    console_dict = server.get_spice_console(console_type)
                else:
                    raise vimconn.vimconnException("console type '{}' not allowed".format(console_type),
                                                   http_code=vimconn.HTTP_Bad_Request)
                try:
                    console_url = console_dict["console"]["url"]
                    #parse console_url as "protocol://server:port/suffix"
                    protocol_index = console_url.find("//")
                    suffix_index = console_url[protocol_index+2:].find("/") + protocol_index+2
                    port_index = console_url[protocol_index+2:suffix_index].find(":") + protocol_index+2
                    if protocol_index < 0 or port_index<0 or suffix_index<0:
                        raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))
                    console_dict2={"protocol": console_url[0:protocol_index],
                                   "server": console_url[protocol_index+2 : port_index],
                                   "port": int(console_url[port_index+1 : suffix_index]),
                                   "suffix": console_url[suffix_index+1:]
                                   }
                    return console_dict2
                except Exception as e:
                    # any parsing failure is reported as an unexpected VIM response
                    raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))

            return vm_id
        except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e:
            self._format_exception(e)
        #TODO insert exception vimconn.HTTP_Unauthorized
+
+ #NOT USED FUNCTIONS
+
+ def new_external_port(self, port_data):
+ #TODO openstack if needed
+ '''Adds a external port to VIM'''
+ '''Returns the port identifier'''
+ return -vimconn.HTTP_Internal_Server_Error, "osconnector.new_external_port() not implemented"
+
+ def connect_port_network(self, port_id, network_id, admin=False):
+ #TODO openstack if needed
+ '''Connects a external port to a network'''
+ '''Returns status code of the VIM response'''
+ return -vimconn.HTTP_Internal_Server_Error, "osconnector.connect_port_network() not implemented"
+
+ def new_user(self, user_name, user_passwd, tenant_id=None):
+ '''Adds a new user to openstack VIM'''
+ '''Returns the user identifier'''
+ self.logger.debug("osconnector: Adding a new user to VIM")
+ try:
+ self._reload_connection()
+ user=self.keystone.users.create(user_name, user_passwd, tenant_id=tenant_id)
+ #self.keystone.tenants.add_user(self.k_creds["username"], #role)
+ return user.id
+ except ksExceptions.ConnectionError as e:
+ error_value=-vimconn.HTTP_Bad_Request
+ error_text= type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0]))
+ except ksExceptions.ClientException as e: #TODO remove
+ error_value=-vimconn.HTTP_Bad_Request
+ error_text= type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0]))
+ #TODO insert exception vimconn.HTTP_Unauthorized
+ #if reaching here is because an exception
+ if self.debug:
+ self.logger.debug("new_user " + error_text)
+ return error_value, error_text
+
+ def delete_user(self, user_id):
+ '''Delete a user from openstack VIM'''
+ '''Returns the user identifier'''
+ if self.debug:
+ print "osconnector: Deleting a user from VIM"
+ try:
+ self._reload_connection()
+ self.keystone.users.delete(user_id)
+ return 1, user_id
+ except ksExceptions.ConnectionError as e:
+ error_value=-vimconn.HTTP_Bad_Request
+ error_text= type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0]))
+ except ksExceptions.NotFound as e:
+ error_value=-vimconn.HTTP_Not_Found
+ error_text= type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0]))
+ except ksExceptions.ClientException as e: #TODO remove
+ error_value=-vimconn.HTTP_Bad_Request
+ error_text= type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0]))
+ #TODO insert exception vimconn.HTTP_Unauthorized
+ #if reaching here is because an exception
+ if self.debug:
+ print "delete_tenant " + error_text
+ return error_value, error_text
+
+ def get_hosts_info(self):
+ '''Get the information of deployed hosts
+ Returns the hosts content'''
+ if self.debug:
+ print "osconnector: Getting Host info from VIM"
+ try:
+ h_list=[]
+ self._reload_connection()
+ hypervisors = self.nova.hypervisors.list()
+ for hype in hypervisors:
+ h_list.append( hype.to_dict() )
+ return 1, {"hosts":h_list}
+ except nvExceptions.NotFound as e:
+ error_value=-vimconn.HTTP_Not_Found
+ error_text= (str(e) if len(e.args)==0 else str(e.args[0]))
+ except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+ error_value=-vimconn.HTTP_Bad_Request
+ error_text= type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0]))
+ #TODO insert exception vimconn.HTTP_Unauthorized
+ #if reaching here is because an exception
+ if self.debug:
+ print "get_hosts_info " + error_text
+ return error_value, error_text
+
+ def get_hosts(self, vim_tenant):
+ '''Get the hosts and deployed instances
+ Returns the hosts content'''
+ r, hype_dict = self.get_hosts_info()
+ if r<0:
+ return r, hype_dict
+ hypervisors = hype_dict["hosts"]
+ try:
+ servers = self.nova.servers.list()
+ for hype in hypervisors:
+ for server in servers:
+ if server.to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']==hype['hypervisor_hostname']:
+ if 'vm' in hype:
+ hype['vm'].append(server.id)
+ else:
+ hype['vm'] = [server.id]
+ return 1, hype_dict
+ except nvExceptions.NotFound as e:
+ error_value=-vimconn.HTTP_Not_Found
+ error_text= (str(e) if len(e.args)==0 else str(e.args[0]))
+ except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+ error_value=-vimconn.HTTP_Bad_Request
+ error_text= type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0]))
+ #TODO insert exception vimconn.HTTP_Unauthorized
+ #if reaching here is because an exception
+ if self.debug:
+ print "get_hosts " + error_text
+ return error_value, error_text
+
+
--- /dev/null
-DEFAULT_IP_PROFILE = {'gateway_address':"192.168.1.1",
- 'dhcp_count':50,
- 'subnet_address':"192.168.1.0/24",
+ # -*- coding: utf-8 -*-
+
+ ##
+ # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+
+ """
+ vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
+ mbayramov@vmware.com
+ """
+ from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
+
+ import vimconn
+ import os
+ import traceback
+ import itertools
+ import requests
+ import ssl
+ import atexit
+
+ from pyVmomi import vim, vmodl
+ from pyVim.connect import SmartConnect, Disconnect
+
+ from xml.etree import ElementTree as XmlElementTree
+ from lxml import etree as lxmlElementTree
+
+ import yaml
+ from pyvcloud import Http
+ from pyvcloud.vcloudair import VCA
+ from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
+ vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
+ networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
+ from xml.sax.saxutils import escape
+
+ from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
+ from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
+ from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
+ from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
+
+ import logging
+ import json
+ import time
+ import uuid
+ import httplib
+ import hashlib
+ import socket
+ import struct
+ import netaddr
++import random
+
# global variable for vcd connector type
STANDALONE = 'standalone'

# key for flavor dicts (see vimconnector.flavorlist)
FLAVOR_RAM_KEY = 'ram'
FLAVOR_VCPUS_KEY = 'vcpus'
FLAVOR_DISK_KEY = 'disk'
# default IP profile applied when a network is created without an explicit one
DEFAULT_IP_PROFILE = {'dhcp_count':50,
                      'dhcp_enabled':True,
                      'ip_version':"IPv4"
                      }
# global variable for wait time (seconds) when polling vCD tasks
INTERVAL_TIME = 5
MAX_WAIT_TIME = 1800

# pyvcloud VCA API version used for all logins
VCAVERSION = '5.9'

__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
__date__ = "$12-Jan-2017 11:09:29$"
__version__ = '0.1'

# vCloud Director vApp status codes, for reference:
#     -1: "Could not be created",
#     0: "Unresolved",
#     1: "Resolved",
#     2: "Deployed",
#     3: "Suspended",
#     4: "Powered on",
#     5: "Waiting for user input",
#     6: "Unknown state",
#     7: "Unrecognized state",
#     8: "Powered off",
#     9: "Inconsistent state",
#     10: "Children do not all have the same status",
#     11: "Upload initiated, OVF descriptor pending",
#     12: "Upload initiated, copying contents",
#     13: "Upload initiated , disk contents pending",
#     14: "Upload has been quarantined",
#     15: "Upload quarantine period has expired"

# mapping vCD status to MANO
vcdStatusCode2manoFormat = {4: 'ACTIVE',
                            7: 'PAUSED',
                            3: 'SUSPENDED',
                            8: 'INACTIVE',
                            12: 'BUILD',
                            -1: 'ERROR',
                            14: 'DELETED'}

#
netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
                        'ERROR': 'ERROR', 'DELETED': 'DELETED'
                        }
+
+ class vimconnector(vimconn.vimconnector):
+ # dict used to store flavor in memory
+ flavorlist = {}
+
+ def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
+ url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
+ """
+ Constructor create vmware connector to vCloud director.
+
+ By default construct doesn't validate connection state. So client can create object with None arguments.
+ If client specified username , password and host and VDC name. Connector initialize other missing attributes.
+
+ a) It initialize organization UUID
+ b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
+
+ Args:
+ uuid - is organization uuid.
+ name - is organization name that must be presented in vCloud director.
+ tenant_id - is VDC uuid it must be presented in vCloud director
+ tenant_name - is VDC name.
+ url - is hostname or ip address of vCloud director
+ url_admin - same as above.
+ user - is user that administrator for organization. Caller must make sure that
+ username has right privileges.
+
+ password - is password for a user.
+
+ VMware connector also requires PVDC administrative privileges and separate account.
+ This variables must be passed via config argument dict contains keys
+
+ dict['admin_username']
+ dict['admin_password']
+ config - Provide NSX and vCenter information
+
+ Returns:
+ Nothing.
+ """
+
+ vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
+ url_admin, user, passwd, log_level, config)
+
+ self.logger = logging.getLogger('openmano.vim.vmware')
+ self.logger.setLevel(10)
+ self.persistent_info = persistent_info
+
+ self.name = name
+ self.id = uuid
+ self.url = url
+ self.url_admin = url_admin
+ self.tenant_id = tenant_id
+ self.tenant_name = tenant_name
+ self.user = user
+ self.passwd = passwd
+ self.config = config
+ self.admin_password = None
+ self.admin_user = None
+ self.org_name = ""
+ self.nsx_manager = None
+ self.nsx_user = None
+ self.nsx_password = None
- vdcid = vdc.get_id().split(":")[3]
- networks = vca.get_networks(vdc.get_name())
- network_list = []
-
+
+ if tenant_name is not None:
+ orgnameandtenant = tenant_name.split(":")
+ if len(orgnameandtenant) == 2:
+ self.tenant_name = orgnameandtenant[1]
+ self.org_name = orgnameandtenant[0]
+ else:
+ self.tenant_name = tenant_name
+ if "orgname" in config:
+ self.org_name = config['orgname']
+
+ if log_level:
+ self.logger.setLevel(getattr(logging, log_level))
+
+ try:
+ self.admin_user = config['admin_username']
+ self.admin_password = config['admin_password']
+ except KeyError:
+ raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
+
+ try:
+ self.nsx_manager = config['nsx_manager']
+ self.nsx_user = config['nsx_user']
+ self.nsx_password = config['nsx_password']
+ except KeyError:
+ raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
+
+ self.vcenter_ip = config.get("vcenter_ip", None)
+ self.vcenter_port = config.get("vcenter_port", None)
+ self.vcenter_user = config.get("vcenter_user", None)
+ self.vcenter_password = config.get("vcenter_password", None)
+
++# ############# Stub code for SRIOV #################
++# try:
++# self.dvs_name = config['dv_switch_name']
++# except KeyError:
++# raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
++#
++# self.vlanID_range = config.get("vlanID_range", None)
++
+ self.org_uuid = None
+ self.vca = None
+
+ if not url:
+ raise vimconn.vimconnException('url param can not be NoneType')
+
+ if not self.url_admin: # try to use normal url
+ self.url_admin = self.url
+
+ logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
+ self.tenant_id, self.tenant_name))
+ logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
+ logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
+
+ # initialize organization
+ if self.user is not None and self.passwd is not None and self.url:
+ self.init_organization()
+
+ def __getitem__(self, index):
+ if index == 'name':
+ return self.name
+ if index == 'tenant_id':
+ return self.tenant_id
+ if index == 'tenant_name':
+ return self.tenant_name
+ elif index == 'id':
+ return self.id
+ elif index == 'org_name':
+ return self.org_name
+ elif index == 'org_uuid':
+ return self.org_uuid
+ elif index == 'user':
+ return self.user
+ elif index == 'passwd':
+ return self.passwd
+ elif index == 'url':
+ return self.url
+ elif index == 'url_admin':
+ return self.url_admin
+ elif index == "config":
+ return self.config
+ else:
+ raise KeyError("Invalid key '%s'" % str(index))
+
+ def __setitem__(self, index, value):
+ if index == 'name':
+ self.name = value
+ if index == 'tenant_id':
+ self.tenant_id = value
+ if index == 'tenant_name':
+ self.tenant_name = value
+ elif index == 'id':
+ self.id = value
+ elif index == 'org_name':
+ self.org_name = value
+ elif index == 'org_uuid':
+ self.org_uuid = value
+ elif index == 'user':
+ self.user = value
+ elif index == 'passwd':
+ self.passwd = value
+ elif index == 'url':
+ self.url = value
+ elif index == 'url_admin':
+ self.url_admin = value
+ else:
+ raise KeyError("Invalid key '%s'" % str(index))
+
    def connect_as_admin(self):
        """Open a vCloud director session as the provider ('System' org) admin.

        Certain actions can only be done by the provider vdc admin user:
        organization creation, provider network creation, etc.

        Returns:
            The logged-in VCA object, usable for provider-level requests.

        Raises:
            vimconnConnectionException: if the password login fails.
        """

        self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))

        # STANDALONE targets a private vCloud Director instance.
        # NOTE(review): verify=False disables TLS certificate checking.
        vca_admin = VCA(host=self.url,
                        username=self.admin_user,
                        service_type=STANDALONE,
                        version=VCAVERSION,
                        verify=False,
                        log=False)
        result = vca_admin.login(password=self.admin_password, org='System')
        if not result:
            raise vimconn.vimconnConnectionException(
                "Can't connect to a vCloud director as: {}".format(self.admin_user))
        # Second login re-authenticates with the session token against the
        # org URL obtained by the first (password) login.
        result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
        if result is True:
            self.logger.info(
                "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))

        return vca_admin
+
+ def connect(self):
+ """ Method connect as normal user to vCloud director.
+
+ Returns:
+ The return vca object that letter can be used to connect to vCloud director as admin for VDC
+ """
+
+ try:
+ self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
+ self.user,
+ self.org_name))
+ vca = VCA(host=self.url,
+ username=self.user,
+ service_type=STANDALONE,
+ version=VCAVERSION,
+ verify=False,
+ log=False)
+
+ result = vca.login(password=self.passwd, org=self.org_name)
+ if not result:
+ raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
+ result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
+ if result is True:
+ self.logger.info(
+ "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
+
+ except:
+ raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
+ "{} as user: {}".format(self.org_name, self.user))
+
+ return vca
+
+ def init_organization(self):
+ """ Method initialize organization UUID and VDC parameters.
+
+ At bare minimum client must provide organization name that present in vCloud director and VDC.
+
+ The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
+ The Org - UUID will be initialized at the run time if data center present in vCloud director.
+
+ Returns:
+ The return vca object that letter can be used to connect to vcloud direct as admin
+ """
+ try:
+ if self.org_uuid is None:
+ org_dict = self.get_org_list()
+ for org in org_dict:
+ # we set org UUID at the init phase but we can do it only when we have valid credential.
+ if org_dict[org] == self.org_name:
+ self.org_uuid = org
+ self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
+ break
+ else:
+ raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
+
+ # if well good we require for org details
+ org_details_dict = self.get_org(org_uuid=self.org_uuid)
+
+ # we have two case if we want to initialize VDC ID or VDC name at run time
+ # tenant_name provided but no tenant id
+ if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
+ vdcs_dict = org_details_dict['vdcs']
+ for vdc in vdcs_dict:
+ if vdcs_dict[vdc] == self.tenant_name:
+ self.tenant_id = vdc
+ self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
+ self.org_name))
+ break
+ else:
+ raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
+ # case two we have tenant_id but we don't have tenant name so we find and set it.
+ if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
+ vdcs_dict = org_details_dict['vdcs']
+ for vdc in vdcs_dict:
+ if vdc == self.tenant_id:
+ self.tenant_name = vdcs_dict[vdc]
+ self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
+ self.org_name))
+ break
+ else:
+ raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
+ self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
+ except:
+ self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
+ self.logger.debug(traceback.format_exc())
+ self.org_uuid = None
+
+ def new_tenant(self, tenant_name=None, tenant_description=None):
+ """ Method adds a new tenant to VIM with this name.
+ This action requires access to create VDC action in vCloud director.
+
+ Args:
+ tenant_name is tenant_name to be created.
+ tenant_description not used for this call
+
+ Return:
+ returns the tenant identifier in UUID format.
+ If action is failed method will throw vimconn.vimconnException method
+ """
+ vdc_task = self.create_vdc(vdc_name=tenant_name)
+ if vdc_task is not None:
+ vdc_uuid, value = vdc_task.popitem()
+ self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
+ return vdc_uuid
+ else:
+ raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
+
+ def delete_tenant(self, tenant_id=None):
+ """Delete a tenant from VIM"""
+ 'Returns the tenant identifier'
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def get_tenant_list(self, filter_dict={}):
+ """Obtain tenants of VIM
+ filter_dict can contain the following keys:
+ name: filter by tenant name
+ id: filter by tenant uuid/id
+ <other VIM specific>
+ Returns the tenant list of dictionaries:
+ [{'name':'<name>, 'id':'<id>, ...}, ...]
+
+ """
+ org_dict = self.get_org(self.org_uuid)
+ vdcs_dict = org_dict['vdcs']
+
+ vdclist = []
+ try:
+ for k in vdcs_dict:
+ entry = {'name': vdcs_dict[k], 'id': k}
+ # if caller didn't specify dictionary we return all tenants.
+ if filter_dict is not None and filter_dict:
+ filtered_entry = entry.copy()
+ filtered_dict = set(entry.keys()) - set(filter_dict)
+ for unwanted_key in filtered_dict: del entry[unwanted_key]
+ if filter_dict == entry:
+ vdclist.append(filtered_entry)
+ else:
+ vdclist.append(entry)
+ except:
+ self.logger.debug("Error in get_tenant_list()")
+ self.logger.debug(traceback.format_exc())
+ raise vimconn.vimconnException("Incorrect state. {}")
+
+ return vdclist
+
+ def new_network(self, net_name, net_type, ip_profile=None, shared=False):
+ """Adds a tenant network to VIM
+ net_name is the name
+ net_type can be 'bridge','data'.'ptp'.
+ ip_profile is a dict containing the IP parameters of the network
+ shared is a boolean
+ Returns the network identifier"""
+
+ self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
+ .format(net_name, net_type, ip_profile, shared))
+
+ isshared = 'false'
+ if shared:
+ isshared = 'true'
+
++# ############# Stub code for SRIOV #################
++# if net_type == "data" or net_type == "ptp":
++# if self.config.get('dv_switch_name') == None:
++# raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
++# network_uuid = self.create_dvPort_group(net_name)
++
+ network_uuid = self.create_network(network_name=net_name, net_type=net_type,
+ ip_profile=ip_profile, isshared=isshared)
+ if network_uuid is not None:
+ return network_uuid
+ else:
+ raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
+
    def get_vcd_network_list(self):
        """List the org VDC networks of the current tenant (self.tenant_name).

        Returns:
            list of dicts, one per network, with keys: name, id, shared,
            tenant_id, admin_state_up, status, type.  Best effort: on a
            parsing error the entries collected so far are returned.

        Raises:
            vimconnConnectionException: when connection, tenant name or VDC
                lookup fails.
        """

        self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed.")

        if not self.tenant_name:
            raise vimconn.vimconnConnectionException("Tenant name is empty.")

        vdc = vca.get_vdc(self.tenant_name)
        if vdc is None:
            raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))

        # ids are URNs like 'urn:vcloud:network:<uuid>'; the uuid is field 3
        vdc_uuid = vdc.get_id().split(":")[3]
        networks = vca.get_networks(vdc.get_name())
        network_list = []
        try:
            for network in networks:
                filter_dict = {}
                netid = network.get_id().split(":")
                if len(netid) != 4:
                    # unexpected id format; skip this entry
                    continue

                filter_dict["name"] = network.get_name()
                filter_dict["id"] = netid[3]
                filter_dict["shared"] = network.get_IsShared()
                filter_dict["tenant_id"] = vdc_uuid
                if network.get_status() == 1:
                    filter_dict["admin_state_up"] = True
                else:
                    filter_dict["admin_state_up"] = False
                filter_dict["status"] = "ACTIVE"
                filter_dict["type"] = "bridge"
                network_list.append(filter_dict)
                self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
        except:
            # best effort: keep whatever was collected before the failure
            self.logger.debug("Error in get_vcd_network_list")
            self.logger.debug(traceback.format_exc())
            pass

        self.logger.debug("get_vcd_network_list returning {}".format(network_list))
        return network_list
+
+ def get_network_list(self, filter_dict={}):
+ """Obtain tenant networks of VIM
+ Filter_dict can be:
+ name: network name OR/AND
+ id: network uuid OR/AND
+ shared: boolean OR/AND
+ tenant_id: tenant OR/AND
+ admin_state_up: boolean
+ status: 'ACTIVE'
+
+ [{key : value , key : value}]
+
+ Returns the network list of dictionaries:
+ [{<the fields at Filter_dict plus some VIM specific>}, ...]
+ List can be empty
+ """
+
+ self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed.")
+
+ if not self.tenant_name:
+ raise vimconn.vimconnConnectionException("Tenant name is empty.")
+
+ vdc = vca.get_vdc(self.tenant_name)
+ if vdc is None:
+ raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
+
- vdc = vca.get_vdc(self.tenant_name)
- vdc_id = vdc.get_id().split(":")[3]
+ try:
++ vdcid = vdc.get_id().split(":")[3]
++ networks = vca.get_networks(vdc.get_name())
++ network_list = []
++
+ for network in networks:
+ filter_entry = {}
+ net_uuid = network.get_id().split(":")
+ if len(net_uuid) != 4:
+ continue
+ else:
+ net_uuid = net_uuid[3]
+ # create dict entry
+ self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
+ vdcid,
+ network.get_name()))
+ filter_entry["name"] = network.get_name()
+ filter_entry["id"] = net_uuid
+ filter_entry["shared"] = network.get_IsShared()
+ filter_entry["tenant_id"] = vdcid
+ if network.get_status() == 1:
+ filter_entry["admin_state_up"] = True
+ else:
+ filter_entry["admin_state_up"] = False
+ filter_entry["status"] = "ACTIVE"
+ filter_entry["type"] = "bridge"
+ filtered_entry = filter_entry.copy()
+
+ if filter_dict is not None and filter_dict:
+ # we remove all the key : value we don't care and match only
+ # respected field
+ filtered_dict = set(filter_entry.keys()) - set(filter_dict)
+ for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
+ if filter_dict == filter_entry:
+ network_list.append(filtered_entry)
+ else:
+ network_list.append(filtered_entry)
+ except:
+ self.logger.debug("Error in get_vcd_network_list")
+ self.logger.debug(traceback.format_exc())
+
+ self.logger.debug("Returning {}".format(network_list))
+ return network_list
+
+ def get_network(self, net_id):
+ """Method obtains network details of net_id VIM network
+ Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
- networks = vca.get_networks(vdc.get_name())
- filter_dict = {}
++ try:
++ vdc = vca.get_vdc(self.tenant_name)
++ vdc_id = vdc.get_id().split(":")[3]
+
- try:
++ networks = vca.get_networks(vdc.get_name())
++ filter_dict = {}
+
- for catalog in vca.get_catalogs():
- if catalog_name != catalog.name:
- continue
- link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
- link.get_rel() == 'add', catalog.get_Link())
- assert len(link) == 1
- data = """
- <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
- """ % (escape(catalog_name), escape(description))
- headers = vca.vcloud_session.get_vcloud_headers()
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
- response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
- if response.status_code == requests.codes.created:
- catalogItem = XmlElementTree.fromstring(response.content)
- entity = [child for child in catalogItem if
- child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
- href = entity.get('href')
- template = href
- response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
- verify=vca.verify, logger=self.logger)
-
- if response.status_code == requests.codes.ok:
- media = mediaType.parseString(response.content, True)
- link = filter(lambda link: link.get_rel() == 'upload:default',
- media.get_Files().get_File()[0].get_Link())[0]
- headers = vca.vcloud_session.get_vcloud_headers()
- headers['Content-Type'] = 'Content-Type text/xml'
- response = Http.put(link.get_href(),
- data=open(media_file_name, 'rb'),
- headers=headers,
+ for network in networks:
+ vdc_network_id = network.get_id().split(":")
+ if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
+ filter_dict["name"] = network.get_name()
+ filter_dict["id"] = vdc_network_id[3]
+ filter_dict["shared"] = network.get_IsShared()
+ filter_dict["tenant_id"] = vdc_id
+ if network.get_status() == 1:
+ filter_dict["admin_state_up"] = True
+ else:
+ filter_dict["admin_state_up"] = False
+ filter_dict["status"] = "ACTIVE"
+ filter_dict["type"] = "bridge"
+ self.logger.debug("Returning {}".format(filter_dict))
+ return filter_dict
+ except:
+ self.logger.debug("Error in get_network")
+ self.logger.debug(traceback.format_exc())
+
+ return filter_dict
+
    def delete_network(self, net_id):
        """
        Method Deletes a tenant network from VIM, provide the network id.

        Returns the network identifier or raise an exception
        """

        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() for tenant {} is failed.".format(self.tenant_name))

        # (SRIOV dvPort-group deletion is stubbed out upstream.)

        vcd_network = self.get_vcd_network(network_uuid=net_id)
        if vcd_network is not None and vcd_network:
            if self.delete_network_action(network_uuid=net_id):
                return net_id
            # NOTE(review): when delete_network_action() returns False this
            # method falls through and returns None instead of raising, so
            # callers that only check for exceptions will miss the failure.
        else:
            raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
+
+ def refresh_nets_status(self, net_list):
+ """Get the status of the networks
+ Params: the list of network identifiers
+ Returns a dictionary with:
+ net_id: #VIM id of this network
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, INACTIVE, DOWN (admin down),
+ # BUILD (on building process)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ dict_entry = {}
+ try:
+ for net in net_list:
+ errormsg = ''
+ vcd_network = self.get_vcd_network(network_uuid=net)
+ if vcd_network is not None and vcd_network:
+ if vcd_network['status'] == '1':
+ status = 'ACTIVE'
+ else:
+ status = 'DOWN'
+ else:
+ status = 'DELETED'
+ errormsg = 'Network not found.'
+
+ dict_entry[net] = {'status': status, 'error_msg': errormsg,
+ 'vim_info': yaml.safe_dump(vcd_network)}
+ except:
+ self.logger.debug("Error in refresh_nets_status")
+ self.logger.debug(traceback.format_exc())
+
+ return dict_entry
+
+ def get_flavor(self, flavor_id):
+ """Obtain flavor details from the VIM
+ Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
+ """
+ if flavor_id not in vimconnector.flavorlist:
+ raise vimconn.vimconnNotFoundException("Flavor not found.")
+ return vimconnector.flavorlist[flavor_id]
+
+ def new_flavor(self, flavor_data):
+ """Adds a tenant flavor to VIM
+ flavor_data contains a dictionary with information, keys:
+ name: flavor name
+ ram: memory (cloud type) in MBytes
+ vpcus: cpus (cloud type)
+ extended: EPA parameters
+ - numas: #items requested in same NUMA
+ memory: number of 1G huge pages memory
+ paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+ interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+ - name: interface name
+ dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
+ bandwidth: X Gbps; requested guarantee bandwidth
+ vpci: requested virtual PCI address
+ disk: disk size
+ is_public:
+ #TODO to concrete
+ Returns the flavor identifier"""
+
+ # generate a new uuid put to internal dict and return it.
+ self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
+ new_flavor=flavor_data
+ ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
+ cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
+ disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
+
+ extended_flv = flavor_data.get("extended")
+ if extended_flv:
+ numas=extended_flv.get("numas")
+ if numas:
+ for numa in numas:
+ #overwrite ram and vcpus
+ ram = numa['memory']*1024
+ if 'paired-threads' in numa:
+ cpu = numa['paired-threads']*2
+ elif 'cores' in numa:
+ cpu = numa['cores']
+ elif 'threads' in numa:
+ cpu = numa['threads']
+
+ new_flavor[FLAVOR_RAM_KEY] = ram
+ new_flavor[FLAVOR_VCPUS_KEY] = cpu
+ new_flavor[FLAVOR_DISK_KEY] = disk
+ # generate a new uuid put to internal dict and return it.
+ flavor_id = uuid.uuid4()
+ vimconnector.flavorlist[str(flavor_id)] = new_flavor
+ self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
+
+ return str(flavor_id)
+
+ def delete_flavor(self, flavor_id):
+ """Deletes a tenant flavor from VIM identify by its id
+
+ Returns the used id or raise an exception
+ """
+ if flavor_id not in vimconnector.flavorlist:
+ raise vimconn.vimconnNotFoundException("Flavor not found.")
+
+ vimconnector.flavorlist.pop(flavor_id, None)
+ return flavor_id
+
    def new_image(self, image_dict):
        """
        Adds a tenant image to VIM

        Args:
            image_dict: image metadata; only 'location' (path to the local
                OVF file) is used here.

        Returns:
            200, image-id if the image is created
            <0, message if there is an error
        """

        # Delegates to get_image_id_from_path(), which uploads the OVF into a
        # catalog (reusing an existing one when present) and returns its UUID.
        return self.get_image_id_from_path(image_dict['location'])
+
    def delete_image(self, image_id):
        """Delete an image from the VIM.

        Args:
            image_id: image identifier (unused).

        Raises:
            vimconnNotImplemented: always; not supported by this connector.
        """

        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def catalog_exists(self, catalog_name, catalogs):
+ """
+
+ :param catalog_name:
+ :param catalogs:
+ :return:
+ """
+ for catalog in catalogs:
+ if catalog.name == catalog_name:
+ return True
+ return False
+
+ def create_vimcatalog(self, vca=None, catalog_name=None):
+ """ Create new catalog entry in vCloud director.
+
+ Args
+ vca: vCloud director.
+ catalog_name catalog that client wish to create. Note no validation done for a name.
+ Client must make sure that provide valid string representation.
+
+ Return (bool) True if catalog created.
+
+ """
+ try:
+ task = vca.create_catalog(catalog_name, catalog_name)
+ result = vca.block_until_completed(task)
+ if not result:
+ return False
+ catalogs = vca.get_catalogs()
+ except:
+ return False
+ return self.catalog_exists(catalog_name, catalogs)
+
    # noinspection PyIncorrectDocstring
    def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
                   description='', progress=False, chunk_bytes=128 * 1024):
        """
        Uploads a OVF file to a vCloud catalog

        :param chunk_bytes: chunk size used when streaming the VMDK payload
        :param progress: when True, render a console progress bar
        :param description: free-form text embedded in the vApp template
        :param image_name: accepted for interface compatibility (unused here)
        :param vca: logged-in VCA connection object
        :param catalog_name: (str): The name of the catalog to upload the media.
        :param media_file_name: (str): The name of the local media file to upload.
        :return: (bool) True if the media file was successfully uploaded, false otherwise.
        """
        os.path.isfile(media_file_name)
        statinfo = os.stat(media_file_name)

        # Flow: locate the target catalog, POST an UploadVAppTemplateParams
        # to create a vApp template, PUT the OVF descriptor, then poll the
        # template and PUT each remaining file (the VMDKs) in chunks.
        try:
            for catalog in vca.get_catalogs():
                if catalog_name != catalog.name:
                    continue
                # the 'add' link of media+xml type is the upload endpoint
                link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
                                           link.get_rel() == 'add', catalog.get_Link())
                assert len(link) == 1
                data = """
                <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
                """ % (escape(catalog_name), escape(description))
                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
                response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
                if response.status_code == requests.codes.created:
                    catalogItem = XmlElementTree.fromstring(response.content)
                    entity = [child for child in catalogItem if
                              child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                    href = entity.get('href')
                    template = href
                    response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify, logger=self.logger)

                    if response.status_code == requests.codes.ok:
                        media = mediaType.parseString(response.content, True)
                        # first file slot is the OVF descriptor itself
                        link = filter(lambda link: link.get_rel() == 'upload:default',
                                      media.get_Files().get_File()[0].get_Link())[0]
                        headers = vca.vcloud_session.get_vcloud_headers()
                        headers['Content-Type'] = 'Content-Type text/xml'
                        response = Http.put(link.get_href(),
                                            data=open(media_file_name, 'rb'),
                                            headers=headers,
                                            verify=vca.verify, logger=self.logger)
                        if response.status_code != requests.codes.ok:
                            self.logger.debug(
                                "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
                                                                                                      media_file_name))
                            return False

                    # TODO fix this with aync block
                    time.sleep(5)

                    self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))

                    # uploading VMDK file
                    # check status of OVF upload and upload remaining files.
                    response = Http.get(template,
                                        headers=vca.vcloud_session.get_vcloud_headers(),
                                        verify=vca.verify,
                                        logger=self.logger)

                    if response.status_code == requests.codes.ok:
                        media = mediaType.parseString(response.content, True)
                        number_of_files = len(media.get_Files().get_File())
                        for index in xrange(0, number_of_files):
                            links_list = filter(lambda link: link.get_rel() == 'upload:default',
                                                media.get_Files().get_File()[index].get_Link())
                            for link in links_list:
                                # we skip ovf since it already uploaded.
                                if 'ovf' in link.get_href():
                                    continue
                                # The OVF file and VMDK must be in a same directory
                                head, tail = os.path.split(media_file_name)
                                file_vmdk = head + '/' + link.get_href().split("/")[-1]
                                if not os.path.isfile(file_vmdk):
                                    return False
                                statinfo = os.stat(file_vmdk)
                                if statinfo.st_size == 0:
                                    return False
                                hrefvmdk = link.get_href()

                                if progress:
                                    print("Uploading file: {}".format(file_vmdk))
                                if progress:
                                    widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
                                               FileTransferSpeed()]
                                    progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()

                                # stream the VMDK in chunk_bytes chunks with
                                # Content-Range headers
                                bytes_transferred = 0
                                f = open(file_vmdk, 'rb')
                                while bytes_transferred < statinfo.st_size:
                                    my_bytes = f.read(chunk_bytes)
                                    if len(my_bytes) <= chunk_bytes:
                                        headers = vca.vcloud_session.get_vcloud_headers()
                                        headers['Content-Range'] = 'bytes %s-%s/%s' % (
                                            bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
                                        headers['Content-Length'] = str(len(my_bytes))
                                        response = Http.put(hrefvmdk,
                                                            headers=headers,
                                                            data=my_bytes,
                                                            verify=vca.verify,
                                                            logger=None)

                                        if response.status_code == requests.codes.ok:
                                            bytes_transferred += len(my_bytes)
                                            if progress:
                                                progress_bar.update(bytes_transferred)
                                        else:
                                            self.logger.debug(
                                                'file upload failed with error: [%s] %s' % (response.status_code,
                                                                                            response.content))

                                            f.close()
                                            return False
                                f.close()
                                if progress:
                                    progress_bar.finish()
                                time.sleep(10)
                        return True
                    else:
                        self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
                                          format(catalog_name, media_file_name))
                        return False
        except Exception as exp:
            self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
                              .format(catalog_name,media_file_name, exp))
            raise vimconn.vimconnException(
                "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
                .format(catalog_name,media_file_name, exp))

        # no catalog with the requested name was found
        self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
        return False
+
    def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
        """Upload media file"""
        # TODO add named parameters for readability
        # Thin wrapper over upload_ovf(): the image name is the media file
        # base name (extension stripped).
        # NOTE(review): the parameter 'medial_file_name' looks like a typo
        # for 'media_file_name', but renaming it would break keyword callers.

        return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
                               media_file_name=medial_file_name, description='medial_file_name', progress=progress)
+
+ def validate_uuid4(self, uuid_string=None):
+ """ Method validate correct format of UUID.
+
+ Return: true if string represent valid uuid
+ """
+ try:
+ val = uuid.UUID(uuid_string, version=4)
+ except ValueError:
+ return False
+ return True
+
+ def get_catalogid(self, catalog_name=None, catalogs=None):
+ """ Method check catalog and return catalog ID in UUID format.
+
+ Args
+ catalog_name: catalog name as string
+ catalogs: list of catalogs.
+
+ Return: catalogs uuid
+ """
+
+ for catalog in catalogs:
+ if catalog.name == catalog_name:
+ catalog_id = catalog.get_id().split(":")
+ return catalog_id[3]
+ return None
+
+ def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
+ """ Method check catalog and return catalog name lookup done by catalog UUID.
+
+ Args
+ catalog_name: catalog name as string
+ catalogs: list of catalogs.
+
+ Return: catalogs name or None
+ """
+
+ if not self.validate_uuid4(uuid_string=catalog_uuid):
+ return None
+
+ for catalog in catalogs:
+ catalog_id = catalog.get_id().split(":")[3]
+ if catalog_id == catalog_uuid:
+ return catalog.name
+ return None
+
++ def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
++ """ Method check catalog and return catalog name lookup done by catalog UUID.
++
++ Args
++ catalog_name: catalog name as string
++ catalogs: list of catalogs.
++
++ Return: catalogs name or None
++ """
++
++ if not self.validate_uuid4(uuid_string=catalog_uuid):
++ return None
++
++ for catalog in catalogs:
++ catalog_id = catalog.get_id().split(":")[3]
++ if catalog_id == catalog_uuid:
++ return catalog
++ return None
++
def get_image_id_from_path(self, path=None, progress=False):
    """ Method upload OVF image to vCloud director.

    Each OVF image represented as single catalog entry in vcloud director.
    The method check for existing catalog entry. The check done by file name without file extension.

    if given catalog name already present method will respond with existing catalog uuid otherwise
    it will create new catalog entry and upload OVF file to newly created catalog.

    If method can't create catalog entry or upload a file it will throw exception.

    Method accept boolean flag progress that will output progress bar. It useful method
    for standalone upload use case. In case to test large file upload.

    Args
        path: - valid path to OVF file.
        progress - boolean progress bar show progress bar.

    Return: if image uploaded correct method will provide image catalog UUID.

    Raises
        vimconnConnectionException: when the vCD connection cannot be established.
        vimconnException: on bad path / wrong container / catalog or upload failure.
    """
    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed.")

    if not path:
        raise vimconn.vimconnException("Image path can't be None.")

    if not os.path.isfile(path):
        raise vimconn.vimconnException("Can't read file. File not found.")

    if not os.access(path, os.R_OK):
        raise vimconn.vimconnException("Can't read file. Check file permission to read.")

    self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))

    dirpath, filename = os.path.split(path)
    flname, file_extension = os.path.splitext(path)
    if file_extension != '.ovf':
        self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
        raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")

    catalog_name = os.path.splitext(filename)[0]
    # NOTE(review): this hashes the path *string*, not the file contents; on
    # Python 3 hashlib.md5() requires bytes -- confirm intended behaviour.
    catalog_md5_name = hashlib.md5(path).hexdigest()
    self.logger.debug("File name {} Catalog Name {} file path {} "
                      "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))

    try:
        catalogs = vca.get_catalogs()
    except Exception as exp:
        self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
        raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))

    if len(catalogs) == 0:
        self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
        result = self.create_vimcatalog(vca, catalog_md5_name)
        if not result:
            raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
        result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
                                      media_name=filename, medial_file_name=path, progress=progress)
        if not result:
            raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
        # BUGFIX: the catalog was created under catalog_md5_name, so the id
        # lookup must use catalog_md5_name as well (previously catalog_name,
        # which could never match and made this branch return None).
        return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
    else:
        for catalog in catalogs:
            # search for existing catalog if we find same name we return ID
            # TODO optimize this
            if catalog.name == catalog_md5_name:
                self.logger.debug("Found existing catalog entry for {} "
                                  "catalog id {}".format(catalog_name,
                                                         self.get_catalogid(catalog_md5_name, catalogs)))
                return self.get_catalogid(catalog_md5_name, vca.get_catalogs())

    # if we didn't find existing catalog we create a new one and upload image.
    self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
    result = self.create_vimcatalog(vca, catalog_md5_name)
    if not result:
        raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))

    result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
                                  media_name=filename, medial_file_name=path, progress=progress)
    if not result:
        raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))

    return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
+
def get_image_list(self, filter_dict=None):
    '''Obtain tenant images from VIM
    Filter_dict can be:
        name: image name
        id: image uuid
        checksum: image checksum
        location: image path
    Returns the image list of dictionaries:
        [{<the fields at Filter_dict plus some VIM specific>}, ...]
        List can be empty
    '''
    # default changed from the mutable-default-argument pitfall ({}) to
    # None; an omitted/None filter behaves exactly like an empty dict.
    if filter_dict is None:
        filter_dict = {}

    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed.")
    try:
        image_list = []
        catalogs = vca.get_catalogs()
        if len(catalogs) == 0:
            return image_list

        for catalog in catalogs:
            # vCD catalog id format 'urn:vcloud:catalog:<uuid>' -> uuid field
            catalog_uuid = catalog.get_id().split(":")[3]
            name = catalog.name
            # apply optional name/id filters; skip non-matching entries
            if filter_dict.get("name") and filter_dict["name"] != name:
                continue
            if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
                continue
            image_list.append({"name": name, "id": catalog_uuid})

        self.logger.debug("List of already created catalog items: {}".format(image_list))
        return image_list
    except Exception as exp:
        raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
+
def get_vappid(self, vdc=None, vapp_name=None):
    """ Method takes vdc object and vApp name and returns vapp uuid or None

    Args:
        vdc: The VDC object.
        vapp_name: is application vappp name identifier

    Returns:
        The vApp UUID as string when exactly one vApp matches,
        None when arguments are missing or no unique match is found,
        False when the lookup raised an exception.
    """
    if vdc is None or vapp_name is None:
        return None
    # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
    try:
        # BUGFIX: use a list comprehension instead of filter()+len();
        # on Python 3 filter() returns an iterator, len() raised TypeError
        # which was swallowed below and the method always returned False.
        refs = [ref for ref in vdc.ResourceEntities.ResourceEntity
                if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
        if len(refs) == 1:
            # strip everything up to and including 'vapp-' from the href
            return refs[0].href.split("vapp")[1][1:]
    except Exception as e:
        self.logger.exception(e)
        return False
    return None
+
def check_vapp(self, vdc=None, vapp_uuid=None):
    """ Method returns True or False if vapp deployed in vCloud director

    Args:
        vdc: The VDC object.
        vapp_uuid: vappid is application identifier

    Returns:
        True if a vApp with the given uuid exists in the VDC,
        False otherwise (including when the lookup raises).
    """
    try:
        # consider only vApp resource entities of the VDC
        refs = (ref for ref in vdc.ResourceEntities.ResourceEntity
                if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml')
        for ref in refs:
            # href ends with .../vapp-<uuid>; extract the uuid part
            vappid = ref.href.split("vapp")[1][1:]
            # find vapp with respected vapp uuid
            if vappid == vapp_uuid:
                return True
    except Exception as e:
        self.logger.exception(e)
        return False
    return False
+
def get_namebyvappid(self, vca=None, vdc=None, vapp_uuid=None):
    """Method returns vApp name from vCD and lookup done by vapp_id.

    Args:
        vca: Connector to VCA
        vdc: The VDC object.
        vapp_uuid: vappid is application identifier

    Returns:
        The return vApp name otherwise None
    """

    try:
        for entity in vdc.ResourceEntities.ResourceEntity:
            # skip everything that is not a vApp resource entity
            if entity.type_ != 'application/vnd.vmware.vcloud.vApp+xml':
                continue
            # we care only about UUID the rest doesn't matter
            if entity.href.split("vapp")[1][1:] != vapp_uuid:
                continue
            # fetch the vApp entity and read its 'name' attribute
            response = Http.get(entity.href,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=self.logger)
            tree = XmlElementTree.fromstring(response.content)
            return tree.attrib['name']
    except Exception as e:
        self.logger.exception(e)
        return None
    return None
+
def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
                   cloud_config=None, disk_list=None):
    """Adds a VM instance to VIM
    Params:
        start: indicates if VM must start or boot in pause mode. Ignored
        image_id,flavor_id: image and flavor uuid
        net_list: list of interfaces, each one is a dictionary with:
            name:
            net_id: network uuid to connect
            vpci: virtual vcpi to assign
            model: interface model, virtio, e2000, ...
            mac_address:
            use: 'data', 'bridge', 'mgmt'
            type: 'virtual', 'PF', 'VF', 'VFnotShared'
            vim_id: filled/added by this function
        cloud_config: can be a text script to be passed directly to cloud-init,
            or an object to inject users and ssh keys with format:
                key-pairs: [] list of keys to install to the default user
                users: [{ name, key-pairs: []}] list of users to add with their key-pair
            #TODO ip, security groups
        disk_list: optional list of disk dictionaries; each entry with
            'image_id' attaches an existing image-backed disk, otherwise a
            new empty disk of 'size' is added
    Returns >=0, the instance identifier
            <0, error_text
    """

    self.logger.info("Creating new instance for entry {}".format(name))
    self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
        description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed.")

    #new vm name = vmname + tenant_id + uuid
    new_vm_name = [name, '-', str(uuid.uuid4())]
    vmname_andid = ''.join(new_vm_name)

    # if vm already deployed we return existing uuid
    # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
    # if vapp_uuid is not None:
    #     return vapp_uuid

    # we check for presence of VDC, Catalog entry and Flavor.
    vdc = vca.get_vdc(self.tenant_name)
    if vdc is None:
        raise vimconn.vimconnNotFoundException(
            "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
    catalogs = vca.get_catalogs()
    if catalogs is None:
        raise vimconn.vimconnNotFoundException(
            "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))

    catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
    if catalog_hash_name:
        self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
    else:
        raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
                                               "(Failed retrieve catalog information {})".format(name, image_id))

    # Set vCPU and Memory based on flavor.
    vm_cpus = None
    vm_memory = None
    vm_disk = None
    # BUGFIX: numas must be pre-initialized; it is referenced below at the
    # numa-affinity step even when the flavor has no "extended" section,
    # which previously raised NameError for plain flavors.
    numas = None

    if flavor_id is not None:
        if flavor_id not in vimconnector.flavorlist:
            raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
                                                   "Failed retrieve flavor information "
                                                   "flavor id {}".format(name, flavor_id))
        else:
            try:
                flavor = vimconnector.flavorlist[flavor_id]
                vm_cpus = flavor[FLAVOR_VCPUS_KEY]
                vm_memory = flavor[FLAVOR_RAM_KEY]
                vm_disk = flavor[FLAVOR_DISK_KEY]
                extended = flavor.get("extended", None)
                if extended:
                    numas = extended.get("numas", None)

            except Exception as exp:
                raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))

    # image upload creates template name as catalog name space Template.
    templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
    power_on = 'false'
    if start:
        power_on = 'true'

    # client must provide at least one entry in net_list if not we report error
    #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
    #If no mgmt, then the 1st NN in netlist is considered as primary net.
    primary_net = None
    primary_netname = None
    network_mode = 'bridged'
    if net_list is not None and len(net_list) > 0:
        for net in net_list:
            if 'use' in net and net['use'] == 'mgmt':
                primary_net = net
        if primary_net is None:
            primary_net = net_list[0]

        try:
            primary_net_id = primary_net['net_id']
            network_dict = self.get_vcd_network(network_uuid=primary_net_id)
            if 'name' in network_dict:
                primary_netname = network_dict['name']

        except KeyError:
            raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
    else:
        raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))

    # use: 'data', 'bridge', 'mgmt'
    # create vApp.  Set vcpu and ram based on flavor id.
    try:
        vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
                                   self.get_catalogbyid(image_id, catalogs),
                                   network_name=None,  # None while creating vapp
                                   network_mode=network_mode,
                                   vm_name=vmname_andid,
                                   vm_cpus=vm_cpus,  # can be None if flavor is None
                                   vm_memory=vm_memory)  # can be None if flavor is None

        if vapptask is None or vapptask is False:
            raise vimconn.vimconnUnexpectedResponse(
                "new_vminstance(): failed to create vApp {}".format(vmname_andid))
        if type(vapptask) is VappTask:
            vca.block_until_completed(vapptask)

    except Exception as exp:
        raise vimconn.vimconnUnexpectedResponse(
            "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))

    # we should have now vapp in undeployed state.
    try:
        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
        vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
    except Exception as exp:
        raise vimconn.vimconnUnexpectedResponse(
            "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
            .format(vmname_andid, exp))

    if vapp is None:
        raise vimconn.vimconnUnexpectedResponse(
            "new_vminstance(): Failed to retrieve vApp {} after creation".format(
                vmname_andid))

    #Add PCI passthrough/SRIOV configrations
    vm_obj = None
    pci_devices_info = []
    sriov_net_info = []
    reserve_memory = False

    # classify requested interfaces: PF -> PCI passthrough, VF -> SRIOV
    for net in net_list:
        if net["type"] == "PF":
            pci_devices_info.append(net)
        elif (net["type"] == "VF" or net["type"] == "VFnotShared") and 'net_id' in net:
            sriov_net_info.append(net)

    #Add PCI
    if len(pci_devices_info) > 0:
        self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
                                                                        vmname_andid))
        PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
                                                                          pci_devices_info,
                                                                          vmname_andid)
        if PCI_devices_status:
            self.logger.info("Added PCI devives {} to VM {}".format(
                pci_devices_info,
                vmname_andid)
            )
            # passthrough devices require full memory reservation (done below)
            reserve_memory = True
        else:
            self.logger.info("Fail to add PCI devives {} to VM {}".format(
                pci_devices_info,
                vmname_andid)
            )

    # Modify vm disk
    if vm_disk:
        #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
        result = self.modify_vm_disk(vapp_uuid, vm_disk)
        if result:
            self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))

    #Add new or existing disks to vApp
    if disk_list:
        added_existing_disk = False
        for disk in disk_list:
            if "image_id" in disk and disk["image_id"] is not None:
                self.logger.debug("Adding existing disk from image {} to vm {} ".format(
                    disk["image_id"], vapp_uuid))
                self.add_existing_disk(catalogs=catalogs,
                                       image_id=disk["image_id"],
                                       size=disk["size"],
                                       template_name=templateName,
                                       vapp_uuid=vapp_uuid
                                       )
                added_existing_disk = True
            else:
                #Wait till added existing disk gets reflected into vCD database/API
                if added_existing_disk:
                    time.sleep(5)
                    added_existing_disk = False
                self.add_new_disk(vca, vapp_uuid, disk['size'])

    if numas:
        # Assigning numa affinity setting
        for numa in numas:
            if 'paired-threads-id' in numa:
                paired_threads_id = numa['paired-threads-id']
                self.set_numa_affinity(vapp_uuid, paired_threads_id)

    # add NICs & connect to networks in netlist
    try:
        self.logger.info("Request to connect VM to a network: {}".format(net_list))
        nicIndex = 0
        primary_nic_index = 0
        for net in net_list:
            # openmano uses network id in UUID format.
            # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
            # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
            #   'vpci': '0000:00:11.0', 'name': 'eth0'}]

            if 'net_id' not in net:
                continue

            interface_net_id = net['net_id']
            interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
            interface_network_mode = net['use']

            if interface_network_mode == 'mgmt':
                primary_nic_index = nicIndex

            """- POOL (A static IP address is allocated automatically from a pool of addresses.)
               - DHCP (The IP address is obtained from a DHCP service.)
               - MANUAL (The IP address is assigned manually in the IpAddress element.)
               - NONE (No IP addressing mode specified.)"""

            if primary_netname is not None:
                # NOTE(review): filter()+len() assumes Python 2 semantics here
                nets = filter(lambda n: n.name == interface_net_name, vca.get_networks(self.tenant_name))
                if len(nets) == 1:
                    self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
                    task = vapp.connect_to_network(nets[0].name, nets[0].href)
                    if type(task) is GenericTask:
                        vca.block_until_completed(task)
                    # connect network to VM - with all DHCP by default

                    # PF/VF interfaces are handled via PCI/SRIOV above, not NICs
                    type_list = ['PF', 'VF', 'VFnotShared']
                    if 'type' in net and net['type'] not in type_list:
                        # fetching nic type from vnf
                        if 'model' in net:
                            nic_type = net['model']
                            self.logger.info("new_vminstance(): adding network adapter "\
                                             "to a network {}".format(nets[0].name))
                            self.add_network_adapter_to_vms(vapp, nets[0].name,
                                                            primary_nic_index,
                                                            nicIndex,
                                                            net,
                                                            nic_type=nic_type)
                        else:
                            self.logger.info("new_vminstance(): adding network adapter "\
                                             "to a network {}".format(nets[0].name))
                            self.add_network_adapter_to_vms(vapp, nets[0].name,
                                                            primary_nic_index,
                                                            nicIndex,
                                                            net)
            nicIndex += 1

        # cloud-init for ssh-key injection
        if cloud_config:
            self.cloud_init(vapp, cloud_config)

        # deploy and power on vm
        self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
        deploytask = vapp.deploy(powerOn=False)
        if type(deploytask) is GenericTask:
            vca.block_until_completed(deploytask)

        # ############# Stub code for SRIOV #################
        #Add SRIOV
#         if len(sriov_net_info) > 0:
#             self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
#                                                                                vmname_andid ))
#             sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
#                                                                   sriov_net_info,
#                                                                   vmname_andid)
#             if sriov_status:
#                 self.logger.info("Added SRIOV {} to VM {}".format(
#                     sriov_net_info,
#                     vmname_andid)
#                 )
#                 reserve_memory = True
#             else:
#                 self.logger.info("Fail to add SRIOV {} to VM {}".format(
#                     sriov_net_info,
#                     vmname_andid)
#                 )

        # If VM has PCI devices or SRIOV reserve memory for VM
        if reserve_memory:
            memReserve = vm_obj.config.hardware.memoryMB
            spec = vim.vm.ConfigSpec()
            spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
            task = vm_obj.ReconfigVM_Task(spec=spec)
            if task:
                result = self.wait_for_vcenter_task(task, vcenter_conect)
                self.logger.info("Reserved memmoery {} MB for "\
                                 "VM VM status: {}".format(str(memReserve), result))
            else:
                self.logger.info("Fail to reserved memmoery {} to VM {}".format(
                    str(memReserve), str(vm_obj)))

        self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
        poweron_task = vapp.poweron()
        if type(poweron_task) is GenericTask:
            vca.block_until_completed(poweron_task)

    except Exception as exp:
        # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
        self.logger.debug("new_vminstance(): Failed create new vm instance {}".format(name, exp))
        raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {}".format(name, exp))

    # check if vApp deployed and if that the case return vApp UUID otherwise -1
    wait_time = 0
    vapp_uuid = None
    while wait_time <= MAX_WAIT_TIME:
        try:
            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
        except Exception as exp:
            raise vimconn.vimconnUnexpectedResponse(
                "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
                .format(vmname_andid, exp))

        if vapp and vapp.me.deployed:
            vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
            break
        else:
            self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
            time.sleep(INTERVAL_TIME)

        wait_time += INTERVAL_TIME

    if vapp_uuid is not None:
        return vapp_uuid
    else:
        raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
+
+ ##
+ ##
+ ## based on current discussion
+ ##
+ ##
+ ## server:
+ # created: '2016-09-08T11:51:58'
+ # description: simple-instance.linux1.1
+ # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
+ # hostId: e836c036-74e7-11e6-b249-0800273e724c
+ # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
+ # status: ACTIVE
+ # error_msg:
+ # interfaces: …
+ #
def get_vminstance(self, vim_vm_uuid=None):
    """Returns the VM instance information from VIM"""

    self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed.")

    vdc = vca.get_vdc(self.tenant_name)
    if vdc is None:
        raise vimconn.vimconnConnectionException(
            "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))

    vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
    if not vm_info_dict:
        self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
        raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))

    status_key = vm_info_dict['status']
    error = ''
    try:
        # build the mano-format view; any missing mandatory field raises
        # KeyError and is mapped to the "inconsistent" fallback below
        vm_dict = {
            'created': vm_info_dict['created'],
            'description': vm_info_dict['name'],
            'status': vcdStatusCode2manoFormat[int(status_key)],
            'hostId': vm_info_dict['vmuuid'],
            'error_msg': error,
            'vim_info': yaml.safe_dump(vm_info_dict),
            'interfaces': vm_info_dict.get('interfaces', []),
        }
    except KeyError:
        # mandatory data missing from the vApp details: report unknown state
        vm_dict = {
            'created': '',
            'description': '',
            'status': vcdStatusCode2manoFormat[int(-1)],
            'hostId': vm_info_dict['vmuuid'],
            'error_msg': "Inconsistency state",
            'vim_info': yaml.safe_dump(vm_info_dict),
            'interfaces': [],
        }

    return vm_dict
+
def delete_vminstance(self, vm__vim_uuid):
    """Method poweroff and remove VM instance from vcloud director network.

    Args:
        vm__vim_uuid: VM UUID

    Returns:
        Returns the instance identifier (the same UUID) on success.
        Returns the tuple (-1, error_text) when the vApp cannot be found.

    Raises:
        vimconnException on connection/VDC failure or when deletion fails.
    """

    self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed.")

    vdc = vca.get_vdc(self.tenant_name)
    if vdc is None:
        self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
            self.tenant_name))
        raise vimconn.vimconnException(
            "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))

    try:
        vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
        if vapp_name is None:
            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
        else:
            self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))

        # Delete vApp and wait for status change if task executed and vApp is None.
        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)

        if vapp:
            if vapp.me.deployed:
                self.logger.info("Powering off vApp {}".format(vapp_name))
                #Power off vApp
                powered_off = False
                wait_time = 0
                # poll until the poweroff task completes or MAX_WAIT_TIME expires
                while wait_time <= MAX_WAIT_TIME:
                    vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
                    if not vapp:
                        self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                        return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)

                    power_off_task = vapp.poweroff()
                    if type(power_off_task) is GenericTask:
                        result = vca.block_until_completed(power_off_task)
                        if result:
                            powered_off = True
                            break
                    else:
                        self.logger.info("Wait for vApp {} to power off".format(vapp_name))
                        time.sleep(INTERVAL_TIME)

                    wait_time += INTERVAL_TIME
                if not powered_off:
                    self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
                else:
                    self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))

                #Undeploy vApp
                self.logger.info("Undeploy vApp {}".format(vapp_name))
                wait_time = 0
                undeployed = False
                # poll until the undeploy task completes or MAX_WAIT_TIME expires
                while wait_time <= MAX_WAIT_TIME:
                    vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
                    if not vapp:
                        self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                        return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
                    undeploy_task = vapp.undeploy(action='powerOff')

                    if type(undeploy_task) is GenericTask:
                        result = vca.block_until_completed(undeploy_task)
                        if result:
                            undeployed = True
                            break
                    else:
                        self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
                        time.sleep(INTERVAL_TIME)

                    wait_time += INTERVAL_TIME

                if not undeployed:
                    self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))

            # delete vapp
            self.logger.info("Start deletion of vApp {} ".format(vapp_name))
            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)

            if vapp is not None:
                wait_time = 0
                result = False

                while wait_time <= MAX_WAIT_TIME:
                    vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
                    if not vapp:
                        self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                        return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)

                    delete_task = vapp.delete()

                    if type(delete_task) is GenericTask:
                        # BUGFIX: block_until_completed() was invoked twice on
                        # the same task (first result discarded); wait once.
                        result = vca.block_until_completed(delete_task)
                        if result:
                            break
                    else:
                        self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
                        time.sleep(INTERVAL_TIME)

                    wait_time += INTERVAL_TIME

                if not result:
                    self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))

    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed into a vimconnException
        self.logger.debug(traceback.format_exc())
        raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))

    if vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) is None:
        self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
        return vm__vim_uuid
    else:
        raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
+
def refresh_vms_status(self, vm_list):
    """Get the status of the virtual machines and their interfaces/ports
    Params: the list of VM identifiers
    Returns a dictionary with:
        vm_id:          #VIM id of this Virtual Machine
            status:     #Mandatory. Text with one of:
                        #  DELETED (not found at vim)
                        #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        #  OTHER (Vim reported other status not understood)
                        #  ERROR (VIM indicates an ERROR status)
                        #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                        #  CREATING (on building process), ERROR
                        #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                        #
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
            interfaces:
             -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                mac_address:      #Text format XX:XX:XX:XX:XX:XX
                vim_net_id:       #network id where this interface is connected
                vim_interface_id: #interface/port VIM id
                ip_address:       #null, or text with IPv4, IPv6 address
    """

    self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))

    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed.")

    vdc = vca.get_vdc(self.tenant_name)
    if vdc is None:
        raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))

    vms_dict = {}
    # NSX edge list is fetched lazily (at most once per call), only when an
    # interface is missing an IP address and it must be resolved from the
    # edge DHCP lease info
    nsx_edge_list = []
    for vmuuid in vm_list:
        # reverse-lookup the vApp name from the VIM uuid
        vmname = self.get_namebyvappid(vca, vdc, vmuuid)
        if vmname is not None:

            try:
                the_vapp = vca.get_vapp(vdc, vmname)
                vm_info = the_vapp.get_vms_details()
                vm_status = vm_info[0]['status']
                # merge PCI passthrough details into the per-VM info dict
                vm_pci_details = self.get_vm_pci_details(vmuuid)
                vm_info[0].update(vm_pci_details)

                # NOTE(review): error_msg is filled with the status text, not
                # an actual error message -- confirm this is intended
                vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                           'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                           'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}

                # get networks
                vm_app_networks = the_vapp.get_vms_network_info()
                for vapp_network in vm_app_networks:
                    for vm_network in vapp_network:
                        if vm_network['name'] == vmname:
                            #Assign IP Address based on MAC Address in NSX DHCP lease info
                            if vm_network['ip'] is None:
                                if not nsx_edge_list:
                                    nsx_edge_list = self.get_edge_details()
                                    if nsx_edge_list is None:
                                        raise vimconn.vimconnException("refresh_vms_status:"\
                                                                       "Failed to get edge details from NSX Manager")
                                if vm_network['mac'] is not None:
                                    vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])

                            # same network id is reported for both net and
                            # interface ids (no per-port id available here)
                            vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
                            interface = {"mac_address": vm_network['mac'],
                                         "vim_net_id": vm_net_id,
                                         "vim_interface_id": vm_net_id,
                                         'ip_address': vm_network['ip']}
                            # interface['vim_info'] = yaml.safe_dump(vm_network)
                            vm_dict["interfaces"].append(interface)
                # add a vm to vm dict
                vms_dict.setdefault(vmuuid, vm_dict)
            except Exception as exp:
                # per-VM failures are logged and skipped so one bad vApp does
                # not abort the refresh of the remaining VMs
                self.logger.debug("Error in response {}".format(exp))
                self.logger.debug(traceback.format_exc())

    return vms_dict
+
++
++ def get_edge_details(self):
++ """Get the NSX edge list from NSX Manager
++ Returns list of NSX edges
++ """
++ edge_list = []
++ rheaders = {'Content-Type': 'application/xml'}
++ nsx_api_url = '/api/4.0/edges'
++
++ self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
++
++ try:
++ resp = requests.get(self.nsx_manager + nsx_api_url,
++ auth = (self.nsx_user, self.nsx_password),
++ verify = False, headers = rheaders)
++ if resp.status_code == requests.codes.ok:
++ paged_Edge_List = XmlElementTree.fromstring(resp.text)
++ for edge_pages in paged_Edge_List:
++ if edge_pages.tag == 'edgePage':
++ for edge_summary in edge_pages:
++ if edge_summary.tag == 'pagingInfo':
++ for element in edge_summary:
++ if element.tag == 'totalCount' and element.text == '0':
++ raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
++ .format(self.nsx_manager))
++
++ if edge_summary.tag == 'edgeSummary':
++ for element in edge_summary:
++ if element.tag == 'id':
++ edge_list.append(element.text)
++ else:
++ raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
++ .format(self.nsx_manager))
++
++ if not edge_list:
++ raise vimconn.vimconnException("get_edge_details: "\
++ "No NSX edge details found: {}"
++ .format(self.nsx_manager))
++ else:
++ self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
++ return edge_list
++ else:
++ self.logger.debug("get_edge_details: "
++ "Failed to get NSX edge details from NSX Manager: {}"
++ .format(resp.content))
++ return None
++
++ except Exception as exp:
++ self.logger.debug("get_edge_details: "\
++ "Failed to get NSX edge details from NSX Manager: {}"
++ .format(exp))
++ raise vimconn.vimconnException("get_edge_details: "\
++ "Failed to get NSX edge details from NSX Manager: {}"
++ .format(exp))
++
++
++ def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
++ """Get IP address details from NSX edges, using the MAC address
++ PARAMS: nsx_edges : List of NSX edges
++ mac_address : Find IP address corresponding to this MAC address
++        Returns: IP address corresponding to the provided MAC address
++ """
++
++ ip_addr = None
++ rheaders = {'Content-Type': 'application/xml'}
++
++ self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
++
++ try:
++ for edge in nsx_edges:
++ nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
++
++ resp = requests.get(self.nsx_manager + nsx_api_url,
++ auth = (self.nsx_user, self.nsx_password),
++ verify = False, headers = rheaders)
++
++ if resp.status_code == requests.codes.ok:
++ dhcp_leases = XmlElementTree.fromstring(resp.text)
++ for child in dhcp_leases:
++ if child.tag == 'dhcpLeaseInfo':
++ dhcpLeaseInfo = child
++ for leaseInfo in dhcpLeaseInfo:
++ for elem in leaseInfo:
++ if (elem.tag)=='macAddress':
++ edge_mac_addr = elem.text
++ if (elem.tag)=='ipAddress':
++ ip_addr = elem.text
++ if edge_mac_addr is not None:
++ if edge_mac_addr == mac_address:
++ self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
++ .format(ip_addr, mac_address,edge))
++ return ip_addr
++ else:
++ self.logger.debug("get_ipaddr_from_NSXedge: "\
++ "Error occurred while getting DHCP lease info from NSX Manager: {}"
++ .format(resp.content))
++
++ self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
++ return None
++
++ except XmlElementTree.ParseError as Err:
++ self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
++
++
+ def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
+ """Send and action over a VM instance from VIM
+ Returns the vm_id if the action was successfully sent to the VIM"""
+
+ self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
+ if vm__vim_uuid is None or action_dict is None:
+ raise vimconn.vimconnException("Invalid request. VM id or action is None.")
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed.")
+
+ vdc = vca.get_vdc(self.tenant_name)
+ if vdc is None:
+ return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)
+
+ vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
+ if vapp_name is None:
+ self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+ raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+ else:
+ self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+
+ try:
+ the_vapp = vca.get_vapp(vdc, vapp_name)
+ # TODO fix all status
+ if "start" in action_dict:
+ vm_info = the_vapp.get_vms_details()
+ vm_status = vm_info[0]['status']
- if power_on_task is not None and type(power_on_task) is GenericTask:
- result = vca.block_until_completed(power_on_task)
- if result:
- self.logger.info("action_vminstance: Powered on vApp: {}".format(vapp_name))
- else:
- self.logger.info("action_vminstance: Failed to power on vApp: {}".format(vapp_name))
- else:
- self.logger.info("action_vminstance: Wait for vApp {} to power on".format(vapp_name))
- elif "rebuild" in action_dict:
- self.logger.info("action_vminstance: Rebuilding vApp: {}".format(vapp_name))
- power_on_task = the_vapp.deploy(powerOn=True)
- if type(power_on_task) is GenericTask:
++ self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
+ if vm_status == "Suspended" or vm_status == "Powered off":
+ power_on_task = the_vapp.poweron()
- if result:
- self.logger.info("action_vminstance: Rebuilt vApp: {}".format(vapp_name))
- else:
- self.logger.info("action_vminstance: Failed to rebuild vApp: {}".format(vapp_name))
- else:
- self.logger.info("action_vminstance: Wait for vApp rebuild {} to power on".format(vapp_name))
+ result = vca.block_until_completed(power_on_task)
- pass
- ## server.pause()
++ self.instance_actions_result("start", result, vapp_name)
++ elif "rebuild" in action_dict:
++ self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
++ rebuild_task = the_vapp.deploy(powerOn=True)
++ result = vca.block_until_completed(rebuild_task)
++ self.instance_actions_result("rebuild", result, vapp_name)
+ elif "pause" in action_dict:
- pass
- ## server.resume()
++ self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
++ pause_task = the_vapp.undeploy(action='suspend')
++ result = vca.block_until_completed(pause_task)
++ self.instance_actions_result("pause", result, vapp_name)
+ elif "resume" in action_dict:
- if type(power_off_task) is GenericTask:
- result = vca.block_until_completed(power_off_task)
- if result:
- self.logger.info("action_vminstance: Powered off vApp: {}".format(vapp_name))
- else:
- self.logger.info("action_vminstance: Failed to power off vApp: {}".format(vapp_name))
++ self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
++ power_task = the_vapp.poweron()
++ result = vca.block_until_completed(power_task)
++ self.instance_actions_result("resume", result, vapp_name)
+ elif "shutoff" in action_dict or "shutdown" in action_dict:
++ action_name , value = action_dict.items()[0]
++ self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
+ power_off_task = the_vapp.undeploy(action='powerOff')
- self.logger.info("action_vminstance: Wait for vApp {} to power off".format(vapp_name))
++ result = vca.block_until_completed(power_off_task)
++ if action_name == "shutdown":
++ self.instance_actions_result("shutdown", result, vapp_name)
+ else:
- the_vapp.reset()
- elif "terminate" in action_dict:
- the_vapp.delete()
- # elif "createImage" in action_dict:
- # server.create_image()
++ self.instance_actions_result("shutoff", result, vapp_name)
+ elif "forceOff" in action_dict:
- pass
- except:
- pass
++ result = the_vapp.undeploy(action='force')
++ self.instance_actions_result("forceOff", result, vapp_name)
++ elif "reboot" in action_dict:
++ self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
++ reboot_task = the_vapp.reboot()
+ else:
- content = self.get_network_action(network_uuid=network_uuid)
++ raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
++ return vm__vim_uuid
++ except Exception as exp :
++ self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
++ raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
++
++ def instance_actions_result(self, action, result, vapp_name):
++ if result:
++ self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
++ else:
++ self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
+
+ def get_vminstance_console(self, vm_id, console_type="vnc"):
+ """
+ Get a console for the virtual machine
+ Params:
+ vm_id: uuid of the VM
+ console_type, can be:
+ "novnc" (by default), "xvpvnc" for VNC types,
+ "rdp-html5" for RDP types, "spice-html5" for SPICE types
+ Returns dict with the console parameters:
+ protocol: ssh, ftp, http, https, ...
+ server: usually ip address
+ port: the http, ssh, ... port
+ suffix: extra text, e.g. the http path and query string
+ """
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ # NOT USED METHODS in current version
+
+ def host_vim2gui(self, host, server_dict):
+ """Transform host dictionary from VIM format to GUI format,
+ and append to the server_dict
+ """
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def get_hosts_info(self):
+ """Get the information of deployed hosts
+ Returns the hosts content"""
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def get_hosts(self, vim_tenant):
+ """Get the hosts and deployed instances
+ Returns the hosts content"""
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def get_processor_rankings(self):
+ """Get the processor rankings in the VIM database"""
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def new_host(self, host_data):
+ """Adds a new host to VIM"""
+ '''Returns status code of the VIM response'''
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def new_external_port(self, port_data):
+ """Adds a external port to VIM"""
+ '''Returns the port identifier'''
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def new_external_network(self, net_name, net_type):
+ """Adds a external network to VIM (shared)"""
+ '''Returns the network identifier'''
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def connect_port_network(self, port_id, network_id, admin=False):
+ """Connects a external port to a network"""
+ '''Returns status code of the VIM response'''
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def new_vminstancefromJSON(self, vm_data):
+ """Adds a VM instance to VIM"""
+ '''Returns the instance identifier'''
+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def get_network_name_by_id(self, network_uuid=None):
+ """Method gets vcloud director network named based on supplied uuid.
+
+ Args:
+ network_uuid: network_id
+
+ Returns:
+ The return network name.
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed.")
+
+ if not network_uuid:
+ return None
+
+ try:
+ org_dict = self.get_org(self.org_uuid)
+ if 'networks' in org_dict:
+ org_network_dict = org_dict['networks']
+ for net_uuid in org_network_dict:
+ if net_uuid == network_uuid:
+ return org_network_dict[net_uuid]
+ except:
+ self.logger.debug("Exception in get_network_name_by_id")
+ self.logger.debug(traceback.format_exc())
+
+ return None
+
+ def get_network_id_by_name(self, network_name=None):
+ """Method gets vcloud director network uuid based on supplied name.
+
+ Args:
+ network_name: network_name
+ Returns:
+ The return network uuid.
+ network_uuid: network_id
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed.")
+
+ if not network_name:
+ self.logger.debug("get_network_id_by_name() : Network name is empty")
+ return None
+
+ try:
+ org_dict = self.get_org(self.org_uuid)
+ if org_dict and 'networks' in org_dict:
+ org_network_dict = org_dict['networks']
+ for net_uuid,net_name in org_network_dict.iteritems():
+ if net_name == network_name:
+ return net_uuid
+
+ except KeyError as exp:
+ self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
+
+ return None
+
+ def list_org_action(self):
+ """
+ Method leverages vCloud director and query for available organization for particular user
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+             The returned XML response
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ url_list = [vca.host, '/api/org']
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code == requests.codes.ok:
+ return response.content
+
+ return None
+
+ def get_org_action(self, org_uuid=None):
+ """
+         Method leverages vCloud director and retrieves available objects for the organization.
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+             The returned XML response
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ if org_uuid is None:
+ return None
+
+ url_list = [vca.host, '/api/org/', org_uuid]
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code == requests.codes.ok:
+ return response.content
+
+ return None
+
+ def get_org(self, org_uuid=None):
+ """
+ Method retrieves available organization in vCloud Director
+
+ Args:
+ org_uuid - is a organization uuid.
+
+ Returns:
+ The return dictionary with following key
+ "network" - for network list under the org
+ "catalogs" - for network list under the org
+ "vdcs" - for vdc list under org
+ """
+
+ org_dict = {}
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ if org_uuid is None:
+ return org_dict
+
+ content = self.get_org_action(org_uuid=org_uuid)
+ try:
+ vdc_list = {}
+ network_list = {}
+ catalog_list = {}
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for child in vm_list_xmlroot:
+ if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
+ vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+ org_dict['vdcs'] = vdc_list
+ if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
+ network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+ org_dict['networks'] = network_list
+ if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
+ catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+ org_dict['catalogs'] = catalog_list
+ except:
+ pass
+
+ return org_dict
+
+ def get_org_list(self):
+ """
+ Method retrieves available organization in vCloud Director
+
+ Args:
+ vca - is active VCA connection.
+
+ Returns:
+ The return dictionary and key for each entry VDC UUID
+ """
+
+ org_dict = {}
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ content = self.list_org_action()
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for vm_xml in vm_list_xmlroot:
+ if vm_xml.tag.split("}")[1] == 'Org':
+ org_uuid = vm_xml.attrib['href'].split('/')[-1:]
+ org_dict[org_uuid[0]] = vm_xml.attrib['name']
+ except:
+ pass
+
+ return org_dict
+
+ def vms_view_action(self, vdc_name=None):
+ """ Method leverages vCloud director vms query call
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+             The returned XML response
+ """
+ vca = self.connect()
+ if vdc_name is None:
+ return None
+
+ url_list = [vca.host, '/api/vms/query']
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
+ vca.vcloud_session.organization.Link)
+ if len(refs) == 1:
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code == requests.codes.ok:
+ return response.content
+
+ return None
+
+ def get_vapp_list(self, vdc_name=None):
+ """
+ Method retrieves vApp list deployed vCloud director and returns a dictionary
+ contains a list of all vapp deployed for queried VDC.
+ The key for a dictionary is vApp UUID
+
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+ The return dictionary and key for each entry vapp UUID
+ """
+
+ vapp_dict = {}
+ if vdc_name is None:
+ return vapp_dict
+
+ content = self.vms_view_action(vdc_name=vdc_name)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for vm_xml in vm_list_xmlroot:
+ if vm_xml.tag.split("}")[1] == 'VMRecord':
+ if vm_xml.attrib['isVAppTemplate'] == 'true':
+ rawuuid = vm_xml.attrib['container'].split('/')[-1:]
+ if 'vappTemplate-' in rawuuid[0]:
+ # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
+ # vm and use raw UUID as key
+ vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
+ except:
+ pass
+
+ return vapp_dict
+
+ def get_vm_list(self, vdc_name=None):
+ """
+ Method retrieves VM's list deployed vCloud director. It returns a dictionary
+ contains a list of all VM's deployed for queried VDC.
+ The key for a dictionary is VM UUID
+
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+ The return dictionary and key for each entry vapp UUID
+ """
+ vm_dict = {}
+
+ if vdc_name is None:
+ return vm_dict
+
+ content = self.vms_view_action(vdc_name=vdc_name)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for vm_xml in vm_list_xmlroot:
+ if vm_xml.tag.split("}")[1] == 'VMRecord':
+ if vm_xml.attrib['isVAppTemplate'] == 'false':
+ rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+ if 'vm-' in rawuuid[0]:
+ # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
+ # vm and use raw UUID as key
+ vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+ except:
+ pass
+
+ return vm_dict
+
+ def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
+ """
+ Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
+ contains a list of all VM's deployed for queried VDC.
+ The key for a dictionary is VM UUID
+
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+ The return dictionary and key for each entry vapp UUID
+ """
+ vm_dict = {}
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ if vdc_name is None:
+ return vm_dict
+
+ content = self.vms_view_action(vdc_name=vdc_name)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for vm_xml in vm_list_xmlroot:
+ if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
+ # lookup done by UUID
+ if isuuid:
+ if vapp_name in vm_xml.attrib['container']:
+ rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+ if 'vm-' in rawuuid[0]:
+ vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+ break
+ # lookup done by Name
+ else:
+ if vapp_name in vm_xml.attrib['name']:
+ rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+ if 'vm-' in rawuuid[0]:
+ vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+ break
+ except:
+ pass
+
+ return vm_dict
+
+ def get_network_action(self, network_uuid=None):
+ """
+ Method leverages vCloud director and query network based on network uuid
+
+ Args:
+ vca - is active VCA connection.
+ network_uuid - is a network uuid
+
+ Returns:
+             The returned XML response
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ if network_uuid is None:
+ return None
+
+ url_list = [vca.host, '/api/network/', network_uuid]
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code == requests.codes.ok:
+ return response.content
+
+ return None
+
+ def get_vcd_network(self, network_uuid=None):
+ """
+ Method retrieves available network from vCloud Director
+
+ Args:
+ network_uuid - is VCD network UUID
+
+ Each element serialized as key : value pair
+
+ Following keys available for access. network_configuration['Gateway'}
+ <Configuration>
+ <IpScopes>
+ <IpScope>
+ <IsInherited>true</IsInherited>
+ <Gateway>172.16.252.100</Gateway>
+ <Netmask>255.255.255.0</Netmask>
+ <Dns1>172.16.254.201</Dns1>
+ <Dns2>172.16.254.202</Dns2>
+ <DnsSuffix>vmwarelab.edu</DnsSuffix>
+ <IsEnabled>true</IsEnabled>
+ <IpRanges>
+ <IpRange>
+ <StartAddress>172.16.252.1</StartAddress>
+ <EndAddress>172.16.252.99</EndAddress>
+ </IpRange>
+ </IpRanges>
+ </IpScope>
+ </IpScopes>
+ <FenceMode>bridged</FenceMode>
+
+ Returns:
+ The return dictionary and key for each entry vapp UUID
+ """
+
+ network_configuration = {}
+ if network_uuid is None:
+ return network_uuid
+
- except:
- pass
+ try:
++ content = self.get_network_action(network_uuid=network_uuid)
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+
+ network_configuration['status'] = vm_list_xmlroot.get("status")
+ network_configuration['name'] = vm_list_xmlroot.get("name")
+ network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
+
+ for child in vm_list_xmlroot:
+ if child.tag.split("}")[1] == 'IsShared':
+ network_configuration['isShared'] = child.text.strip()
+ if child.tag.split("}")[1] == 'Configuration':
+ for configuration in child.iter():
+ tagKey = configuration.tag.split("}")[1].strip()
+ if tagKey != "":
+ network_configuration[tagKey] = configuration.text.strip()
+ return network_configuration
- self.logger.info("Create new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
++ except Exception as exp :
++ self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
++ raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
+
+ return network_configuration
+
+ def delete_network_action(self, network_uuid=None):
+ """
+ Method delete given network from vCloud director
+
+ Args:
+ network_uuid - is a network uuid that client wish to delete
+
+ Returns:
+ The return None or XML respond or false
+ """
+
+ vca = self.connect_as_admin()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+ if network_uuid is None:
+ return False
+
+ url_list = [vca.host, '/api/admin/network/', network_uuid]
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.delete(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+
+ if response.status_code == 202:
+ return True
+
+ return False
+
+ def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+ ip_profile=None, isshared='true'):
+ """
+ Method create network in vCloud director
+
+ Args:
+ network_name - is network name to be created.
+ net_type - can be 'bridge','data','ptp','mgmt'.
+ ip_profile is a dict containing the IP parameters of the network
+ isshared - is a boolean
+ parent_network_uuid - is parent provider vdc network that will be used for mapping.
+             It is an optional attribute. By default, if no parent network is indicated, the first available one will be used.
+
+ Returns:
+ The return network uuid or return None
+ """
+
+ new_network_name = [network_name, '-', str(uuid.uuid4())]
+ content = self.create_network_rest(network_name=''.join(new_network_name),
+ ip_profile=ip_profile,
+ net_type=net_type,
+ parent_network_uuid=parent_network_uuid,
+ isshared=isshared)
+ if content is None:
+ self.logger.debug("Failed create network {}.".format(network_name))
+ return None
+
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ vcd_uuid = vm_list_xmlroot.get('id').split(":")
+ if len(vcd_uuid) == 4:
- #Configure IP profile of the network
- ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
-
- gateway_address=ip_profile['gateway_address']
- dhcp_count=int(ip_profile['dhcp_count'])
- subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
++ self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
+ return vcd_uuid[3]
+ except:
+ self.logger.debug("Failed create network {}".format(network_name))
+ return None
+
+ def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+ ip_profile=None, isshared='true'):
+ """
+ Method create network in vCloud director
+
+ Args:
+ network_name - is network name to be created.
+ net_type - can be 'bridge','data','ptp','mgmt'.
+ ip_profile is a dict containing the IP parameters of the network
+ isshared - is a boolean
+ parent_network_uuid - is parent provider vdc network that will be used for mapping.
+             It is an optional attribute. By default, if no parent network is indicated, the first available one will be used.
+
+ Returns:
+ The return network uuid or return None
+ """
+
+ vca = self.connect_as_admin()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed.")
+ if network_name is None:
+ return None
+
+ url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
+ vm_list_rest_call = ''.join(url_list)
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+
+ provider_network = None
+ available_networks = None
+ add_vdc_rest_url = None
+
+ if response.status_code != requests.codes.ok:
+ self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+ response.status_code))
+ return None
+ else:
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+ for child in vm_list_xmlroot:
+ if child.tag.split("}")[1] == 'ProviderVdcReference':
+ provider_network = child.attrib.get('href')
+ # application/vnd.vmware.admin.providervdc+xml
+ if child.tag.split("}")[1] == 'Link':
+ if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
+ and child.attrib.get('rel') == 'add':
+ add_vdc_rest_url = child.attrib.get('href')
+ except:
+ self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug("Respond body {}".format(response.content))
+ return None
+
+ # find pvdc provided available network
+ response = Http.get(url=provider_network,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code != requests.codes.ok:
+ self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+ response.status_code))
+ return None
+
+ # available_networks.split("/")[-1]
+
+ if parent_network_uuid is None:
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+ for child in vm_list_xmlroot.iter():
+ if child.tag.split("}")[1] == 'AvailableNetworks':
+ for networks in child.iter():
+ # application/vnd.vmware.admin.network+xml
+ if networks.attrib.get('href') is not None:
+ available_networks = networks.attrib.get('href')
+ break
+ except:
+ return None
+
- if ip_profile['dhcp_enabled']==True:
- dhcp_enabled='true'
- else:
- dhcp_enabled='false'
- dhcp_start_address=ip_profile['dhcp_start_address']
++ try:
++ #Configure IP profile of the network
++ ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
+
- #derive dhcp_end_address from dhcp_start_address & dhcp_count
- end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
- end_ip_int += dhcp_count - 1
- dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
++ if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
++ subnet_rand = random.randint(0, 255)
++ ip_base = "192.168.{}.".format(subnet_rand)
++ ip_profile['subnet_address'] = ip_base + "0/24"
++ else:
++ ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
++
++ if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
++ ip_profile['gateway_address']=ip_base + "1"
++ if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
++ ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
++ if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
++ ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
++ if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
++ ip_profile['dhcp_start_address']=ip_base + "3"
++ if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
++ ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
++ if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
++ ip_profile['dns_address']=ip_base + "2"
++
++ gateway_address=ip_profile['gateway_address']
++ dhcp_count=int(ip_profile['dhcp_count'])
++ subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
++
++ if ip_profile['dhcp_enabled']==True:
++ dhcp_enabled='true'
++ else:
++ dhcp_enabled='false'
++ dhcp_start_address=ip_profile['dhcp_start_address']
+
- ip_version=ip_profile['ip_version']
- dns_address=ip_profile['dns_address']
++ #derive dhcp_end_address from dhcp_start_address & dhcp_count
++ end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
++ end_ip_int += dhcp_count - 1
++ dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
+
- if net_type=='ptp':
- fence_mode="isolated"
- isshared='false'
- is_inherited='false'
- data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
- <Description>Openmano created</Description>
- <Configuration>
- <IpScopes>
- <IpScope>
- <IsInherited>{1:s}</IsInherited>
- <Gateway>{2:s}</Gateway>
- <Netmask>{3:s}</Netmask>
- <Dns1>{4:s}</Dns1>
- <IsEnabled>{5:s}</IsEnabled>
- <IpRanges>
- <IpRange>
- <StartAddress>{6:s}</StartAddress>
- <EndAddress>{7:s}</EndAddress>
- </IpRange>
- </IpRanges>
- </IpScope>
- </IpScopes>
- <FenceMode>{8:s}</FenceMode>
- </Configuration>
- <IsShared>{9:s}</IsShared>
- </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
- subnet_address, dns_address, dhcp_enabled,
- dhcp_start_address, dhcp_end_address, fence_mode, isshared)
-
- else:
- fence_mode="bridged"
- is_inherited='false'
- data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
- <Description>Openmano created</Description>
- <Configuration>
- <IpScopes>
- <IpScope>
- <IsInherited>{1:s}</IsInherited>
- <Gateway>{2:s}</Gateway>
- <Netmask>{3:s}</Netmask>
- <Dns1>{4:s}</Dns1>
- <IsEnabled>{5:s}</IsEnabled>
- <IpRanges>
- <IpRange>
- <StartAddress>{6:s}</StartAddress>
- <EndAddress>{7:s}</EndAddress>
- </IpRange>
- </IpRanges>
- </IpScope>
- </IpScopes>
- <ParentNetwork href="{8:s}"/>
- <FenceMode>{9:s}</FenceMode>
- </Configuration>
- <IsShared>{10:s}</IsShared>
- </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
- subnet_address, dns_address, dhcp_enabled,
- dhcp_start_address, dhcp_end_address, available_networks,
- fence_mode, isshared)
++ ip_version=ip_profile['ip_version']
++ dns_address=ip_profile['dns_address']
++ except KeyError as exp:
++ self.logger.debug("Create Network REST: Key error {}".format(exp))
++ raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
+
+ # either use client provided UUID or search for a first available
+ # if both are not defined we return none
+ if parent_network_uuid is not None:
+ url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
+ add_vdc_rest_url = ''.join(url_list)
+
- self.logger.debug("Create Network POST REST API call failed. Return status code {}"
- .format(response.status_code))
++ #Creating all networks as Direct Org VDC type networks.
++ #Unused in case of Underlay (data/ptp) network interface.
++ fence_mode="bridged"
++ is_inherited='false'
++ data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
++ <Description>Openmano created</Description>
++ <Configuration>
++ <IpScopes>
++ <IpScope>
++ <IsInherited>{1:s}</IsInherited>
++ <Gateway>{2:s}</Gateway>
++ <Netmask>{3:s}</Netmask>
++ <Dns1>{4:s}</Dns1>
++ <IsEnabled>{5:s}</IsEnabled>
++ <IpRanges>
++ <IpRange>
++ <StartAddress>{6:s}</StartAddress>
++ <EndAddress>{7:s}</EndAddress>
++ </IpRange>
++ </IpRanges>
++ </IpScope>
++ </IpScopes>
++ <ParentNetwork href="{8:s}"/>
++ <FenceMode>{9:s}</FenceMode>
++ </Configuration>
++ <IsShared>{10:s}</IsShared>
++ </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
++ subnet_address, dns_address, dhcp_enabled,
++ dhcp_start_address, dhcp_end_address, available_networks,
++ fence_mode, isshared)
+
+ headers = vca.vcloud_session.get_vcloud_headers()
+ headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
+ try:
+ response = Http.post(url=add_vdc_rest_url,
+ headers=headers,
+ data=data,
+ verify=vca.verify,
+ logger=vca.logger)
+
+ if response.status_code != 201:
- self.logger.debug("Create Network REST : Waiting for Nw creation complete")
++ self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
++ .format(response.status_code,response.content))
+ else:
+ network = networkType.parseString(response.content, True)
+ create_nw_task = network.get_Tasks().get_Task()[0]
+
+ # if we all ok we respond with content after network creation completes
+ # otherwise by default return None
+ if create_nw_task is not None:
- vcenter_conect = None
++ self.logger.debug("Create Network REST : Waiting for Network creation complete")
+ status = vca.block_until_completed(create_nw_task)
+ if status:
+ return response.content
+ else:
+ self.logger.debug("create_network_rest task failed. Network Create response : {}"
+ .format(response.content))
+ except Exception as exp:
+ self.logger.debug("create_network_rest : Exception : {} ".format(exp))
+
+ return None
+
def convert_cidr_to_netmask(self, cidr_ip=None):
    """
    Convert a CIDR address (e.g. "192.168.0.0/24") to a dotted-quad netmask.

    Args:
        cidr_ip : CIDR IP address, a plain netmask, or None

    Returns:
        The dotted-quad netmask string; the input unchanged when it carries
        no "/prefix"; None when no address was given.
    """
    # Nothing to convert.
    if cidr_ip is None:
        return None
    # An address without a prefix length is assumed to already be a netmask.
    if '/' not in cidr_ip:
        return cidr_ip
    prefix_len = int(cidr_ip.split('/')[1])
    # Build the 32-bit mask and render it as a dotted quad.
    mask_bits = (0xffffffff << (32 - prefix_len)) & 0xffffffff
    return socket.inet_ntoa(struct.pack(">I", mask_bits))
+
def get_provider_rest(self, vca=None):
    """
    Method gets provider vdc view from vcloud director.

    Args:
        vca - active (logged-in) vcloud director session object.

    Returns:
        The xml content of the /api/admin respond or None
    """

    url_list = [vca.host, '/api/admin']
    response = Http.get(url=''.join(url_list),
                        headers=vca.vcloud_session.get_vcloud_headers(),
                        verify=vca.verify,
                        logger=vca.logger)

    # only a 200 carries the provider vdc view; any other status yields None
    if response.status_code == requests.codes.ok:
        return response.content
    return None
+
def create_vdc(self, vdc_name=None):
    """
    Create a new vdc (via the pre-defined template, see create_vdc_from_tmpl_rest)
    and return its identity.

    Args:
        vdc_name - name of the new vdc.

    Returns:
        Dict {vdc_uuid: task_href} for the created vdc, or None on failure.
    """
    vdc_dict = {}

    xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
    if xml_content is not None:
        try:
            task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
            for child in task_resp_xmlroot:
                # the Owner element carries the href of the newly created vdc
                if child.tag.split("}")[1] == 'Owner':
                    vdc_id = child.attrib.get('href').split("/")[-1]
                    vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
            return vdc_dict
        except Exception:
            # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt propagate
            self.logger.debug("Respond body {}".format(xml_content))

    return None
+
def create_vdc_from_tmpl_rest(self, vdc_name=None):
    """
    Method create vdc in vCloud director based on VDC template.
    It uses a pre-defined template that must be named "openmano".

    Args:
        vdc_name - name of a new vdc.

    Returns:
        The xml content of the instantiate respond or None
    """

    self.logger.info("Creating new vdc {}".format(vdc_name))
    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed")
    if vdc_name is None:
        return None

    url_list = [vca.host, '/api/vdcTemplates']
    vm_list_rest_call = ''.join(url_list)
    response = Http.get(url=vm_list_rest_call,
                        headers=vca.vcloud_session.get_vcloud_headers(),
                        verify=vca.verify,
                        logger=vca.logger)

    # container url to a template
    vdc_template_ref = None
    try:
        vm_list_xmlroot = XmlElementTree.fromstring(response.content)
        for child in vm_list_xmlroot:
            # application/vnd.vmware.admin.providervdc+xml
            # we need find a template from which we instantiate VDC
            if child.tag.split("}")[1] == 'VdcTemplate':
                if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml' and child.attrib.get(
                        'name') == 'openmano':
                    vdc_template_ref = child.attrib.get('href')
    except Exception:
        # narrowed from a bare "except:" so interrupts are not swallowed
        self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
        self.logger.debug("Respond body {}".format(response.content))
        return None

    # if we didn't find the required pre-defined template we return None
    if vdc_template_ref is None:
        return None

    try:
        # instantiate vdc
        url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
        vm_list_rest_call = ''.join(url_list)
        # NOTE(review): "opnemano" typo kept verbatim — it is runtime payload
        data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                      <Source href="{1:s}"></Source>
                      <Description>opnemano</Description>
                      </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
        headers = vca.vcloud_session.get_vcloud_headers()
        headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
        response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
                             logger=vca.logger)
        # if we all ok we respond with content otherwise by default None
        if response.status_code >= 200 and response.status_code < 300:
            return response.content
        return None
    except Exception:
        # narrowed from a bare "except:"
        self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
        self.logger.debug("Respond body {}".format(response.content))

    return None
+
def create_vdc_rest(self, vdc_name=None):
    """
    Method creates a new vdc in vCloud director directly through the admin
    REST API (as opposed to create_vdc_from_tmpl_rest, which instantiates a
    pre-defined template).

    Args:
        vdc_name - name of the vdc to be created.

    Returns:
        The xml content of the respond or None
    """

    self.logger.info("Creating new vdc {}".format(vdc_name))

    # vdc creation requires admin credentials
    vca = self.connect_as_admin()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed")
    if vdc_name is None:
        return None

    url_list = [vca.host, '/api/admin/org/', self.org_uuid]
    vm_list_rest_call = ''.join(url_list)
    if not (not vca.vcloud_session or not vca.vcloud_session.organization):
        response = Http.get(url=vm_list_rest_call,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)

        provider_vdc_ref = None
        add_vdc_rest_url = None
        available_networks = None

        if response.status_code != requests.codes.ok:
            self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                      response.status_code))
            return None
        else:
            try:
                vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                for child in vm_list_xmlroot:
                    # find the admin "add vdc" link (application/vnd.vmware.admin.createVdcParams+xml)
                    if child.tag.split("}")[1] == 'Link':
                        if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
                                and child.attrib.get('rel') == 'add':
                            add_vdc_rest_url = child.attrib.get('href')
            except Exception:
                # narrowed from a bare "except:" so interrupts are not swallowed
                self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                self.logger.debug("Respond body {}".format(response.content))
                return None

            response = self.get_provider_rest(vca=vca)
            try:
                vm_list_xmlroot = XmlElementTree.fromstring(response)
                for child in vm_list_xmlroot:
                    if child.tag.split("}")[1] == 'ProviderVdcReferences':
                        for sub_child in child:
                            provider_vdc_ref = sub_child.attrib.get('href')
            except Exception:
                # narrowed from a bare "except:"
                self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                self.logger.debug("Respond body {}".format(response))
                return None

            if add_vdc_rest_url is not None and provider_vdc_ref is not None:
                data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
                        <AllocationModel>ReservationPool</AllocationModel>
                        <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
                        <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
                        </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
                        <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
                        <ProviderVdcReference
                        name="Main Provider"
                        href="{2:s}" />
                <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
                                                                                              escape(vdc_name),
                                                                                              provider_vdc_ref)

                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
                response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
                                     logger=vca.logger)

                # if we all ok we respond with content otherwise by default None
                if response.status_code == 201:
                    return response.content
                return None
+
def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
    """
    Method retrieve vapp detail from vCloud director.

    Args:
        vapp_uuid - is vapp identifier.
        need_admin_access - when True an admin session is used; the admin view
            exposes the VCloudExtension data (e.g. the vCenter MoRef of the VM)
            that a tenant session does not see.

    Returns:
        Dict with the parsed vApp attributes (may be empty when the request
        or the parsing fails); None when vapp_uuid is not given.
    """

    parsed_respond = {}
    vca = None

    if need_admin_access:
        vca = self.connect_as_admin()
    else:
        vca = self.connect()

    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed")
    if vapp_uuid is None:
        return None

    url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
    get_vapp_restcall = ''.join(url_list)

    if vca.vcloud_session and vca.vcloud_session.organization:
        response = Http.get(url=get_vapp_restcall,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)

        if response.status_code != requests.codes.ok:
            self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
                                                                                      response.status_code))
            return parsed_respond

        try:
            xmlroot_respond = XmlElementTree.fromstring(response.content)
            parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']

            # namespaces used by the vCloud 1.5 vApp representation
            namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                          'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                          'vmw': 'http://www.vmware.com/schema/ovf',
                          'vm': 'http://www.vmware.com/vcloud/v1.5',
                          'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                          "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
                          "xmlns":"http://www.vmware.com/vcloud/v1.5"
                          }

            # creation timestamp of the vApp
            created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
            if created_section is not None:
                parsed_respond['created'] = created_section.text

            network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
            if network_section is not None and 'networkName' in network_section.attrib:
                parsed_respond['networkname'] = network_section.attrib['networkName']

            # flatten IpScope children (Gateway, Netmask, ...) and the first
            # StartAddress/EndAddress of each IpRange into parsed_respond
            ipscopes_section = \
                xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
                                     namespaces)
            if ipscopes_section is not None:
                for ipscope in ipscopes_section:
                    for scope in ipscope:
                        tag_key = scope.tag.split("}")[1]
                        if tag_key == 'IpRanges':
                            ip_ranges = scope.getchildren()
                            for ipblock in ip_ranges:
                                for block in ipblock:
                                    parsed_respond[block.tag.split("}")[1]] = block.text
                        else:
                            parsed_respond[tag_key] = scope.text

            # parse children section for other attrib
            children_section = xmlroot_respond.find('vm:Children/', namespaces)
            if children_section is not None:
                parsed_respond['name'] = children_section.attrib['name']
                parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
                    if "nestedHypervisorEnabled" in children_section.attrib else None
                parsed_respond['deployed'] = children_section.attrib['deployed']
                parsed_respond['status'] = children_section.attrib['status']
                # the id attribute ends with "...:<uuid>"; keep only the uuid
                parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
                network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
                nic_list = []
                for adapters in network_adapter:
                    adapter_key = adapters.tag.split("}")[1]
                    if adapter_key == 'PrimaryNetworkConnectionIndex':
                        parsed_respond['primarynetwork'] = adapters.text
                    if adapter_key == 'NetworkConnection':
                        vnic = {}
                        if 'network' in adapters.attrib:
                            vnic['network'] = adapters.attrib['network']
                        for adapter in adapters:
                            setting_key = adapter.tag.split("}")[1]
                            vnic[setting_key] = adapter.text
                        nic_list.append(vnic)

                # console ticket links (used by acuire_console)
                for link in children_section:
                    if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                        if link.attrib['rel'] == 'screen:acquireTicket':
                            parsed_respond['acquireTicket'] = link.attrib
                        if link.attrib['rel'] == 'screen:acquireMksTicket':
                            parsed_respond['acquireMksTicket'] = link.attrib

                parsed_respond['interfaces'] = nic_list
                # extension data — only present with admin access
                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                if vCloud_extension_section is not None:
                    vm_vcenter_info = {}
                    vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                    vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                    if vmext is not None:
                        vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                    parsed_respond["vm_vcenter_info"]= vm_vcenter_info

                # disk size and the "edit disks" href (used by modify_vm_disk)
                virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
                vm_virtual_hardware_info = {}
                if virtual_hardware_section is not None:
                    for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
                        if item.find("rasd:Description",namespaces).text == "Hard disk":
                            disk_size = item.find("rasd:HostResource" ,namespaces
                                                  ).attrib["{"+namespaces['vm']+"}capacity"]

                            vm_virtual_hardware_info["disk_size"]= disk_size
                            break

                    for link in virtual_hardware_section:
                        if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                            if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
                                vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
                                break

                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
        except Exception as exp :
            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
    return parsed_respond
+
def acuire_console(self, vm_uuid=None):
    """
    Acquire a remote-console ticket for the given VM.
    (Method name typo "acuire" kept for backward compatibility with callers.)

    Args:
        vm_uuid - uuid of the VM

    Returns:
        Content of the console POST respond, or None
    """
    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed")
    if vm_uuid is None:
        return None

    if not (not vca.vcloud_session or not vca.vcloud_session.organization):
        # BUG FIX: "self" was passed twice (self.get_vapp_details_rest(self, vapp_uuid=...)),
        # which raised TypeError (multiple values for vapp_uuid) at runtime.
        vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
        console_dict = vm_dict['acquireTicket']
        console_rest_call = console_dict['href']

        response = Http.post(url=console_rest_call,
                             headers=vca.vcloud_session.get_vcloud_headers(),
                             verify=vca.verify,
                             logger=vca.logger)

        if response.status_code == requests.codes.ok:
            return response.content

    return None
+
+ def modify_vm_disk(self, vapp_uuid, flavor_disk):
+ """
+ Method retrieve vm disk details
+
+ Args:
+ vapp_uuid - is vapp identifier.
+ flavor_disk - disk size as specified in VNFD (flavor)
+
+ Returns:
+ The return network uuid or return None
+ """
+ status = None
+ try:
+ #Flavor disk is in GB convert it into MB
+ flavor_disk = int(flavor_disk) * 1024
+ vm_details = self.get_vapp_details_rest(vapp_uuid)
+ if vm_details:
+ vm_name = vm_details["name"]
+ self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
+
+ if vm_details and "vm_virtual_hardware" in vm_details:
+ vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
+ disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+
+ self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
+
+ if flavor_disk > vm_disk:
+ status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
+ self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
+ vm_disk, flavor_disk ))
+ else:
+ status = True
+ self.logger.info("No need to modify disk of VM {}".format(vm_name))
+
+ return status
+ except Exception as exp:
+ self.logger.info("Error occurred while modifing disk size {}".format(exp))
+
+
def modify_vm_disk_rest(self, disk_href , disk_size):
    """
    Modify the "Hard disk" capacity of a VM via GET + PUT on its disks href.

    Args:
        disk_href - vCD API URL to GET and PUT disk data
        disk_size - new disk size in MB

    Returns:
        True/False task completion status when the PUT was accepted,
        None on any failure.
    """
    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("self.connect() is failed")
    if disk_href is None or disk_size is None:
        return None

    if vca.vcloud_session and vca.vcloud_session.organization:
        response = Http.get(url=disk_href,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)

        if response.status_code != requests.codes.ok:
            self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
                                                                                          response.status_code))
            return None
        try:
            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
            # NOTE(review): dict.iteritems() is Python 2 only — consistent with
            # the rest of this module, but will break under Python 3.
            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

            # patch the capacity attribute of the "Hard disk" item in place
            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                if item.find("rasd:Description",namespaces).text == "Hard disk":
                    disk_item = item.find("rasd:HostResource" ,namespaces )
                    if disk_item is not None:
                        disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
                        break

            data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
                                            xml_declaration=True)

            #Send PUT request to modify disk size
            headers = vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

            response = Http.put(url=disk_href,
                                data=data,
                                headers=headers,
                                verify=vca.verify, logger=self.logger)

            # 202 Accepted: vCD started an async task; wait for it to finish
            if response.status_code != 202:
                self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
                                                                                              response.status_code))
            else:
                modify_disk_task = taskType.parseString(response.content, True)
                if type(modify_disk_task) is GenericTask:
                    status = vca.block_until_completed(modify_disk_task)
                    return status

            return None

        except Exception as exp :
            self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
            return None
+
def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
    """
    Method to attach pci devices to VM

    Args:
        vapp_uuid - uuid of vApp/VM
        pci_devices - pci devices infromation as specified in VNFD (flavor)
        vmname_andid - name+id label of the VM, used in error reporting

    Returns:
        Tuple (status, vm_obj, vcenter_conect): status of the add-pci-device
        task, the vSphere VM object and the vCenter connection object.

    Raises:
        vimconn.vimconnNotFoundException when no host can supply the devices
        or the VM migration to such a host fails.
    """
    vm_obj = None
    self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
    vcenter_conect, content = self.get_vcenter_content()
    vm_moref_id = self.get_vm_moref_id(vapp_uuid)

    if vm_moref_id:
        try:
            no_of_pci_devices = len(pci_devices)
            if no_of_pci_devices > 0:
                #Get VM and its host
                host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
                if host_obj and vm_obj:
                    #get PCI devies from host on which vapp is currently installed
                    avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)

                    if avilable_pci_devices is None:
                        #find other hosts with active pci devices
                        new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
                                                            content,
                                                            no_of_pci_devices
                                                            )

                        if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
                            #Migrate vm to the host where PCI devices are availble
                            self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
                            task = self.relocate_vm(new_host_obj, vm_obj)
                            if task is not None:
                                result = self.wait_for_vcenter_task(task, vcenter_conect)
                                self.logger.info("Migrate VM status: {}".format(result))
                                host_obj = new_host_obj
                            else:
                                # BUG FIX: this branch referenced the unbound name
                                # "result" (only set when task is not None), which
                                # raised NameError before the intended exception.
                                self.logger.info("Fail to migrate VM : {}".format(vmname_andid))
                                raise vimconn.vimconnNotFoundException(
                                    "Fail to migrate VM : {} to host {}".format(
                                                    vmname_andid,
                                                    new_host_obj)
                                    )

                    if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
                        #Add PCI devices one by one
                        for pci_device in avilable_pci_devices:
                            task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
                            if task:
                                status= self.wait_for_vcenter_task(task, vcenter_conect)
                                if status:
                                    self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
                            else:
                                self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
                        return True, vm_obj, vcenter_conect
                    else:
                        self.logger.error("Currently there is no host with"\
                                          " {} number of avaialble PCI devices required for VM {}".format(
                                                                    no_of_pci_devices,
                                                                    vmname_andid)
                                          )
                        raise vimconn.vimconnNotFoundException(
                            "Currently there is no host with {} "\
                            "number of avaialble PCI devices required for VM {}".format(
                                                                    no_of_pci_devices,
                                                                    vmname_andid))
            else:
                self.logger.debug("No infromation about PCI devices {} ",pci_devices)

        except vmodl.MethodFault as error:
            self.logger.error("Error occurred while adding PCI devices {} ",error)
    return None, vm_obj, vcenter_conect
+
def get_vm_obj(self, content, mob_id):
    """
    Look up the vSphere VM object whose managed-object id matches mob_id,
    together with the host it currently runs on.

    Args:
        content - vCenter content object
        mob_id - managed object reference id of the VM

    Returns:
        Tuple (host_obj, vm_obj); both None when not found or on error.
    """
    found_vm = None
    found_host = None
    try :
        vm_view = content.viewManager.CreateContainerView(content.rootFolder,
                                                          [vim.VirtualMachine], True
                                                          )
        # pick the first VM whose MoId matches; None when absent
        found_vm = next((candidate for candidate in vm_view.view
                         if candidate._GetMoId() == mob_id), None)
        if found_vm is not None:
            found_host = found_vm.runtime.host
    except Exception as exp:
        self.logger.error("Error occurred while finding VM object : {}".format(exp))
    return found_host, found_vm
+
def get_pci_devices(self, host, need_devices):
    """
    Method to get the details of pci devices on given host

    Args:
        host - vSphere host object
        need_devices - number of pci devices needed on host

    Returns:
        Array with need_devices free passthrough pci devices, or None when
        the host cannot satisfy the request or on error.
    """
    all_devices = []
    all_device_ids = []
    used_devices_ids = []

    try:
        if host:
            # collect every device that is configured for passthrough
            pciPassthruInfo = host.config.pciPassthruInfo
            pciDevies = host.hardware.pciDevice

            for pci_status in pciPassthruInfo:
                if pci_status.passthruActive:
                    for device in pciDevies:
                        if device.id == pci_status.id:
                            all_device_ids.append(device.id)
                            all_devices.append(device)

            #check if devices are in use
            # BUG FIX: take a copy — the original aliased all_devices and then
            # removed entries from the shared list.
            avalible_devices = list(all_devices)
            for vm in host.vm:
                if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                    vm_devices = vm.config.hardware.device
                    for device in vm_devices:
                        if type(device) is vim.vm.device.VirtualPCIPassthrough:
                            if device.backing.id in all_device_ids:
                                # BUG FIX: iterate over a snapshot so .remove()
                                # cannot make the loop skip elements.
                                for use_device in list(avalible_devices):
                                    if use_device.id == device.backing.id:
                                        avalible_devices.remove(use_device)
                                used_devices_ids.append(device.backing.id)
                                self.logger.debug("Device {} from devices {}"\
                                                  "is in use".format(device.backing.id,
                                                                     device)
                                                  )
            if len(avalible_devices) < need_devices:
                self.logger.debug("Host {} don't have {} number of active devices".format(host,
                                                                                          need_devices))
                self.logger.debug("found only {} devives {}".format(len(avalible_devices),
                                                                    avalible_devices))
                return None
            else:
                required_devices = avalible_devices[:need_devices]
                self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
                                                            len(avalible_devices),
                                                            host,
                                                            need_devices))
                self.logger.info("Retruning {} devices as {}".format(need_devices,
                                                                     required_devices ))
                return required_devices

    except Exception as exp:
        self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))

    return None
+
def get_host_and_PCIdevices(self, content, need_devices):
    """
    Scan all hosts in the datacenter and return the first one that offers
    the requested number of free pci passthrough devices.

    Args:
        content - vCenter content object
        need_devices - number of pci devices needed on host

    Returns:
        Tuple (host_obj, pci_device_objs); (None, None) when no host
        qualifies or on error.
    """
    selected_host = None
    selected_devices = None
    try:
        if content:
            host_view = content.viewManager.CreateContainerView(content.rootFolder,
                                                                [vim.HostSystem], True)
            # stop at the first host that can supply the devices
            for candidate in host_view.view:
                free_devices = self.get_pci_devices(candidate, need_devices)
                if free_devices:
                    selected_host = candidate
                    selected_devices = free_devices
                    break
    except Exception as exp:
        self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, selected_host))

    return selected_host,selected_devices
+
def relocate_vm(self, dest_host, vm) :
    """
    Start relocation (migration) of the given VM to a new host.

    Args:
        dest_host - vSphere host object to migrate to
        vm - vSphere VM object

    Returns:
        vSphere task object, or None when the relocation could not be started.
    """
    task = None
    try:
        relocate_spec = vim.vm.RelocateSpec(host=dest_host)
        task = vm.Relocate(relocate_spec)
        self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
    except Exception as exp:
        # BUG FIX: the format arguments were swapped (dest_host was logged as
        # the VM and vice versa).
        self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
                          vm, dest_host, exp))
    return task
+
def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
    """
    Block until a vSphere task leaves the 'running' state, logging the outcome.

    Args:
        task - vSphere task object to wait on
        actionName - label used in the log messages
        hideResult - when True, the task result value is not logged

    Returns:
        task.info.result
    """
    # busy-poll: vSphere tasks expose no wait primitive here
    while task.info.state == vim.TaskInfo.State.running:
        time.sleep(2)

    succeeded = task.info.state == vim.TaskInfo.State.success
    if succeeded and task.info.result is not None and not hideResult:
        self.logger.info('{} completed successfully, result: {}'.format(
                         actionName,
                         task.info.result))
    elif succeeded:
        self.logger.info('Task {} completed successfully.'.format(actionName))
    else:
        self.logger.error('{} did not complete successfully: {} '.format(
                          actionName,
                          task.info.error)
                          )

    return task.info.result
+
def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
    """
    Method to add pci device in given VM

    Args:
        host_object - vSphere host object
        vm_object - vSphere VM object
        host_pci_dev - host_pci_dev must be one of the devices from the
                       host_object.hardware.pciDevice list
                       which is configured as a PCI passthrough device

    Returns:
        Reconfigure task object, or None when the device cannot be attached.
    """
    task = None
    if vm_object and host_object and host_pci_dev:
        try :
            #Add PCI device to VM
            pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
            systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}

            if host_pci_dev.id not in systemid_by_pciid:
                self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
                return None

            # BUG FIX: hex(x).lstrip('0x') strips *any* leading '0'/'x' chars
            # and yields '' for deviceId 0; format as plain lowercase hex.
            deviceId = '%x' % (host_pci_dev.deviceId % 2**16)
            backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
                                                                 id=host_pci_dev.id,
                                                                 systemId=systemid_by_pciid[host_pci_dev.id],
                                                                 vendorId=host_pci_dev.vendorId,
                                                                 deviceName=host_pci_dev.deviceName)

            hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)

            new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
            new_device_config.operation = "add"
            vmConfigSpec = vim.vm.ConfigSpec()
            vmConfigSpec.deviceChange = [new_device_config]

            task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
            self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
                                host_pci_dev, vm_object, host_object)
                             )
        except Exception as exp:
            self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
                                host_pci_dev,
                                vm_object,
                                exp))
    return task
+
def get_vm_vcenter_info(self):
    """
    Collect the vCenter access details configured for this VIM connector.

    Reads self.vcenter_ip / vcenter_port / vcenter_user / vcenter_password
    (populated from the datacenter --config) and returns them as a dict.

    Returns:
        Dict with keys vm_vcenter_ip, vm_vcenter_port, vm_vcenter_user
        and vm_vcenter_password.

    Raises:
        vimconn.vimconnException when any of the four settings is missing.
    """
    vm_vcenter_info = {}
    # (result key, configured value, label used in the error message)
    settings = (
        ("vm_vcenter_ip", self.vcenter_ip, "vCenter IP"),
        ("vm_vcenter_port", self.vcenter_port, "vCenter port"),
        ("vm_vcenter_user", self.vcenter_user, "vCenter user"),
        ("vm_vcenter_password", self.vcenter_password, "vCenter user password"),
    )
    for key, value, label in settings:
        if value is None:
            raise vimconn.vimconnException(
                message="{0} is not provided. Please provide {0} while attaching"
                        " datacenter to tenant in --config".format(label))
        vm_vcenter_info[key] = value

    return vm_vcenter_info
+
+
def get_vm_pci_details(self, vmuuid):
    """
    Collect PCI passthrough device details for a VM from vCenter.

    Args:
        vmuuid - uuid of the vApp/VM to inspect

    Returns:
        dict with 'host_name', 'host_ip' and one entry per attached
        VirtualPCIPassthrough device (label -> {'devide_id', 'pciSlotNumber'});
        empty when the VM cannot be located.
    Raises:
        vimconn.vimconnException on any unexpected error
    """
    vm_pci_devices_info = {}
    try:
        vcenter_conect, content = self.get_vcenter_content()
        vm_moref_id = self.get_vm_moref_id(vmuuid)
        if vm_moref_id:
            if content:
                # Resolve the VM and its current host from the moref id.
                host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                if host_obj and vm_obj:
                    vm_pci_devices_info["host_name"] = host_obj.name
                    vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[0].spec.ip.ipAddress
                    for device in vm_obj.config.hardware.device:
                        if type(device) == vim.vm.device.VirtualPCIPassthrough:
                            vm_pci_devices_info[device.deviceInfo.label] = {
                                'devide_id': device.backing.id,
                                'pciSlotNumber': device.slotInfo.pciSlotNumber,
                                }
            else:
                self.logger.error("Can not connect to vCenter while getting "\
                                  "PCI devices infromationn")
        return vm_pci_devices_info
    except Exception as exp:
        self.logger.error("Error occurred while getting VM infromationn"\
                          " for VM : {}".format(exp))
        raise vimconn.vimconnException(message=exp)
def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
    """
    Attach a network connection (NIC) to every VM in the given vApp.

    Args:
        vapp - pyvcloud vApp object whose VMs get the new NIC
        network_name - name of the vCloud network to connect
        primary_nic_index - int value for primary nic index
        nicIndex - int value for nic index
        net - net dict; optional keys 'floating_ip' (bool) and 'ip_address'
        nic_type - NIC model to request; default adapter when None
    Returns:
        None
    Raises:
        vimconnConnectionException when the vCloud session cannot be opened,
        vimconnException when a GET/PUT on the networkConnectionSection fails
    """
    vca = self.connect()
    if not vca:
        raise vimconn.vimconnConnectionException("Failed to connect vCloud director")

    try:
        ip_address = None
        floating_ip = False
        if 'floating_ip' in net:
            floating_ip = net['floating_ip']

        # Stub for ip_address feature
        if 'ip_address' in net:
            ip_address = net['ip_address']

        if floating_ip:
            allocation_mode = "POOL"
        elif ip_address:
            allocation_mode = "MANUAL"
        else:
            allocation_mode = "DHCP"

        # The original had two near-identical ~80-line branches that differed
        # only by the optional <NetworkAdapterType> element and the log text;
        # build that difference once and run a single per-VM loop.
        adapter_element = ""
        if nic_type:
            adapter_element = "\n<NetworkAdapterType>{}</NetworkAdapterType>".format(nic_type)

        for vms in vapp._get_vms():
            vm_id = (vms.id).split(':')[-1]

            url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)

            response = Http.get(url=url_rest_call,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)
            if response.status_code != 200:
                self.logger.error("REST call {} failed reason : {}"
                                  "status code : {}".format(url_rest_call,
                                                            response.content,
                                                            response.status_code))
                raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "
                                               "network connection section")

            data = response.content
            if '<PrimaryNetworkConnectionIndex>' not in data:
                # First NIC of the VM: the section also needs the primary index.
                item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
<NetworkConnection network="{}">
<NetworkConnectionIndex>{}</NetworkConnectionIndex>
<IsConnected>true</IsConnected>
<IpAddressAllocationMode>{}</IpAddressAllocationMode>{}
</NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                               allocation_mode, adapter_element)
                # Stub for ip_address feature
                if ip_address:
                    ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                    item = item.replace('</NetworkConnectionIndex>\n',
                                        '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                data = data.replace('</ovf:Info>\n', '</ovf:Info>\n{}\n'.format(item))
            else:
                # Append an additional NetworkConnection after the last one.
                new_item = """<NetworkConnection network="{}">
<NetworkConnectionIndex>{}</NetworkConnectionIndex>
<IsConnected>true</IsConnected>
<IpAddressAllocationMode>{}</IpAddressAllocationMode>{}
</NetworkConnection>""".format(network_name, nicIndex,
                               allocation_mode, adapter_element)
                # Stub for ip_address feature
                if ip_address:
                    ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                    new_item = new_item.replace('</NetworkConnectionIndex>\n',
                                                '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                data = data.replace('</NetworkConnection>\n', '</NetworkConnection>\n{}\n'.format(new_item))

            headers = vca.vcloud_session.get_vcloud_headers()
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
            response = Http.put(url=url_rest_call, headers=headers, data=data,
                                verify=vca.verify,
                                logger=vca.logger)
            if response.status_code != 202:
                self.logger.error("REST call {} failed reason : {}"
                                  "status code : {}".format(url_rest_call,
                                                            response.content,
                                                            response.status_code))
                raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "
                                               "network connection section")
            else:
                nic_task = taskType.parseString(response.content, True)
                if isinstance(nic_task, GenericTask):
                    vca.block_until_completed(nic_task)
                    if nic_type:
                        self.logger.info("add_network_adapter_to_vms(): VM {} "
                                         "conneced to NIC type {}".format(vm_id, nic_type))
                    else:
                        self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "
                                         "default NIC type".format(vm_id))
                else:
                    if nic_type:
                        self.logger.error("add_network_adapter_to_vms(): VM {} "
                                          "failed to connect NIC type {}".format(vm_id, nic_type))
                    else:
                        self.logger.error("add_network_adapter_to_vms(): VM {} failed to "
                                          "connect NIC type".format(vm_id))
    except Exception as exp:
        self.logger.error("add_network_adapter_to_vms() : exception occurred "
                          "while adding Network adapter")
        raise vimconn.vimconnException(message=exp)
++
++
def set_numa_affinity(self, vmuuid, paired_threads_id):
    """
    Assign NUMA node affinity in the VM's extra configuration parameters.

    Args:
        vmuuid - vm uuid
        paired_threads_id - one or more virtual processor numbers
    Returns:
        None on success (returns early once the option is confirmed set)
    Raises:
        vimconn.vimconnException on any failure
    """
    # FIX: initialize these before the try block; the original referenced
    # them in the except handler, raising NameError when
    # get_vcenter_info_rest() itself failed.
    vm_obj = None
    vm_moref_id = None
    try:
        vm_moref_id, vm_vcenter_host, vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
        if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
            context = None
            if hasattr(ssl, '_create_unverified_context'):
                # lab vCenters commonly use self-signed certificates
                context = ssl._create_unverified_context()
            vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
                                          pwd=self.passwd, port=int(vm_vcenter_port),
                                          sslContext=context)
            atexit.register(Disconnect, vcenter_conect)
            content = vcenter_conect.RetrieveContent()

            host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
            if vm_obj:
                config_spec = vim.vm.ConfigSpec()
                config_spec.extraConfig = []
                opt = vim.option.OptionValue()
                opt.key = 'numa.nodeAffinity'
                opt.value = str(paired_threads_id)
                config_spec.extraConfig.append(opt)
                task = vm_obj.ReconfigVM_Task(config_spec)
                if task:
                    self.wait_for_vcenter_task(task, vcenter_conect)
                    # Re-read extraConfig to confirm the option really landed.
                    flag = False
                    for opts in vm_obj.config.extraConfig:
                        if 'numa.nodeAffinity' in opts.key:
                            flag = True
                            self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "
                                             "value {} for vm {}".format(opt.value, vm_obj))
                    if flag:
                        return
                    else:
                        self.logger.error("set_numa_affinity: Failed to assign numa affinity")
    except Exception as exp:
        self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "
                          "for VM {} : {}".format(vm_obj, vm_moref_id))
        raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "
                                       "affinity".format(exp))
++
++
++
++ def cloud_init(self, vapp, cloud_config):
++ """
++ Method to inject ssh-key
++ vapp - vapp object
++ cloud_config a dictionary with:
++ 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
++ 'users': (optional) list of users to be inserted, each item is a dict with:
++ 'name': (mandatory) user name,
++ 'key-pairs': (optional) list of strings with the public key to be inserted to the user
++ 'user-data': (optional) string is a text script to be passed directly to cloud-init
++ 'config-files': (optional). List of files to be transferred. Each item is a dict with:
++ 'dest': (mandatory) string with the destination absolute path
++ 'encoding': (optional, by default text). Can be one of:
++ 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
++ 'content' (mandatory): string with the content of the file
++ 'permissions': (optional) string with file permissions, typically octal notation '0644'
++ 'owner': (optional) file owner, string with the format 'owner:group'
++ 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
++ """
++ vca = self.connect()
++ if not vca:
++ raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
++
++ try:
++ if isinstance(cloud_config, dict):
++ key_pairs = []
++ userdata = []
++ if "key-pairs" in cloud_config:
++ key_pairs = cloud_config["key-pairs"]
++
++ if "users" in cloud_config:
++ userdata = cloud_config["users"]
++
++ # NOTE(review): only 'key-pairs' and 'users' are honoured below;
++ # 'user-data', 'config-files' and 'boot-data-drive' are ignored here.
++ for key in key_pairs:
++ for user in userdata:
++ # NOTE(review): if a user entry has no 'name', user_name keeps the value
++ # from a previous iteration (or is undefined on the first) -- confirm intended.
++ if 'name' in user: user_name = user['name']
++ if 'key-pairs' in user and len(user['key-pairs']) > 0:
++ for user_key in user['key-pairs']:
++ # Guest pre-customization script: appends the default key to root's
++ # authorized_keys and the per-user key to the named user's; assumes a
++ # bash-capable Linux guest.
++ customize_script = """
++ #!/bin/bash
++ echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
++ if [ "$1" = "precustomization" ];then
++ echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
++ if [ ! -d /root/.ssh ];then
++ mkdir /root/.ssh
++ chown root:root /root/.ssh
++ chmod 700 /root/.ssh
++ touch /root/.ssh/authorized_keys
++ chown root:root /root/.ssh/authorized_keys
++ chmod 600 /root/.ssh/authorized_keys
++ # make centos with selinux happy
++ which restorecon && restorecon -Rv /root/.ssh
++ echo '{key}' >> /root/.ssh/authorized_keys
++ else
++ touch /root/.ssh/authorized_keys
++ chown root:root /root/.ssh/authorized_keys
++ chmod 600 /root/.ssh/authorized_keys
++ echo '{key}' >> /root/.ssh/authorized_keys
++ fi
++ if [ -d /home/{user_name} ];then
++ if [ ! -d /home/{user_name}/.ssh ];then
++ mkdir /home/{user_name}/.ssh
++ chown {user_name}:{user_name} /home/{user_name}/.ssh
++ chmod 700 /home/{user_name}/.ssh
++ touch /home/{user_name}/.ssh/authorized_keys
++ chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
++ chmod 600 /home/{user_name}/.ssh/authorized_keys
++ # make centos with selinux happy
++ which restorecon && restorecon -Rv /home/{user_name}/.ssh
++ echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
++ else
++ touch /home/{user_name}/.ssh/authorized_keys
++ chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
++ chmod 600 /home/{user_name}/.ssh/authorized_keys
++ echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
++ fi
++ fi
++ fi""".format(key=key, user_name=user_name, user_key=user_key)
++
++ # NOTE(review): customization runs once per (key, user, user_key)
++ # combination, re-customizing every VM each time -- confirm intended.
++ for vm in vapp._get_vms():
++ vm_name = vm.name
++ task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
++ if isinstance(task, GenericTask):
++ vca.block_until_completed(task)
++ self.logger.info("cloud_init : customized guest os task "\
++ "completed for VM {}".format(vm_name))
++ else:
++ self.logger.error("cloud_init : task for customized guest os"\
++ "failed for VM {}".format(vm_name))
++ except Exception as exp:
++ self.logger.error("cloud_init : exception occurred while injecting "\
++ "ssh-key")
++ raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
++ "ssh-key".format(exp))
++
++
def add_new_disk(self, vca, vapp_uuid, disk_size):
    """
    Create an additional empty disk on the VM of the given vApp.

    Args:
        vca - connected pyvcloud client used for the REST edit call
        vapp_uuid - is vapp identifier.
        disk_size - size of disk to be created in GB

    Returns:
        None. On failure the vApp is rolled back (deleted) via
        rollback_newvm(), which raises a vimconn exception.
    """
    status = False
    vm_details = None
    try:
        # Disk size arrives in GB; the vCloud RASD item expects MB.
        if disk_size is not None:
            disk_size_mb = int(disk_size) * 1024
            vm_details = self.get_vapp_details_rest(vapp_uuid)

        if vm_details and "vm_virtual_hardware" in vm_details:
            self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
            disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
            status = self.add_new_disk_rest(vca, disk_href, disk_size_mb)

    except Exception as exp:
        msg = "Error occurred while creating new disk {}.".format(exp)
        self.rollback_newvm(vapp_uuid, msg)

    if status:
        self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
    else:
        # If failed to add disk, delete VM.
        # FIX: vm_details can still be None here (e.g. disk_size was None or
        # the details lookup failed); the original crashed with a TypeError
        # while building the rollback message.
        vm_name = vm_details["name"] if vm_details else vapp_uuid
        msg = "add_new_disk: Failed to add new disk to {}".format(vm_name)
        self.rollback_newvm(vapp_uuid, msg)
++
++
++ def add_new_disk_rest(self, vca, disk_href, disk_size_mb):
++ """
++ Retrieves the vApp Disks section & adds a new empty disk.
++
++ Args:
++ vca: connected pyvcloud client providing the session headers
++ disk_href: Disk section href to add disk
++ disk_size_mb: Disk size in MB
++
++ Returns: Status of add new disk task (False on any failure)
++ """
++ status = False
++ if vca.vcloud_session and vca.vcloud_session.organization:
++ response = Http.get(url=disk_href,
++ headers=vca.vcloud_session.get_vcloud_headers(),
++ verify=vca.verify,
++ logger=vca.logger)
++
++ if response.status_code != requests.codes.ok:
++ self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
++ .format(disk_href, response.status_code))
++ return status
++ try:
++ #Find bus type & max of instance IDs assigned to existing disks
++ lxmlroot_respond = lxmlElementTree.fromstring(response.content)
++ # NOTE: dict.iteritems() makes this Python-2-only code
++ namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
++ namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
++ instance_id = 0
++ for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
++ if item.find("rasd:Description",namespaces).text == "Hard disk":
++ inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
++ if inst_id > instance_id:
++ instance_id = inst_id
++ # reuse the bus type/subtype of the last hard disk seen
++ disk_item = item.find("rasd:HostResource" ,namespaces)
++ bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
++ bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
++
++ # next free instance id for the new disk
++ instance_id = instance_id + 1
++ new_item = """<Item>
++ <rasd:Description>Hard disk</rasd:Description>
++ <rasd:ElementName>New disk</rasd:ElementName>
++ <rasd:HostResource
++ xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
++ vcloud:capacity="{}"
++ vcloud:busSubType="{}"
++ vcloud:busType="{}"></rasd:HostResource>
++ <rasd:InstanceID>{}</rasd:InstanceID>
++ <rasd:ResourceType>17</rasd:ResourceType>
++ </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
++
++ new_data = response.content
++ #Add new item at the bottom
++ # NOTE(review): relies on the exact '</Item>\n</RasdItemsList>' serialization
++ # of the GET response -- confirm against the vCloud version in use.
++ new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
++
++ # Send PUT request to modify virtual hardware section with new disk
++ headers = vca.vcloud_session.get_vcloud_headers()
++ headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
++
++ response = Http.put(url=disk_href,
++ data=new_data,
++ headers=headers,
++ verify=vca.verify, logger=self.logger)
++
++ if response.status_code != 202:
++ self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
++ .format(disk_href, response.status_code, response.content))
++ else:
++ add_disk_task = taskType.parseString(response.content, True)
++ if type(add_disk_task) is GenericTask:
++ status = vca.block_until_completed(add_disk_task)
++ if not status:
++ self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
++
++ except Exception as exp:
++ self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
++
++ return status
++
++
++ def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
++ """
++ Method to add existing disk to vm
++ Args :
++ catalogs - List of VDC catalogs
++ image_id - Catalog ID
++ size - (optional) requested disk size in GB, forwarded to add_disk()
++ template_name - Name of template in catalog
++ vapp_uuid - UUID of vApp
++ Returns:
++ None
++ """
++ disk_info = None
++ vcenter_conect, content = self.get_vcenter_content()
++ #find moref-id of vm in image
++ # NOTE(review): the template_name parameter is not used in this method.
++ catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
++ image_id=image_id,
++ )
++
++ if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
++ if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
++ catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
++ if catalog_vm_moref_id:
++ self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
++ host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
++ if catalog_vm_obj:
++ #find existing disk on the catalog (template) VM
++ disk_info = self.find_disk(catalog_vm_obj)
++ else:
++ # rollback_newvm deletes the vApp and raises
++ exp_msg = "No VM with image id {} found".format(image_id)
++ self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
++ else:
++ exp_msg = "No Image found with image ID {} ".format(image_id)
++ self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
++
++ if disk_info:
++ self.logger.info("Existing disk_info : {}".format(disk_info))
++ #get VM backing this vApp and attach the found disk to it
++ vm_moref_id = self.get_vm_moref_id(vapp_uuid)
++ host, vm_obj = self.get_vm_obj(content, vm_moref_id)
++ if vm_obj:
++ status = self.add_disk(vcenter_conect=vcenter_conect,
++ vm=vm_obj,
++ disk_info=disk_info,
++ size=size,
++ vapp_uuid=vapp_uuid
++ )
++ if status:
++ self.logger.info("Disk from image id {} added to {}".format(image_id,
++ vm_obj.config.name)
++ )
++ else:
++ # NOTE(review): this else references vm_obj, so in the original
++ # indentation it must pair with a branch where vm_obj is bound -- verify.
++ msg = "No disk found with image id {} to add in VM {}".format(
++ image_id,
++ vm_obj.config.name)
++ self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
++
++
def find_disk(self, vm_obj):
    """
    Return details of the first flat-backed virtual disk found on a VM.

    Args :
        vm_obj - vCenter object of the VM to inspect
    Returns:
        dict with keys full_path, datastore and capacityKB for the first
        VirtualDisk whose backing is FlatVer2BackingInfo with a fileName;
        empty dict when none is found or on error.
    """
    disk_info = {}
    if vm_obj:
        try:
            for dev in vm_obj.config.hardware.device:
                if type(dev) is not vim.vm.device.VirtualDisk:
                    continue
                backing = dev.backing
                if isinstance(backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(backing, 'fileName'):
                    disk_info["full_path"] = backing.fileName
                    disk_info["datastore"] = backing.datastore
                    disk_info["capacityKB"] = dev.capacityInKB
                    break
        except Exception as exp:
            self.logger.error("find_disk() : exception occurred while "\
                              "getting existing disk details :{}".format(exp))
    return disk_info
++
++
def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info=None):
    """
    Attach an existing disk (found via find_disk) to a VM.

    Args :
        vcenter_conect - vCenter connection object (for task waiting)
        vm - vCenter vm object
        size - requested disk size in GB; the larger of this and the
               existing capacity is used
        vapp_uuid - vApp UUID, used only for rollback on failure
        disk_info - dict with 'datastore', 'full_path' and 'capacityKB'
                    (FIX: default changed from a shared mutable {} to None)
    Returns:
        status : status of the add-disk vCenter task; on failure the vApp
        is rolled back via rollback_newvm(), which raises.
    """
    disk_info = disk_info or {}
    datastore = disk_info.get("datastore")
    fullpath = disk_info.get("full_path")
    capacityKB = disk_info.get("capacityKB")
    if size is not None:
        # Convert size from GB to KB
        sizeKB = int(size) * 1024 * 1024
        # compare size of existing disk and user given size; assign whichever is greater
        self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
            sizeKB, capacityKB))
        # FIX: guard against a missing capacityKB (None is not orderable
        # against int on Python 3)
        if capacityKB is None or sizeKB > capacityKB:
            capacityKB = sizeKB

    if datastore and fullpath and capacityKB:
        try:
            spec = vim.vm.ConfigSpec()
            # get all disks on the VM; set unit_number to the next available
            unit_number = 0
            for dev in vm.config.hardware.device:
                if hasattr(dev.backing, 'fileName'):
                    unit_number = int(dev.unitNumber) + 1
                    # unit_number 7 is reserved for the SCSI controller
                    if unit_number == 7:
                        unit_number += 1
                if isinstance(dev, vim.vm.device.VirtualDisk):
                    # vim.vm.device.VirtualSCSIController
                    controller_key = dev.controllerKey

            self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
                unit_number, controller_key))
            # add disk here
            disk_spec = vim.vm.device.VirtualDeviceSpec()
            disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
            disk_spec.device = vim.vm.device.VirtualDisk()
            disk_spec.device.backing = \
                vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
            disk_spec.device.backing.thinProvisioned = True
            disk_spec.device.backing.diskMode = 'persistent'
            disk_spec.device.backing.datastore = datastore
            disk_spec.device.backing.fileName = fullpath

            disk_spec.device.unitNumber = unit_number
            disk_spec.device.capacityInKB = capacityKB
            disk_spec.device.controllerKey = controller_key
            spec.deviceChange = [disk_spec]
            task = vm.ReconfigVM_Task(spec=spec)
            status = self.wait_for_vcenter_task(task, vcenter_conect)
            return status
        except Exception as exp:
            exp_msg = "add_disk() : exception {} occurred while adding disk "\
                      "{} to vm {}".format(exp,
                                           fullpath,
                                           vm.config.name)
            self.rollback_newvm(vapp_uuid, exp_msg)
    else:
        msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
        self.rollback_newvm(vapp_uuid, msg)
++
++
def get_vcenter_content(self):
    """
    Open a pyVmomi session to the vCenter behind this VIM.

    Returns:
        (service_instance, content) tuple for further vSphere API calls
    Raises:
        vimconn.vimconnException when the vCenter settings are missing
    """
    try:
        vm_vcenter_info = self.get_vm_vcenter_info()
    except Exception as exp:
        self.logger.error("Error occurred while getting vCenter infromationn"\
                          " for VM : {}".format(exp))
        raise vimconn.vimconnException(message=exp)

    # self-signed vCenter certificates are common in lab deployments
    context = None
    if hasattr(ssl, '_create_unverified_context'):
        context = ssl._create_unverified_context()

    vcenter_conect = SmartConnect(host=vm_vcenter_info["vm_vcenter_ip"],
                                  user=vm_vcenter_info["vm_vcenter_user"],
                                  pwd=vm_vcenter_info["vm_vcenter_password"],
                                  port=int(vm_vcenter_info["vm_vcenter_port"]),
                                  sslContext=context)
    # make sure the session is closed at interpreter exit
    atexit.register(Disconnect, vcenter_conect)
    return vcenter_conect, vcenter_conect.RetrieveContent()
++
++
def get_vm_moref_id(self, vapp_uuid):
    """
    Return the vCenter managed-object reference (moref) id of the VM
    backing the given vApp, or None when it cannot be determined.

    Args:
        vapp_uuid - uuid of the vApp
    Returns:
        moref id string, or None
    """
    # FIX: initialize before use; the original left vm_moref_id unbound when
    # the details lookup came back empty, triggering a NameError that was
    # silently swallowed (and mislogged) by the except handler.
    vm_moref_id = None
    try:
        if vapp_uuid:
            vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
            if vm_details and "vm_vcenter_info" in vm_details:
                vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
        return vm_moref_id

    except Exception as exp:
        self.logger.error("Error occurred while getting VM moref ID "\
                          " for VM : {}".format(exp))
        return None
++
++
++ def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
++ """
++ Method to get vApp template details
++ Args :
++ catalogs - list of VDC catalogs
++ image_id - Catalog ID to find
++ template_name : template name in catalog
++ Returns:
++ parsed_response : dict of vApp template details; contains
++ 'vm_vcenter_info' with the template VM's 'vm_moref_id' when found
++ """
++ parsed_response = {}
++
++ vca = self.connect_as_admin()
++ if not vca:
++ raise vimconn.vimconnConnectionException("self.connect() is failed")
++
++ try:
++ catalog = self.get_catalog_obj(image_id, catalogs)
++ if catalog:
++ # NOTE(review): the template_name argument is overwritten by the catalog lookup.
++ template_name = self.get_catalogbyid(image_id, catalogs)
++ # NOTE: len()/indexing on filter() requires Python 2 (filter returns a list there)
++ catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
++ if len(catalog_items) == 1:
++ response = Http.get(catalog_items[0].get_href(),
++ headers=vca.vcloud_session.get_vcloud_headers(),
++ verify=vca.verify,
++ logger=vca.logger)
++ catalogItem = XmlElementTree.fromstring(response.content)
++ entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
++ vapp_tempalte_href = entity.get("href")
++ #get vapp details and parse moref id
++
++ namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
++ 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
++ 'vmw': 'http://www.vmware.com/schema/ovf',
++ 'vm': 'http://www.vmware.com/vcloud/v1.5',
++ 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
++ 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
++ 'xmlns':"http://www.vmware.com/vcloud/v1.5"
++ }
++
++ if vca.vcloud_session and vca.vcloud_session.organization:
++ response = Http.get(url=vapp_tempalte_href,
++ headers=vca.vcloud_session.get_vcloud_headers(),
++ verify=vca.verify,
++ logger=vca.logger
++ )
++
++ if response.status_code != requests.codes.ok:
++ self.logger.debug("REST API call {} failed. Return status code {}".format(
++ vapp_tempalte_href, response.status_code))
++
++ else:
++ # dig the vCenter MoRef of the template VM out of the VCloudExtension section
++ xmlroot_respond = XmlElementTree.fromstring(response.content)
++ children_section = xmlroot_respond.find('vm:Children/', namespaces)
++ if children_section is not None:
++ vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
++ if vCloud_extension_section is not None:
++ vm_vcenter_info = {}
++ vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
++ vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
++ if vmext is not None:
++ vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
++ parsed_response["vm_vcenter_info"]= vm_vcenter_info
++
++ except Exception as exp :
++ self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
++
++ return parsed_response
++
++
def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
    """
    Delete a (half-created) vApp and raise an exception carrying *msg*.

    Args :
        vapp_uuid - vApp UUID; when falsy the message is replaced with
                    "No vApp ID" and only the exception is raised
        msg - Error message to be logged
        exp_type : "Genric" raises vimconnException,
                   "NotFound" raises vimconnNotFoundException
    Returns:
        None (only when exp_type matches neither known value)
    """
    if vapp_uuid:
        self.delete_vminstance(vapp_uuid)
    else:
        msg = "No vApp ID"
    self.logger.error(msg)
    if exp_type == "Genric":
        raise vimconn.vimconnException(msg)
    elif exp_type == "NotFound":
        raise vimconn.vimconnNotFoundException(message=msg)
++
def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
    """
    Attach SRIOV adapters to the VM backing a vApp, migrating the VM to a
    host with enough free VFs when necessary.

    Args:
        vapp_uuid - uuid of vApp/VM
        sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
        vmname_andid - vmname

    Returns:
        (True, vm_obj, vcenter_conect) on success;
        (None, vm_obj, vcenter_conect) on a vmodl fault;
        implicit None when the vApp has no moref id or no SRIOV devices
        were requested.
    """
    vm_obj = None
    vcenter_conect, content = self.get_vcenter_content()
    vm_moref_id = self.get_vm_moref_id(vapp_uuid)

    if vm_moref_id:
        try:
            no_of_sriov_devices = len(sriov_nets)
            if no_of_sriov_devices > 0:
                # Get VM and its host
                host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
                if host_obj and vm_obj:
                    # get SRIOV devices from host on which vapp is currently installed
                    avilable_sriov_devices = self.get_sriov_devices(host_obj,
                                                                    no_of_sriov_devices,
                                                                    )

                    if len(avilable_sriov_devices) == 0:
                        # find other hosts with active pci devices
                        new_host_obj, avilable_sriov_devices = self.get_host_and_sriov_devices(
                            content,
                            no_of_sriov_devices,
                            )

                        if new_host_obj is not None and len(avilable_sriov_devices) > 0:
                            # Migrate vm to the host where SRIOV devices are available
                            self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
                                                                                    new_host_obj))
                            task = self.relocate_vm(new_host_obj, vm_obj)
                            if task is not None:
                                result = self.wait_for_vcenter_task(task, vcenter_conect)
                                self.logger.info("Migrate VM status: {}".format(result))
                                host_obj = new_host_obj
                            else:
                                # FIX: the original formatted this log with the
                                # undefined name 'result', raising NameError here.
                                self.logger.info("Fail to migrate VM : {}".format(vmname_andid))
                                raise vimconn.vimconnNotFoundException(
                                    "Fail to migrate VM : {} to host {}".format(
                                        vmname_andid,
                                        new_host_obj)
                                    )

                    if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices) > 0:
                        # Add SRIOV devices one by one
                        for sriov_net in sriov_nets:
                            network_name = sriov_net.get('net_id')
                            dvs_portgr_name = self.create_dvPort_group(network_name)
                            if sriov_net.get('type') == "VF":
                                # add vlan ID, modify portgroup for vlan ID
                                self.configure_vlanID(content, vcenter_conect, network_name)

                            task = self.add_sriov_to_vm(content,
                                                        vm_obj,
                                                        host_obj,
                                                        network_name,
                                                        avilable_sriov_devices[0]
                                                        )
                            if task:
                                status = self.wait_for_vcenter_task(task, vcenter_conect)
                                if status:
                                    self.logger.info("Added SRIOV {} to VM {}".format(
                                        no_of_sriov_devices,
                                        str(vm_obj)))
                            else:
                                self.logger.error("Fail to add SRIOV {} to VM {}".format(
                                    no_of_sriov_devices,
                                    str(vm_obj)))
                                # FIX: added the '{}' placeholder the original message lacked
                                raise vimconn.vimconnUnexpectedResponse(
                                    "Fail to add SRIOV adapter in VM {}".format(str(vm_obj))
                                    )
                        return True, vm_obj, vcenter_conect
                    else:
                        self.logger.error("Currently there is no host with"
                                          " {} number of avaialble SRIOV "
                                          "VFs required for VM {}".format(
                                              no_of_sriov_devices,
                                              vmname_andid)
                                          )
                        raise vimconn.vimconnNotFoundException(
                            "Currently there is no host with {} "
                            "number of avaialble SRIOV devices required for VM {}".format(
                                no_of_sriov_devices,
                                vmname_andid))
            else:
                self.logger.debug("No infromation about SRIOV devices {} ", sriov_nets)

        except vmodl.MethodFault as error:
            self.logger.error("Error occurred while adding SRIOV {} ", error)
            return None, vm_obj, vcenter_conect
++
++
def get_sriov_devices(self, host, no_of_vfs):
    """
    Look up SRIOV-capable PCI devices on the given host.

    Only the first active SRIOV device exposing at least no_of_vfs virtual
    functions is collected, so the result holds at most one entry.

    Args:
        host - vSphere host object
        no_of_vfs - number of VFs needed on host

    Returns:
        list with at most one matching SRIOV device (empty when none found)
    """
    matches = []
    if not host:
        return matches
    for pci_dev in host.config.pciPassthruInfo:
        is_active_sriov = isinstance(pci_dev, vim.host.SriovInfo) and pci_dev.sriovActive
        if is_active_sriov and pci_dev.numVirtualFunction >= no_of_vfs:
            matches.append(pci_dev)
            break
    return matches
++
++
def get_host_and_sriov_devices(self, content, no_of_vfs):
    """
    Scan every host in the inventory for one with enough free SRIOV VFs.

    Args:
        content - vCenter content (service instance) object
        no_of_vfs - number of pci VFs needed on host

    Returns:
        (host object, list of SRIOV devices) for the first suitable host,
        or (None, None) when no host qualifies or an error occurs.
    """
    found_host = None
    found_devices = None
    try:
        if content:
            host_view = content.viewManager.CreateContainerView(content.rootFolder,
                                                                [vim.HostSystem], True)
            for candidate in host_view.view:
                sriov_devs = self.get_sriov_devices(candidate, no_of_vfs)
                if sriov_devs:
                    found_host, found_devices = candidate, sriov_devs
                    break
    except Exception as exp:
        self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, found_host))

    return found_host, found_devices
++
++
def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
    """
    Build and submit a ReconfigVM_Task that plugs one SR-IOV NIC into vm_obj.

    Args:
        content - vCenter content object
        vm_obj - vSphere vm object
        host_obj - vSphere host object (unused here, kept for interface parity)
        network_name - name of the distributed virtual portgroup to back the NIC
        sriov_device - SRIOV device info whose physical function backs the NIC

    Returns:
        reconfiguration task object, or None on error
    """
    try:
        # resolve the (uuid-suffixed) portgroup actually created for this network
        portgroup = self.get_dvport_group(network_name)
        network_name = portgroup.name

        nic_change = vim.vm.device.VirtualDeviceSpec()
        nic_change.operation = vim.vm.device.VirtualDeviceSpec.Operation.add

        sriov_nic = vim.vm.device.VirtualSriovEthernetCard()
        sriov_nic.addressType = 'assigned'
        #sriov_nic.key = 13016
        sriov_nic.deviceInfo = vim.Description()
        sriov_nic.deviceInfo.label = "sriov nic"
        sriov_nic.deviceInfo.summary = network_name

        net_backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
        net_backing.network = self.get_obj(content, [vim.Network], network_name)
        net_backing.deviceName = network_name
        net_backing.useAutoDetect = False
        sriov_nic.backing = net_backing

        connect_info = vim.vm.device.VirtualDevice.ConnectInfo()
        connect_info.startConnected = True
        connect_info.allowGuestControl = True
        sriov_nic.connectable = connect_info

        # tie the virtual NIC to the physical function of the SRIOV device
        pf_backing = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
        pf_backing.id = sriov_device.id
        sriov_nic.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
        sriov_nic.sriovBacking.physicalFunctionBacking = pf_backing

        nic_change.device = sriov_nic
        reconf_spec = vim.vm.ConfigSpec(deviceChange=[nic_change])
        return vm_obj.ReconfigVM_Task(reconf_spec)
    except Exception as exp:
        self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
        return None
++
++
def create_dvPort_group(self, network_name):
    """
    Create a distributed virtual portgroup on the configured DVS.

    A random uuid suffix is appended to network_name so repeated requests
    for the same network yield unique portgroup names.

    Args:
        network_name - name of network/portgroup

    Returns:
        portgroup key on success, None on failure
    """
    try:
        # unique portgroup name: "<network_name>-<uuid4>"
        network_name = '-'.join([network_name, str(uuid.uuid4())])
        vcenter_conect, content = self.get_vcenter_content()

        dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
        if not dv_switch:
            # BUGFIX: original fell through silently when the DVS was missing
            self.logger.debug("No distributed virtual switch found with name {}".format(self.dvs_name))
            return None

        dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
        dv_pg_spec.name = network_name
        dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
        # lock down L2 security on the new portgroup
        dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
        dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
        dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
        dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
        dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)

        task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
        self.wait_for_vcenter_task(task, vcenter_conect)

        dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
        if dvPort_group:
            self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
            return dvPort_group.key
        # BUGFIX: original message wrongly said "virtual switch" in this branch
        self.logger.debug("No distributed virtual port group found with name {}".format(network_name))

    except Exception as exp:
        self.logger.error("Error occurred while creating disributed virtaul port group {}"\
                          " : {}".format(network_name, exp))
    return None
++
def reconfig_portgroup(self, content, dvPort_group_name, config_info=None):
    """
    Reconfigure a distributed virtual portgroup.

    Args:
        content - vCenter content object (kept for interface compatibility)
        dvPort_group_name - name (key) of the distributed virtual portgroup
        config_info - portgroup configuration dict; only 'vlanID' is honoured

    Returns:
        reconfiguration task object, or None when the portgroup is missing
        or an error occurs
    """
    # BUGFIX: default was a shared mutable dict ({}); use a None sentinel
    if config_info is None:
        config_info = {}
    try:
        dvPort_group = self.get_dvport_group(dvPort_group_name)
        if not dvPort_group:
            return None
        dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
        # configVersion must match the current object or vCenter rejects the task
        dv_pg_spec.configVersion = dvPort_group.config.configVersion
        dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
        if "vlanID" in config_info:
            dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
            dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')

        return dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
    except Exception as exp:
        self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
                          " : {}".format(dvPort_group_name, exp))
        return None
++
++
def destroy_dvport_group(self, dvPort_group_name):
    """
    Delete the named distributed virtual portgroup.

    Args:
        dvPort_group_name - name (key) of the portgroup to destroy

    Returns:
        destroy-task status when the portgroup was found and deleted,
        None when it was missing or a vmodl fault occurred
    """
    vcenter_conect, content = self.get_vcenter_content()
    try:
        status = None
        target_pg = self.get_dvport_group(dvPort_group_name)
        if target_pg:
            destroy_task = target_pg.Destroy_Task()
            status = self.wait_for_vcenter_task(destroy_task, vcenter_conect)
        return status
    except vmodl.MethodFault as exp:
        self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
                            exp, dvPort_group_name))
        return None
++
++
def get_dvport_group(self, dvPort_group_name):
    """
    Find a distributed virtual portgroup in the inventory.

    Args:
        dvPort_group_name - identifier of the portgroup

    Returns:
        portgroup object, or None when not found or on vmodl fault
    """
    vcenter_conect, content = self.get_vcenter_content()
    try:
        pg_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
        # NOTE(review): this matches on the portgroup *key*, not its display
        # name — callers appear to pass the key returned by create_dvPort_group;
        # confirm against call sites
        for candidate in pg_view.view:
            if candidate.key == dvPort_group_name:
                return candidate
        return None
    except vmodl.MethodFault as exp:
        self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
                            exp, dvPort_group_name))
        return None
++
def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
    """
    Read the vlan ID currently configured on a distributed virtual portgroup.

    Args:
        dvPort_group_name - identifier of the portgroup

    Returns:
        vlan ID, or None when the portgroup is missing or a fault occurs
    """
    try:
        portgroup = self.get_dvport_group(dvPort_group_name)
        if portgroup:
            return portgroup.config.defaultPortConfig.vlan.vlanId
    except vmodl.MethodFault as exp:
        self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
                            exp, dvPort_group_name))
    return None
++
++
def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
    """
    Assign a fresh vlan ID to a portgroup that currently has vlan 0.

    Args:
        content - vCenter content object
        vcenter_conect - vCenter connection object
        dvPort_group_name - identifier of the portgroup

    Returns:
        None
    """
    current_vlan = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
    # NOTE(review): only an exact vlanId of 0 triggers reconfiguration; a None
    # result (lookup failure) is left untouched — confirm this is intentional
    if current_vlan != 0:
        return
    # allocate and apply an unused vlan ID
    new_vlan = self.genrate_vlanID(dvPort_group_name)
    reconf_task = self.reconfig_portgroup(content, dvPort_group_name,
                                          config_info={"vlanID": new_vlan})
    if not reconf_task:
        return
    if self.wait_for_vcenter_task(reconf_task, vcenter_conect):
        self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
                                            dvPort_group_name, new_vlan))
    else:
        self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
                                dvPort_group_name, new_vlan))
++
++
def genrate_vlanID(self, network_name):
    """
    Allocate an unused vlan ID from the configured 'vlanID_range' and record
    it in persistent_info under network_name.

    Args:
        network_name - name of network/portgroup the ID is allocated for

    Returns:
        allocated vlan ID (int)

    Raises:
        vimconn.vimconnConflictException - no 'vlanID_range' configured, a
            range is inverted, or every ID in the ranges is already in use
    """
    used_ids = []
    if self.config.get('vlanID_range') is None:
        raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
            "at config value before creating sriov network with vlan tag")
    if "used_vlanIDs" not in self.persistent_info:
        self.persistent_info["used_vlanIDs"] = {}
    else:
        used_ids = self.persistent_info["used_vlanIDs"].values()

    for vlanID_range in self.config.get('vlanID_range'):
        # BUGFIX: original compared the bounds as strings, so e.g. "9-10"
        # was rejected ("9" > "10" lexicographically); compare as integers
        start_vlanid, end_vlanid = [int(v) for v in vlanID_range.split("-")]
        if start_vlanid > end_vlanid:
            raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
                                                                vlanID_range))

        for vlan_id in xrange(start_vlanid, end_vlanid + 1):
            if vlan_id not in used_ids:
                self.persistent_info["used_vlanIDs"][network_name] = vlan_id
                return vlan_id
    # all ranges exhausted
    raise vimconn.vimconnConflictException("All Vlan IDs are in use")
++
++
def get_obj(self, content, vimtype, name):
    """
    Return the first vSphere managed object of the given type(s) whose
    name matches, or None when nothing matches.
    """
    inventory = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
    for managed_obj in inventory.view:
        if managed_obj.name == name:
            return managed_obj
    return None
++
done
}
- function db_exists() {
- RESULT=`mysqlshow --defaults-extra-file="$2" | grep -v Wildcard | grep -o $1`
- if [ "$RESULT" == "$1" ]; then
- echo " DB $1 exists"
- return 0
- fi
- echo " DB $1 does not exist"
- return 1
- }
-
GIT_URL=https://osm.etsi.org/gerrit/osm/RO.git
+GIT_OVIM_URL=https://osm.etsi.org/gerrit/osm/openvim.git
DBUSER="root"
DBPASSWD=""
DBPASSWD_PARAM=""
#################################################################
##### INSTALL REQUIRED PACKAGES #####
#################################################################'
- [ "$_DISTRO" == "Ubuntu" ] && install_packages "git make screen wget mysql-server"
- [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "git make screen wget mariadb mariadb-server"
-[ "$_DISTRO" == "Ubuntu" ] && install_packages "git screen wget mysql-client"
-[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "git screen wget mariadb-client"
++[ "$_DISTRO" == "Ubuntu" ] && install_packages "git make screen wget mysql-client"
++[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "git make screen wget mariadb-client"
- if [[ "$_DISTRO" == "Ubuntu" ]]
- then
- #start services. By default CentOS does not start services
- service mysql start >> /dev/null
- # try to set admin password, ignore if fails
- [[ -n $DBPASSWD ]] && mysqladmin -u $DBUSER -s password $DBPASSWD
- fi
+
- if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ]
- then
- #start services. By default CentOS does not start services
- service mariadb start
- service httpd start
- systemctl enable mariadb
- systemctl enable httpd
- read -e -p "Do you want to configure mariadb (recommended if not done before) (Y/n)" KK
- [ "$KK" != "n" -a "$KK" != "no" ] && mysql_secure_installation
-
- read -e -p "Do you want to set firewall to grant web access port 80,443 (Y/n)" KK
- [ "$KK" != "n" -a "$KK" != "no" ] &&
- firewall-cmd --permanent --zone=public --add-service=http &&
- firewall-cmd --permanent --zone=public --add-service=https &&
- firewall-cmd --reload
- fi
- fi #[[ -z "$NO_PACKAGES" ]]
-
- #check and ask for database user password. Must be done after database installation
- if [[ -n $QUIET_MODE ]]
- then
- echo -e "\nCheking database connection and ask for credentials"
- while ! mysqladmin -s -u$DBUSER $DBPASSWD_PARAM status >/dev/null
- do
- [ -n "$logintry" ] && echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)"
- [ -z "$logintry" ] && echo -e "\nProvide database credentials"
- read -e -p "database user? ($DBUSER) " DBUSER_
- [ -n "$DBUSER_" ] && DBUSER=$DBUSER_
- read -e -s -p "database password? (Enter for not using password) " DBPASSWD_
- [ -n "$DBPASSWD_" ] && DBPASSWD="$DBPASSWD_" && DBPASSWD_PARAM="-p$DBPASSWD_"
- [ -z "$DBPASSWD_" ] && DBPASSWD="" && DBPASSWD_PARAM=""
- logintry="yes"
- done
- fi
+
+if [[ -z "$NO_PACKAGES" ]]
+then
echo '
#################################################################
##### INSTALL PYTHON PACKAGES #####
#################################################################'
su $SUDO_USER -c "git clone ${GIT_URL} ${OPENMANO_BASEFOLDER}"
su $SUDO_USER -c "cp ${OPENMANO_BASEFOLDER}/.gitignore-common ${OPENMANO_BASEFOLDER}/.gitignore"
- [[ -z $DEVELOP ]] && su $SUDO_USER -c "git -C ${OPENMANO_BASEFOLDER} checkout tags/v1.0.2"
+ [[ -z $DEVELOP ]] && su $SUDO_USER -c "git -C ${OPENMANO_BASEFOLDER} checkout tags/v1.1.0"
fi
- echo '
- #################################################################
- ##### CREATE DATABASE #####
- #################################################################'
- echo -e "\nCreating temporary file form MYSQL installation and initialization"
- TEMPFILE="$(mktemp -q --tmpdir "installopenmano.XXXXXX")"
- trap 'rm -f "$TEMPFILE"' EXIT
- chmod 0600 "$TEMPFILE"
- echo -e "[client]\n user='$DBUSER'\n password='$DBPASSWD'">"$TEMPFILE"
-
- if db_exists "mano_db" $TEMPFILE ; then
- if [[ -n $FORCEDB ]]; then
- echo " Deleting previous database mano_db"
- DBDELETEPARAM=""
- [[ -n $QUIET_MODE ]] && DBDELETEPARAM="-f"
- mysqladmin --defaults-extra-file=$TEMPFILE -s drop mano_db $DBDELETEPARAM || ! echo "Could not delete mano_db database" || exit 1
- #echo "REVOKE ALL PRIVILEGES ON mano_db.* FROM 'mano'@'localhost';" | mysql --defaults-extra-file=$TEMPFILE -s || ! echo "Failed while creating user mano at database" || exit 1
- #echo "DELETE USER 'mano'@'localhost';" | mysql --defaults-extra-file=$TEMPFILE -s || ! echo "Failed while creating user mano at database" || exit 1
- mysqladmin --defaults-extra-file=$TEMPFILE -s create mano_db || ! echo "Error creating mano_db database" || exit 1
- echo "DROP USER 'mano'@'localhost';" | mysql --defaults-extra-file=$TEMPFILE -s || ! echo "Failed while creating user mano at database" || exit 1
- echo "CREATE USER 'mano'@'localhost' identified by 'manopw';" | mysql --defaults-extra-file=$TEMPFILE -s || ! echo "Failed while creating user mano at database" || exit 1
- echo "GRANT ALL PRIVILEGES ON mano_db.* TO 'mano'@'localhost';" | mysql --defaults-extra-file=$TEMPFILE -s || ! echo "Failed while creating user mano at database" || exit 1
- echo " Database 'mano_db' created, user 'mano' password 'manopw'"
- else
- echo "Database exists. Use option '--forcedb' to force the deletion of the existing one" && exit 1
- fi
- else
- mysqladmin -u$DBUSER $DBPASSWD_PARAM -s create mano_db || ! echo "Error creating mano_db database" || exit 1
- echo "CREATE USER 'mano'@'localhost' identified by 'manopw';" | mysql --defaults-extra-file=$TEMPFILE -s || ! echo "Failed while creating user mano at database" || exit 1
- echo "GRANT ALL PRIVILEGES ON mano_db.* TO 'mano'@'localhost';" | mysql --defaults-extra-file=$TEMPFILE -s || ! echo "Failed while creating user mano at database" || exit 1
- echo " Database 'mano_db' created, user 'mano' password 'manopw'"
- fi
+echo '
+#################################################################
+##### INSTALLING OVIM LIBRARY #####
+#################################################################'
+su $SUDO_USER -c "git -C ${OPENMANO_BASEFOLDER} clone ${GIT_OVIM_URL} openvim"
+[[ -z $DEVELOP ]] && su $SUDO_USER -c "git -C ${OPENMANO_BASEFOLDER}/openvim checkout master"
+# Install debian dependencies before setup.py
+#[ "$_DISTRO" == "Ubuntu" ] && install_packages "git"
+#[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "git"
+make -C ${OPENMANO_BASEFOLDER}/openvim lite
+
- echo '
- #################################################################
- ##### INIT DATABASE #####
- #################################################################'
- su $SUDO_USER -c "${OPENMANO_BASEFOLDER}/database_utils/init_mano_db.sh -u mano -p manopw -d mano_db" || ! echo "Failed while initializing main database" || exit 1
-
- echo '
- #################################################################
- ##### CREATE AND INIT MANO_VIM DATABASE #####
- #################################################################'
- # Install mano_vim_db after setup
- su $SUDO_USER -c "${OPENMANO_BASEFOLDER}/openvim/database_utils/install-db-server.sh -U $DBUSER ${DBPASSWD_PARAM/p/P} -u mano -p manopw -d mano_vim_db" || ! echo "Failed while installing ovim database" || exit 1
-
+
+
if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ]
then
echo '
su $SUDO_USER -c 'echo ". ${HOME}/.bash_completion.d/python-argcomplete.sh" >> ~/.bashrc'
fi
+ if [ -z "$NO_DB" ]; then
+ echo '
+ #################################################################
+ ##### INSTALL DATABASE SERVER #####
+ #################################################################'
- ${OPENMANO_BASEFOLDER}/scripts/install-db-server.sh -u $DBUSER $DBPASSWD_PARAM $DB_QUIET $DB_FORCE || exit 1
+ if [ -n "$QUIET_MODE" ]; then
+ DB_QUIET='-q'
+ fi
+ if [ -n "$FORCEDB" ]; then
+ DB_FORCE='--forcedb'
+ fi
++ ${OPENMANO_BASEFOLDER}/database_utils/install-db-server.sh -u $DBUSER $DBPASSWD_PARAM $DB_QUIET $DB_FORCE || exit 1
++echo '
++#################################################################
++##### CREATE AND INIT MANO_VIM DATABASE #####
++#################################################################'
++# Install mano_vim_db after setup
++ ${OPENMANO_BASEFOLDER}/openvim/database_utils/install-db-server.sh -U $DBUSER ${DBPASSWD_PARAM/p/P} -u mano -p manopw -d mano_vim_db || exit 1
+
+ fi
if [[ -n "$INSTALL_AS_A_SERVICE" ]]
then