--- /dev/null
+# Copyright 2018 Telefonica S.A.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Default target: wipe previous artefacts, then build the Debian package.
all: clean package

# Remove every artefact produced by a previous build.
clean:
	rm -rf dist deb_dist osm_roclient-*.tar.gz osm_roclient.egg-info .eggs

# Build the Debian source package with stdeb, copy in the maintainer
# postinst script, and produce the binary .deb (unsigned).
package:
	python3 setup.py --command-packages=stdeb.command sdist_dsc
	cp debian/python3-osm-roclient.postinst deb_dist/osm-roclient*/debian/
	cd deb_dist/osm-roclient*/ && dpkg-buildpackage -rfakeroot -uc -us
+
--- /dev/null
+ Copyright 2018 Telefonica S.A.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+============
+osm-roclient
+============
+
osm-roclient is a client to interact with the osm-ro server
+
--- /dev/null
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+
# Configure argcomplete tab-completion of the RO client for the invoking
# user (the real user when this postinst runs under sudo).
[ -z "$SUDO_USER" ] && SUDO_USER="$USER"
# Install the global python-argcomplete hook into the user's home.
su $SUDO_USER -c 'activate-global-python-argcomplete3 --user'
# Ensure .bashrc sources the completion script, adding the line only once.
if ! su $SUDO_USER -c 'grep -q bash_completion.d/python-argcomplete.sh ${HOME}/.bashrc'
then
    echo " inserting .bash_completion.d/python-argcomplete.sh execution at .bashrc"
    su $SUDO_USER -c 'echo ". ${HOME}/.bash_completion.d/python-argcomplete.sh" >> ~/.bashrc'
fi
+
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# PYTHON_ARGCOMPLETE_OK
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+openmano client used to interact with openmano-server (openmanod)
+"""
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
__date__ = "$09-oct-2014 09:09:48$"
__version__ = "0.5.0"
# Release date of this version (fixed invalid month "010" -> "10")
version_date = "2019-10-04"
+
+from argcomplete.completers import FilesCompleter
+import os
+import argparse
+import argcomplete
+import requests
+import json
+import yaml
+import logging
+#from jsonschema import validate as js_v, exceptions as js_e
+
+
class ArgumentParserError(Exception):
    """Raised by ThrowingArgumentParser instead of exiting on a parse error."""
+
+
class OpenmanoCLIError(Exception):
    """Generic client-side error (bad configuration or lookup failure)."""
+
+
class ThrowingArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that raises ArgumentParserError instead of exiting."""

    def error(self, message):
        """Print the error plus usage help, then raise ArgumentParserError."""
        # One print with a trailing newline emits the same bytes as
        # print(msg) followed by an empty print().
        print("Error: {}\n".format(message))
        self.print_usage()
        print("\nType 'openmano -h' for help")
        raise ArgumentParserError
+
+
def config(args):
    """Print the client configuration taken from the environment/globals.

    Always prints host, port, tenant, datacenter and WIM.  When args.n is
    set it additionally queries the server to resolve the tenant,
    datacenter and WIM ids and names.
    """
    print("OPENMANO_HOST: {}".format(mano_host))
    print("OPENMANO_PORT: {}".format(mano_port))
    if args.n:
        logger.debug("resolving tenant and datacenter names")
        # placeholder strings shown when the lookups below fail
        mano_tenant_id = "None"
        mano_tenant_name = "None"
        mano_datacenter_id = "None"
        mano_datacenter_name = "None"
        # WIM additions
        logger.debug("resolving WIM names")
        mano_wim_id = "None"
        mano_wim_name = "None"
        try:
            mano_tenant_id = _get_item_uuid("tenants", mano_tenant)
            URLrequest = "http://{}:{}/openmano/tenants/{}".format(mano_host, mano_port, mano_tenant_id)
            mano_response = requests.get(URLrequest)
            logger.debug("openmano response: %s", mano_response.text )
            content = mano_response.json()
            mano_tenant_name = content["tenant"]["name"]
            URLrequest = "http://{}:{}/openmano/{}/datacenters/{}".format(mano_host, mano_port, mano_tenant_id,
                                                                          mano_datacenter)
            mano_response = requests.get(URLrequest)
            logger.debug("openmano response: %s", mano_response.text)
            content = mano_response.json()
            if "error" not in content:
                mano_datacenter_id = content["datacenter"]["uuid"]
                mano_datacenter_name = content["datacenter"]["name"]

            # WIM
            URLrequest = "http://{}:{}/openmano/{}/wims/{}".format(
                mano_host, mano_port, mano_tenant_id, mano_wim)
            mano_response = requests.get(URLrequest)
            logger.debug("openmano response: %s", mano_response.text)
            content = mano_response.json()
            if "error" not in content:
                mano_wim_id = content["wim"]["uuid"]
                mano_wim_name = content["wim"]["name"]

        except OpenmanoCLIError:
            # lookup failed: keep the "None" placeholders and print them
            pass
        print( "OPENMANO_TENANT: {}".format(mano_tenant))
        print( " Id: {}".format(mano_tenant_id))
        print( " Name: {}".format(mano_tenant_name))
        print( "OPENMANO_DATACENTER: {}".format(mano_datacenter))
        print( " Id: {}".format(mano_datacenter_id))
        print( " Name: {}".format(mano_datacenter_name))
        # WIM
        print( "OPENMANO_WIM: {}".format( (mano_wim)))
        print( " Id: {}".format(mano_wim_id))
        print( " Name: {}".format(mano_wim_name))

    else:
        # without -n just echo the raw environment values
        print("OPENMANO_TENANT: {}".format(mano_tenant))
        print("OPENMANO_DATACENTER: {}".format(mano_datacenter))
        # WIM
        print("OPENMANO_WIM: {}".format(mano_wim))
+
+def _print_verbose(mano_response, verbose_level=0):
+ content = mano_response.json()
+ result = 0 if mano_response.status_code==200 else mano_response.status_code
+ if type(content)!=dict or len(content)!=1:
+ # print("Non expected format output")
+ print(str(content))
+ return result
+
+ val = next(iter(content.values()))
+ if type(val)==str:
+ print(val)
+ return result
+ elif type(val) == list:
+ content_list = val
+ elif type(val)==dict:
+ content_list = [val]
+ else:
+ # print("Non expected dict/list format output"
+ print(str(content))
+ return result
+
+ # print(content_list
+ if verbose_level==None:
+ verbose_level=0
+ if verbose_level >= 3:
+ print(yaml.safe_dump(content, indent=4, default_flow_style=False))
+ return result
+
+ if mano_response.status_code == 200:
+ uuid = None
+ for content in content_list:
+ if "uuid" in content:
+ uuid = content['uuid']
+ elif "id" in content:
+ uuid = content['id']
+ elif "vim_id" in content:
+ uuid = content['vim_id']
+ name = content.get('name');
+ if not uuid:
+ uuid = ""
+ if not name:
+ name = ""
+ myoutput = "{:38} {:20}".format(uuid, name)
+ if content.get("status"):
+ myoutput += " {:20}".format(content['status'])
+ elif "enabled" in content and not content["enabled"]:
+ myoutput += " enabled=False".ljust(20)
+ if verbose_level >=1:
+ if content.get('created_at'):
+ myoutput += " {:20}".format(content['created_at'])
+ if content.get('sdn_attached_ports'):
+ #myoutput += " " + str(content['sdn_attached_ports']).ljust(20)
+ myoutput += "\nsdn_attached_ports:\n" + yaml.safe_dump(content['sdn_attached_ports'], indent=4, default_flow_style=False)
+ if verbose_level >=2:
+ new_line='\n'
+ if content.get('type'):
+ myoutput += new_line + " Type: {:29}".format(content['type'])
+ new_line=''
+ if content.get('description'):
+ myoutput += new_line + " Description: {:20}".format(content['description'])
+ print(myoutput)
+ else:
+ print(content['error']['description'])
+ return result
+
def parser_json_yaml(file_name):
    """Read file_name and parse it as YAML or JSON.

    Returns (True, parsed_content) on success, (False, error_text) on failure.
    """
    try:
        with open(file_name, "r") as stream:
            text = stream.read()
    except Exception as e:
        return (False, str(e))

    # Choose the parser from the extension; with an unknown extension,
    # tab-free content is assumed to be yaml, anything else json.
    treat_as_yaml = (file_name.endswith('.yaml') or file_name.endswith('.yml')
                     or (not file_name.endswith('.json') and '\t' not in text))
    if treat_as_yaml:
        try:
            parsed = yaml.load(text, Loader=yaml.SafeLoader)
        except yaml.YAMLError as exc:
            error_pos = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                error_pos = " at line:{} column:{}".format(mark.line+1, mark.column+1)
            return (False, "Error loading file '"+file_name+"' yaml format error" + error_pos)
    else:
        try:
            parsed = json.loads(text)
        except Exception as e:
            return (False, "Error loading file '"+file_name+"' json format error " + str(e) )
    return True, parsed
+
def _load_file_or_yaml(content):
    '''
    'content' can be or a yaml/json file or a text containing a yaml/json text format
    This function autodetect, trying to load and parse the file,
    if fails trying to parse the 'content' text
    Returns the dictionary once parsed, or print an error and finish the program
    '''
    # Check config file exists
    if os.path.isfile(content):
        r, payload = parser_json_yaml(content)
        if not r:
            print(payload)
            exit(-1)
    elif "{" in content or ":" in content:
        try:
            # safe_load for consistency with parser_json_yaml and to avoid
            # constructing arbitrary python objects from untrusted text
            payload = yaml.safe_load(content)
        except yaml.YAMLError as exc:
            error_pos = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                error_pos = " at position: ({}:{})".format(mark.line+1, mark.column+1)
            print("Error loading yaml/json text"+error_pos)
            exit (-1)
    else:
        print("'{}' is neither a valid file nor a yaml/json content".format(content))
        exit(-1)
    return payload
+
def _get_item_uuid(item, item_name_id, tenant=None):
    """Resolve item_name_id (a uuid, a name, or "osm_id=<id>") to a uuid.

    Raises OpenmanoCLIError when nothing matches or the name is ambiguous.
    """
    if tenant:
        URLrequest = "http://{}:{}/openmano/{}/{}".format(mano_host, mano_port, tenant, item)
    else:
        URLrequest = "http://{}:{}/openmano/{}".format(mano_host, mano_port, item)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text )
    content = mano_response.json()
    matches = []
    for entry in content[item]:
        # an exact uuid match is unambiguous: return it straight away
        if entry["uuid"] == item_name_id:
            return item_name_id
        if entry["name"] == item_name_id:
            matches.append(entry["uuid"])
        if item_name_id.startswith("osm_id=") and entry.get("osm_id") == item_name_id[7:]:
            matches.append(entry["uuid"])
    if not matches:
        raise OpenmanoCLIError("No {} found with name/uuid '{}'".format(item[:-1], item_name_id))
    elif len(matches) > 1:
        raise OpenmanoCLIError("{} {} found with name '{}'. uuid must be used".format(len(matches), item,
                                                                                     item_name_id))
    return matches[0]
+#
+# def check_valid_uuid(uuid):
+# id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+# try:
+# js_v(uuid, id_schema)
+# return True
+# except js_e.ValidationError:
+# return False
+
def _get_tenant(tenant_name_id=None):
    """Return the uuid of the given tenant, defaulting to $OPENMANO_TENANT."""
    if not tenant_name_id:
        if not mano_tenant:
            raise OpenmanoCLIError("'OPENMANO_TENANT' environment variable is not set")
        tenant_name_id = mano_tenant
    return _get_item_uuid("tenants", tenant_name_id)
+
def _get_datacenter(datacenter_name_id=None, tenant="any"):
    """Return the uuid of the given datacenter, defaulting to $OPENMANO_DATACENTER."""
    datacenter_name_id = datacenter_name_id or mano_datacenter
    if not datacenter_name_id:
        raise OpenmanoCLIError("neither 'OPENMANO_DATACENTER' environment variable is set nor --datacenter option is used")
    return _get_item_uuid("datacenters", datacenter_name_id, tenant)
+
+# WIM
def _get_wim(wim_name_id=None, tenant="any"):
    """Return the uuid of the given WIM, defaulting to $OPENMANO_WIM."""
    wim_name_id = wim_name_id or mano_wim
    if not wim_name_id:
        raise OpenmanoCLIError("neither 'OPENMANO_WIM' environment variable is set nor --wim option is used")
    return _get_item_uuid("wims", wim_name_id, tenant)
+
def vnf_create(args):
    """Upload a VNF descriptor (file given with --file) to the server.

    Supports both the OSM IM v3 catalog layout ("vnfd:vnfd-catalog") and
    the classic openmano layout ("vnf").  Command-line options can
    override name, description and per-VDU image path/name/checksum
    before uploading.  Returns _print_verbose()'s result, or -1 when the
    descriptor cannot be patched.
    """
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    tenant = _get_tenant()
    myvnf = _load_file_or_yaml(args.file)

    api_version = ""
    if "vnfd:vnfd-catalog" in myvnf or "vnfd-catalog" in myvnf:
        # OSM IM (API v3) descriptor; only the first vnfd of the catalog is patched
        api_version = "/v3"
        token = "vnfd"
        vnfd_catalog = myvnf.get("vnfd:vnfd-catalog")
        if not vnfd_catalog:
            vnfd_catalog = myvnf.get("vnfd-catalog")
        vnfds = vnfd_catalog.get("vnfd:vnfd")
        if not vnfds:
            vnfds = vnfd_catalog.get("vnfd")
        vnfd = vnfds[0]
        vdu_list = vnfd.get("vdu")

    else:  # old API
        api_version = ""
        token = "vnfs"
        vnfd = myvnf['vnf']
        vdu_list = vnfd.get("VNFC")

    if args.name or args.description or args.image_path or args.image_name or args.image_checksum:
        # TODO, change this for API v3
        try:
            if args.name:
                vnfd['name'] = args.name
            if args.description:
                vnfd['description'] = args.description
            if vdu_list:
                if args.image_path:
                    # comma-separated list, one entry per VDU, in order
                    index = 0
                    for image_path_ in args.image_path.split(","):
                        if api_version == "/v3":
                            if vdu_list[index].get("image"):
                                vdu_list[index]['image'] = image_path_
                                # a stale checksum would no longer match the new image
                                if "image-checksum" in vdu_list[index]:
                                    del vdu_list[index]["image-checksum"]
                            else:  # image name in volumes
                                vdu_list[index]["volumes"][0]["image"] = image_path_
                                if "image-checksum" in vdu_list[index]["volumes"][0]:
                                    del vdu_list[index]["volumes"][0]["image-checksum"]
                        else:
                            vdu_list[index]['VNFC image'] = image_path_
                            if "image name" in vdu_list[index]:
                                del vdu_list[index]["image name"]
                            if "image checksum" in vdu_list[index]:
                                del vdu_list[index]["image checksum"]
                        index += 1
                if args.image_name:  # image name precedes if both are supplied
                    index = 0
                    for image_name_ in args.image_name.split(","):
                        if api_version == "/v3":
                            if vdu_list[index].get("image"):
                                vdu_list[index]['image'] = image_name_
                                if "image-checksum" in vdu_list[index]:
                                    del vdu_list[index]["image-checksum"]
                                if vdu_list[index].get("alternative-images"):
                                    for a_image in vdu_list[index]["alternative-images"]:
                                        a_image['image'] = image_name_
                                        if "image-checksum" in a_image:
                                            del a_image["image-checksum"]
                            else:  # image name in volumes
                                vdu_list[index]["volumes"][0]["image"] = image_name_
                                if "image-checksum" in vdu_list[index]["volumes"][0]:
                                    del vdu_list[index]["volumes"][0]["image-checksum"]
                        else:
                            vdu_list[index]['image name'] = image_name_
                            if "VNFC image" in vdu_list[index]:
                                del vdu_list[index]["VNFC image"]
                        index += 1
                if args.image_checksum:
                    index = 0
                    for image_checksum_ in args.image_checksum.split(","):
                        if api_version == "/v3":
                            if vdu_list[index].get("image"):
                                vdu_list[index]['image-checksum'] = image_checksum_
                                if vdu_list[index].get("alternative-images"):
                                    for a_image in vdu_list[index]["alternative-images"]:
                                        a_image['image-checksum'] = image_checksum_
                            else:  # image name in volumes
                                vdu_list[index]["volumes"][0]["image-checksum"] = image_checksum_
                        else:
                            vdu_list[index]['image checksum'] = image_checksum_
                        index += 1
        except (KeyError, TypeError) as e:
            # NOTE(review): 'index' is only bound when one of the image
            # options was given; an error raised while patching only
            # name/description would hit str(index) unbound — confirm
            if str(e) == 'vnf': error_pos= "missing field 'vnf'"
            elif str(e) == 'name': error_pos= "missing field 'vnf':'name'"
            elif str(e) == 'description': error_pos= "missing field 'vnf':'description'"
            elif str(e) == 'VNFC': error_pos= "missing field 'vnf':'VNFC'"
            elif str(e) == str(index): error_pos= "field 'vnf':'VNFC' must be an array"
            elif str(e) == 'VNFC image': error_pos= "missing field 'vnf':'VNFC'['VNFC image']"
            elif str(e) == 'image name': error_pos= "missing field 'vnf':'VNFC'['image name']"
            elif str(e) == 'image checksum': error_pos= "missing field 'vnf':'VNFC'['image checksum']"
            else: error_pos="wrong format"
            print("Wrong VNF descriptor: " + error_pos)
            return -1
    payload_req = json.dumps(myvnf)

    URLrequest = "http://{}:{}/openmano{}/{}/{token}".format(mano_host, mano_port, api_version, tenant, token=token)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text )

    return _print_verbose(mano_response, args.verbose)
+
def vnf_list(args):
    """List VNFs of the tenant (or all tenants with --all).

    Without a name: one summary line per VNF.  With a name: a detailed
    view of that VNF (VMs, internal nets, external interfaces).
    Returns 0 on success, otherwise the HTTP status code (404 when the
    list is empty).
    """
    if args.all:
        tenant = "any"
    else:
        tenant = _get_tenant()
    if args.name:
        toshow = _get_item_uuid("vnfs", args.name, tenant)
        URLrequest = "http://{}:{}/openmano/{}/vnfs/{}".format(mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://{}:{}/openmano/{}/vnfs".format(mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text )
    content = mano_response.json()
    if args.verbose==None:
        args.verbose=0
    result = 0 if mano_response.status_code==200 else mano_response.status_code
    if mano_response.status_code == 200:
        if not args.name:
            # listing mode
            if args.verbose >= 3:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            if len(content['vnfs']) == 0:
                print("No VNFs were found.")
                return 404  # HTTP_Not_Found
            for vnf in content['vnfs']:
                myoutput = "{:38} {:20}".format(vnf['uuid'], vnf['name'])
                if vnf.get('osm_id') or args.verbose >= 1:
                    myoutput += " osm_id={:20}".format(vnf.get('osm_id'))
                if args.verbose >= 1:
                    myoutput += " {}".format(vnf['created_at'])
                print(myoutput)
                if args.verbose >= 2:
                    print(" Description: {}".format(vnf['description']))
        else:
            # detailed single-VNF mode
            if args.verbose:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            vnf = content['vnf']
            print("{:38} {:20} osm_id={:20} {:20}".format(vnf['uuid'], vnf['name'], vnf.get('osm_id'),
                                                          vnf['created_at']), end=" ")
            print(" Description: {}".format(vnf['description']))
            print(" VMs:")
            for vm in vnf['VNFC']:
                print(" {:20} osm_id={:20} {}".format(vm['name'], vm.get('osm_id'), vm['description']))
            if len(vnf['nets']) > 0:
                print(" Internal nets:")
                for net in vnf['nets']:
                    print(" {:20} {}".format(net['name'], net['description']))
            if len(vnf['external-connections']) > 0:
                print(" External interfaces:")
                for interface in vnf['external-connections']:
                    print(" {:20} {:20} {:20} {:14}".format(
                        interface['external_name'], interface['vm_name'],
                        interface['internal_name'],
                        interface.get('vpci') if interface.get('vpci') else ""))
    else:
        print(content['error']['description'])
        if args.verbose:
            print(yaml.safe_dump(content, indent=4, default_flow_style=False))
    return result
+
def vnf_delete(args):
    """Delete a VNF descriptor, asking for confirmation unless --force."""
    tenant = "any" if args.all else _get_tenant()
    todelete = _get_item_uuid("vnfs", args.name, tenant=tenant)
    if not args.force:
        answer = input("Delete VNF {} (y/N)? ".format(todelete))
        if not answer or answer[0].lower() != "y":
            return 0
    url = "http://{}:{}/openmano/{}/vnfs/{}".format(mano_host, mano_port, tenant, todelete)
    mano_response = requests.delete(url)
    logger.debug("openmano response: %s", mano_response.text)
    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    content = mano_response.json()
    if mano_response.status_code == 200:
        print(content['result'])
    else:
        print(content['error']['description'])
    return result
+
def scenario_create(args):
    """Upload a scenario/NS descriptor (--file) to the server."""
    tenant = _get_tenant()
    headers_req = {'content-type': 'application/yaml'}
    myscenario = _load_file_or_yaml(args.file)
    if "nsd:nsd-catalog" in myscenario or "nsd-catalog" in myscenario:
        # OSM IM (API v3) descriptor; patch the first nsd of the catalog
        api_version = "/v3"
        token = "nsd"
        nsd_catalog = myscenario.get("nsd:nsd-catalog") or myscenario.get("nsd-catalog")
        nsds = nsd_catalog.get("nsd:nsd") or nsd_catalog.get("nsd")
        nsd = nsds[0]
    else:  # API<v3
        api_version = ""
        token = "scenarios"
        nsd = myscenario.get("scenario", myscenario)
    # TODO modify for API v3
    if args.name:
        nsd['name'] = args.name
    if args.description:
        nsd['description'] = args.description
    payload_req = yaml.safe_dump(myscenario, explicit_start=True, indent=4, default_flow_style=False,
                                 tags=False, allow_unicode=True)

    URLrequest = "http://{host}:{port}/openmano{api}/{tenant}/{token}".format(
        host=mano_host, port=mano_port, api=api_version, tenant=tenant, token=token)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
def scenario_list(args):
    """List scenarios of the tenant (or all tenants with --all).

    Without a name: one summary line per scenario.  With a name: a
    detailed view (VNFs and nets).  Returns 0 on success, otherwise the
    HTTP status code (404 when the list is empty).
    """
    if args.all:
        tenant = "any"
    else:
        tenant = _get_tenant()
    if args.name:
        toshow = _get_item_uuid("scenarios", args.name, tenant)
        URLrequest = "http://{}:{}/openmano/{}/scenarios/{}".format(mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://{}:{}/openmano/{}/scenarios".format(mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text )
    content = mano_response.json()
    if args.verbose==None:
        args.verbose=0

    result = 0 if mano_response.status_code==200 else mano_response.status_code
    if mano_response.status_code == 200:
        if not args.name:
            # listing mode
            if args.verbose >= 3:
                print( yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            if len(content['scenarios']) == 0:
                print( "No scenarios were found.")
                return 404  # HTTP_Not_Found
            for scenario in content['scenarios']:
                myoutput = "{:38} {:20}".format(scenario['uuid'], scenario['name'])
                if scenario.get('osm_id') or args.verbose >= 1:
                    myoutput += " osm_id={:20}".format(scenario.get('osm_id'))
                if args.verbose >= 1:
                    myoutput += " {}".format(scenario['created_at'])
                print(myoutput)
                if args.verbose >=2:
                    print(" Description: {}".format(scenario['description']))
        else:
            # detailed single-scenario mode
            if args.verbose:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            scenario = content['scenario']
            print("{:38} {:20} osm_id={:20} {:20}".format(scenario['uuid'], scenario['name'], scenario.get('osm_id'),
                                                          scenario['created_at']), end=" ")
            print(" Description: {}".format(scenario['description']))
            print(" VNFs:")
            for vnf in scenario['vnfs']:
                print(" {:38} {:20} vnf_index={} {}".format(vnf['vnf_id'], vnf['name'], vnf.get("member_vnf_index"),
                                                            vnf['description']))
            if len(scenario['nets']) > 0:
                print(" nets:")
                for net in scenario['nets']:
                    description = net['description']
                    if not description:  # if description does not exist, description is "-". Valid for external and internal nets.
                        description = '-'
                    vim_id = ""
                    if net.get('vim_id'):
                        vim_id = " vim_id=" + net["vim_id"]
                    external = ""
                    if net["external"]:
                        external = " external"
                    print(" {:20} {:38} {:30}{}{}".format(net['name'], net['uuid'], description, vim_id, external))
    else:
        print(content['error']['description'])
        if args.verbose:
            print(yaml.safe_dump(content, indent=4, default_flow_style=False))
    return result
+
def scenario_delete(args):
    """Delete a scenario, asking for confirmation unless --force."""
    tenant = "any" if args.all else _get_tenant()
    todelete = _get_item_uuid("scenarios", args.name, tenant=tenant)
    if not args.force:
        answer = input("Delete scenario {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0
    url = "http://{}:{}/openmano/{}/scenarios/{}".format(mano_host, mano_port, tenant, todelete)
    mano_response = requests.delete(url)
    logger.debug("openmano response: %s", mano_response.text)
    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    content = mano_response.json()
    if mano_response.status_code == 200:
        print( content['result'])
    else:
        print( content['error']['description'])
    return result
+
def scenario_deploy(args):
    """Deprecated alias: delegate to instance_create()."""
    print("This command is deprecated, use 'openmano instance-scenario-create --scenario {} --name {}' instead!!!".format(args.scenario, args.name))
    print()
    # fill the options instance_create() expects but this command lacks
    for option in ("file", "netmap_use", "netmap_create", "keypair", "keypair_auto"):
        setattr(args, option, None)
    return instance_create(args)
+
+# # print("scenario-deploy",args
+# headers_req = {'content-type': 'application/json'}
+# action = {}
+# actionCmd="start"
+# if args.nostart:
+# actionCmd="reserve"
+# action[actionCmd] = {}
+# action[actionCmd]["instance_name"] = args.name
+# if args.datacenter != None:
+# action[actionCmd]["datacenter"] = args.datacenter
+# elif mano_datacenter != None:
+# action[actionCmd]["datacenter"] = mano_datacenter
+#
+# if args.description:
+# action[actionCmd]["description"] = args.description
+# payload_req = json.dumps(action, indent=4)
+# # print(payload_req
+#
+# URLrequest = "http://{}:{}/openmano/{}/scenarios/{}/action".format(mano_host, mano_port, mano_tenant, args.scenario)
+# logger.debug("openmano request: %s", payload_req)
+# mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
+# logger.debug("openmano response: %s", mano_response.text )
+# if args.verbose==None:
+# args.verbose=0
+#
+# result = 0 if mano_response.status_code==200 else mano_response.status_code
+# content = mano_response.json()
+# # print(json.dumps(content, indent=4))
+# if args.verbose >= 3:
+# print(yaml.safe_dump(content, indent=4, default_flow_style=False))
+# return result
+#
+# if mano_response.status_code == 200:
+# myoutput = "{} {}".format(content['uuid'].ljust(38),content['name'].ljust(20))
+# if args.verbose >=1:
+# myoutput = "{} {}".format(myoutput, content['created_at'].ljust(20))
+# if args.verbose >=2:
+# myoutput = "{} {} {}".format(myoutput, content['description'].ljust(30))
+# print(myoutput)
+# print("")
+# print("To check the status, run the following command:")
+# print("openmano instance-scenario-list <instance_id>"
+# else:
+# print(content['error']['description'])
+# return result
+
def scenario_verify(args):
    """Ask the server to verify a scenario via the 'verify' action."""
    tenant = _get_tenant()
    headers_req = {'content-type': 'application/json'}
    action = {"verify": {"instance_name": "scen-verify-return5"}}
    payload_req = json.dumps(action, indent=4)

    URLrequest = "http://{}:{}/openmano/{}/scenarios/{}/action".format(mano_host, mano_port, tenant, args.scenario)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)

    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    content = mano_response.json()
    if mano_response.status_code == 200:
        print(content['result'])
    else:
        print(content['error']['description'])
    return result
+
def instance_create(args):
    """Create a scenario instance (deploy) from a file and/or CLI options.

    Builds the {"instance": ...} request from --file (if given) and then
    overrides it with --name, --description, --nostart, --datacenter,
    --scenario, netmap and keypair options before POSTing it.
    Returns 0 on success, the HTTP status code on server error, -1/1/None
    on client-side validation errors.
    """
    tenant = _get_tenant()
    headers_req = {'content-type': 'application/yaml'}
    myInstance = {"instance": {}, "schema_version": "0.1"}
    if args.file:
        instance_dict = _load_file_or_yaml(args.file)
        if "instance" not in instance_dict:
            # bare descriptor: wrap it in the expected envelope
            myInstance = {"instance": instance_dict, "schema_version": "0.1"}
        else:
            myInstance = instance_dict
    if args.name:
        myInstance["instance"]['name'] = args.name
    if args.description:
        myInstance["instance"]['description'] = args.description
    if args.nostart:
        myInstance["instance"]['action'] = "reserve"
    # datacenter: CLI option overrides the file; name is resolved to uuid
    datacenter = myInstance["instance"].get("datacenter")
    if args.datacenter != None:
        datacenter = args.datacenter
    myInstance["instance"]["datacenter"] = _get_datacenter(datacenter, tenant)
    # scenario: CLI option overrides the file; mandatory
    scenario = myInstance["instance"].get("scenario")
    if args.scenario != None:
        scenario = args.scenario
    if not scenario:
        print("you must provide a scenario in the file descriptor or with --scenario")
        return -1
    if isinstance(scenario, str):
        myInstance["instance"]["scenario"] = _get_item_uuid("scenarios", scenario, tenant)
    if args.netmap_use:
        # each option is "net-scenario=net-datacenter[,...]"
        if "networks" not in myInstance["instance"]:
            myInstance["instance"]["networks"] = {}
        for net in args.netmap_use:
            net_comma_list = net.split(",")
            for net_comma in net_comma_list:
                net_tuple = net_comma.split("=")
                if len(net_tuple) != 2:
                    print("error at netmap-use. Expected net-scenario=net-datacenter. ({})?".format(net_comma))
                    return
                net_scenario = net_tuple[0].strip()
                net_datacenter = net_tuple[1].strip()
                if net_scenario not in myInstance["instance"]["networks"]:
                    myInstance["instance"]["networks"][net_scenario] = {}
                if "sites" not in myInstance["instance"]["networks"][net_scenario]:
                    myInstance["instance"]["networks"][net_scenario]["sites"] = [ {} ]
                myInstance["instance"]["networks"][net_scenario]["sites"][0]["netmap-use"] = net_datacenter
    if args.netmap_create:
        # each option is "net-scenario[=net-datacenter][,...]"; the
        # datacenter name is optional here
        if "networks" not in myInstance["instance"]:
            myInstance["instance"]["networks"] = {}
        for net in args.netmap_create:
            net_comma_list = net.split(",")
            for net_comma in net_comma_list:
                net_tuple = net_comma.split("=")
                if len(net_tuple) == 1:
                    net_scenario = net_tuple[0].strip()
                    net_datacenter = None
                elif len(net_tuple) == 2:
                    net_scenario = net_tuple[0].strip()
                    net_datacenter = net_tuple[1].strip()
                else:
                    print("error at netmap-create. Expected net-scenario=net-datacenter or net-scenario. ({})?".format(
                        net_comma))
                    return
                if net_scenario not in myInstance["instance"]["networks"]:
                    myInstance["instance"]["networks"][net_scenario] = {}
                if "sites" not in myInstance["instance"]["networks"][net_scenario]:
                    myInstance["instance"]["networks"][net_scenario]["sites"] = [ {} ]
                myInstance["instance"]["networks"][net_scenario]["sites"][0]["netmap-create"] = net_datacenter
    if args.keypair:
        # "key" goes to the global key-pairs list, "user:key[,key...]" to
        # a per-user entry in cloud-config
        if "cloud-config" not in myInstance["instance"]:
            myInstance["instance"]["cloud-config"] = {}
        cloud_config = myInstance["instance"]["cloud-config"]
        for key in args.keypair:
            index = key.find(":")
            if index<0:
                if "key-pairs" not in cloud_config:
                    cloud_config["key-pairs"] = []
                cloud_config["key-pairs"].append(key)
            else:
                user = key[:index]
                key_ = key[index+1:]
                key_list = key_.split(",")
                if "users" not in cloud_config:
                    cloud_config["users"] = []
                cloud_config["users"].append({"name": user, "key-pairs": key_list })
    if args.keypair_auto:
        # collect every *.pub file from ~/.ssh of the current user
        try:
            keys=[]
            home = os.getenv("HOME")
            user = os.getenv("USER")
            files = os.listdir(home+'/.ssh')
            for file in files:
                if file[-4:] == ".pub":
                    with open(home+'/.ssh/'+file, 'r') as f:
                        keys.append(f.read())
            if not keys:
                print("Cannot obtain any public ssh key from '{}'. Try not using --keymap-auto".format(home+'/.ssh'))
                return 1
        except Exception as e:
            print("Cannot obtain any public ssh key. Error '{}'. Try not using --keymap-auto".format(str(e)))
            return 1

        if "cloud-config" not in myInstance["instance"]:
            myInstance["instance"]["cloud-config"] = {}
        cloud_config = myInstance["instance"]["cloud-config"]
        if "key-pairs" not in cloud_config:
            cloud_config["key-pairs"] = []
        # NOTE(review): the collected keys are only attached to the $USER
        # entry; the global key-pairs list stays empty here — confirm
        if user:
            if "users" not in cloud_config:
                cloud_config["users"] = []
            cloud_config["users"].append({"name": user, "key-pairs": keys })

    payload_req = yaml.safe_dump(myInstance, explicit_start=True, indent=4, default_flow_style=False, tags=False,
                                 allow_unicode=True)
    logger.debug("openmano request: %s", payload_req)
    URLrequest = "http://{}:{}/openmano/{}/instances".format(mano_host, mano_port, tenant)
    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text )
    if args.verbose==None:
        args.verbose=0

    result = 0 if mano_response.status_code==200 else mano_response.status_code
    content = mano_response.json()
    if args.verbose >= 3:
        print(yaml.safe_dump(content, indent=4, default_flow_style=False))
        return result

    if mano_response.status_code == 200:
        myoutput = "{:38} {:20}".format(content['uuid'], content['name'])
        if args.verbose >=1:
            myoutput = "{} {:20}".format(myoutput, content['created_at'])
        if args.verbose >=2:
            myoutput = "{} {:30}".format(myoutput, content['description'])
        print(myoutput)
    else:
        print(content['error']['description'])
    return result
+
def instance_scenario_list(args):
    """List scenario instances of a tenant, or show one in detail.

    With --all the pseudo-tenant "any" is queried. Without a name, every
    instance is listed (verbosity adds creation date and description; >=3
    dumps the raw YAML). With a name, a detailed report of the instance
    (VNFs, nets, VMs) is printed.

    Returns 0 on HTTP 200, the HTTP status code otherwise.
    """
    # print("instance-scenario-list",args)
    if args.all:
        tenant = "any"
    else:
        tenant = _get_tenant()
    if args.name:
        toshow = _get_item_uuid("instances", args.name, tenant)
        URLrequest = "http://{}:{}/openmano/{}/instances/{}".format(mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://{}:{}/openmano/{}/instances".format(mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text )
    content = mano_response.json()
    # print(json.dumps(content, indent=4))
    if args.verbose==None:
        args.verbose=0

    result = 0 if mano_response.status_code==200 else mano_response.status_code
    if mano_response.status_code == 200:
        if not args.name:
            # listing mode: one line per instance
            if args.verbose >= 3:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            if len(content['instances']) == 0:
                print("No scenario instances were found.")
                return result
            for instance in content['instances']:
                myoutput = "{:38} {:20}".format(instance['uuid'], instance['name'])
                if args.verbose >=1:
                    myoutput = "{} {:20}".format(myoutput, instance['created_at'])
                print(myoutput)
                if args.verbose >=2:
                    print("Description: {}".format(instance['description']))
        else:
            # detail mode: any verbosity dumps the raw document instead
            if args.verbose:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            instance = content
            print("{:38} {:20} {:20}".format(instance['uuid'],instance['name'],instance['created_at']))
            print("Description: {}".format(instance['description']))
            print("Template scenario id: {}".format(instance['scenario_id']))
            print("Template scenario name: {}".format(instance['scenario_name']))
            print("---------------------------------------")
            print("VNF instances: {}".format(len(instance['vnfs'])))
            for vnf in instance['vnfs']:
                # print(" {} {} Template vnf name: {} Template vnf id: {}".format(vnf['uuid'].ljust(38), vnf['name'].ljust(20), vnf['vnf_name'].ljust(20), vnf['vnf_id'].ljust(38))
                print(" {:38} {:20} Template vnf id: {:38}".format(vnf['uuid'], vnf['vnf_name'], vnf['vnf_id']))
            # nets are split by the 'created' flag: created by the deployment
            # (internal) vs pre-existing in the vim (external)
            if len(instance['nets'])>0:
                print("---------------------------------------")
                print("Internal nets:")
                for net in instance['nets']:
                    if net['created']:
                        print(" {:38} {:12} VIM ID: {}".format(net['uuid'], net['status'], net['vim_net_id']))
                print("---------------------------------------")
                print("External nets:")
                for net in instance['nets']:
                    if not net['created']:
                        print(" {:38} {:12} VIM ID: {}".format(net['uuid'], net['status'], net['vim_net_id']))
            print("---------------------------------------")
            print("VM instances:")
            for vnf in instance['vnfs']:
                for vm in vnf['vms']:
                    print(" {:38} {:20} {:20} {:12} VIM ID: {}".format(vm['uuid'], vnf['vnf_name'], vm['name'],
                                                                       vm['status'], vm['vim_vm_id']))
    else:
        print(content['error']['description'])
        if args.verbose:
            print(yaml.safe_dump(content, indent=4, default_flow_style=False))
    return result
+
def instance_scenario_status(args):
    """Placeholder: showing a scenario instance's status is not implemented yet."""
    print("instance-scenario-status")
    return 0
+
def instance_scenario_delete(args):
    """Delete a scenario instance, asking for confirmation unless --force.

    Fix: the declined-confirmation path used a bare ``return`` (None); it now
    returns 0, consistent with tenant_delete and datacenter_delete.

    Returns 0 on success or cancellation, the HTTP error code otherwise.
    """
    if args.all:
        tenant = "any"
    else:
        tenant = _get_tenant()
    todelete = _get_item_uuid("instances", args.name, tenant=tenant)
    # print("instance-scenario-delete",args)
    if not args.force:
        r = input("Delete scenario instance {} (y/N)? ".format(args.name))
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0  # fixed: was a bare "return" (None)
    URLrequest = "http://{}:{}/openmano/{}/instances/{}".format(mano_host, mano_port, tenant, todelete)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    content = mano_response.json()
    if mano_response.status_code == 200:
        print(content['result'])
    else:
        print(content['error']['description'])
    return result
+
def get_action(args):
    """Show deployment actions of an instance via GET .../instances/<id>/action[/<action_id>].

    --all queries the pseudo-tenant "any"; omitting --instance queries every
    instance. Giving an action id raises the verbosity by one.
    """
    tenant = "any" if args.all else _get_tenant()
    instance_id = args.instance if args.instance else "any"
    action_suffix = "/" + args.id if args.id else ""
    url = "http://{}:{}/openmano/{}/instances/{}/action{}".format(
        mano_host, mano_port, tenant, instance_id, action_suffix)
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.id is not None:
        # a single action was requested: show it with more detail
        args.verbose += 1
    return _print_verbose(response, args.verbose)
+
def instance_scenario_action(args):
    """Send an action (start, stop, ...) to a scenario instance.

    The action name and its YAML-parsed parameter are POSTed to
    .../instances/<uuid>/action, optionally restricted to given vnfs/vms.
    Returns 0 on HTTP 200, the HTTP status code otherwise.
    """
    tenant = _get_tenant()
    instance_uuid = _get_item_uuid("instances", args.name, tenant=tenant)
    action = {args.action: yaml.safe_load(args.param)}
    if args.vnf:
        action["vnfs"] = args.vnf
    if args.vm:
        action["vms"] = args.vm

    body = json.dumps(action, indent=4)
    url = "http://{}:{}/openmano/{}/instances/{}/action".format(mano_host, mano_port, tenant, instance_uuid)
    logger.debug("openmano request: %s", body)
    response = requests.post(url, headers={'content-type': 'application/json'}, data=body)
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        if args.verbose:
            print(yaml.safe_dump(content, indent=4, default_flow_style=False))
            return 0
        if "instance_action_id" in content:
            print("instance_action_id={}".format(content["instance_action_id"]))
        else:
            # per-element results keyed by uuid
            for uuid, item in content.items():
                print("{:38} {:20} {:20}".format(uuid, item.get('name'), item.get('description')))
        return 0
    print(content['error']['description'])
    return response.status_code
+
+
def instance_vnf_list(args):
    """Placeholder: listing VNF instances is not implemented yet."""
    print("instance-vnf-list")
    return 0
+
def instance_vnf_status(args):
    """Placeholder: showing a VNF instance's status is not implemented yet."""
    print("instance-vnf-status")
    return 0
+
def tenant_create(args):
    """Create a new tenant (POST /openmano/tenants) and print the result."""
    tenant_data = {"name": args.name}
    if args.description is not None:
        tenant_data["description"] = args.description
    body = json.dumps({"tenant": tenant_data})

    url = "http://{}:{}/openmano/tenants".format(mano_host, mano_port)
    logger.debug("openmano request: %s", body)
    response = requests.post(url, headers={'Accept': 'application/json', 'content-type': 'application/json'},
                             data=body)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
def tenant_list(args):
    """List every tenant, or show a single one when a name/uuid is given."""
    if args.name:
        tenant_uuid = _get_item_uuid("tenants", args.name)
        url = "http://{}:{}/openmano/tenants/{}".format(mano_host, mano_port, tenant_uuid)
    else:
        url = "http://{}:{}/openmano/tenants".format(mano_host, mano_port)
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        # a single element was asked for: show it with more detail
        args.verbose += 1
    return _print_verbose(response, args.verbose)
+
def tenant_delete(args):
    """Delete a tenant by name/uuid, asking for confirmation unless --force.

    Returns 0 on success or cancellation, the HTTP error code otherwise.
    """
    tenant_uuid = _get_item_uuid("tenants", args.name)
    if not args.force:
        answer = input("Delete tenant {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0
    url = "http://{}:{}/openmano/tenants/{}".format(mano_host, mano_port, tenant_uuid)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return response.status_code
+
def datacenter_attach(args):
    """Attach (associate) an existing datacenter to the current tenant.

    Optional vim credentials/tenant/config are sent along. On failure, a hint
    is printed for the most common cause (duplicated vim account name).
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name)

    attach_info = {}
    if args.vim_tenant_id is not None:
        attach_info['vim_tenant'] = args.vim_tenant_id
    if args.vim_tenant_name is not None:
        attach_info['vim_tenant_name'] = args.vim_tenant_name
    if args.user is not None:
        attach_info['vim_username'] = args.user
    if args.password is not None:
        attach_info['vim_password'] = args.password
    if args.config is not None:
        attach_info["config"] = _load_file_or_yaml(args.config)

    body = json.dumps({"datacenter": attach_info})
    url = "http://{}:{}/openmano/{}/datacenters/{}".format(mano_host, mano_port, tenant, datacenter)
    logger.debug("openmano request: %s", body)
    response = requests.post(url, headers={'Accept': 'application/json', 'content-type': 'application/json'},
                             data=body)
    logger.debug("openmano response: %s", response.text)
    result = _print_verbose(response, args.verbose)
    if response.status_code != 200:
        description = response.json()['error']['description']
        if "already in use for 'name'" in description and \
                "to database vim_tenants table" in description:
            print("Try to specify a different name with --vim-tenant-name")
    return result
+
+
def datacenter_edit_vim_tenant(args):
    """Update the vim account (credentials/tenant/config) of an attached datacenter.

    Raises OpenmanoCLIError if no parameter is supplied to update.
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name)

    if not (args.vim_tenant_id or args.vim_tenant_name or args.user or args.password or args.config):
        raise OpenmanoCLIError("Error. At least one parameter must be updated.")

    updates = {}
    if args.vim_tenant_id is not None:
        updates['vim_tenant'] = args.vim_tenant_id
    if args.vim_tenant_name is not None:
        updates['vim_tenant_name'] = args.vim_tenant_name
    if args.user is not None:
        updates['vim_username'] = args.user
    if args.password is not None:
        updates['vim_password'] = args.password
    if args.config is not None:
        updates["config"] = _load_file_or_yaml(args.config)
    body = json.dumps({"datacenter": updates})

    url = "http://{}:{}/openmano/{}/datacenters/{}".format(mano_host, mano_port, tenant, datacenter)
    logger.debug("openmano request: %s", body)
    response = requests.put(url, headers={'Accept': 'application/json', 'content-type': 'application/json'},
                            data=body)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
def datacenter_detach(args):
    """Detach a datacenter from a tenant (DELETE .../datacenters/<dc>).

    With --all the pseudo-tenant "any" is used.
    Returns 0 on success, the HTTP error code otherwise.
    """
    tenant = "any" if args.all else _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)
    url = "http://{}:{}/openmano/{}/datacenters/{}".format(mano_host, mano_port, tenant, datacenter)
    response = requests.delete(url, headers={'Accept': 'application/json', 'content-type': 'application/json'})
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return response.status_code
+
def datacenter_create(args):
    """Register a new datacenter (POST /openmano/datacenters).

    Fix: vim_url_admin used to be guarded by ``args.url != None`` — always
    true, since --url is mandatory — so "vim_url_admin" was sent with a None
    value whenever --url-admin was absent. It is now guarded by
    ``args.url_admin`` itself.
    """
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    datacenter_dict = {"name": args.name, "vim_url": args.url}
    if args.description is not None:
        datacenter_dict["description"] = args.description
    if args.type is not None:
        datacenter_dict["type"] = args.type
    if args.url_admin is not None:  # fixed: previously tested args.url
        datacenter_dict["vim_url_admin"] = args.url_admin
    if args.config is not None:
        datacenter_dict["config"] = _load_file_or_yaml(args.config)
    if args.sdn_controller is not None:
        # resolve the controller name/uuid within the current tenant
        tenant = _get_tenant()
        sdn_controller = _get_item_uuid("sdn_controllers", args.sdn_controller, tenant)
        if 'config' not in datacenter_dict:
            datacenter_dict['config'] = {}
        datacenter_dict['config']['sdn-controller'] = sdn_controller
    payload_req = json.dumps({"datacenter": datacenter_dict})

    URLrequest = "http://{}:{}/openmano/datacenters".format(mano_host, mano_port)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
def datacenter_delete(args):
    """Delete a datacenter (looked up across all tenants), with confirmation unless --force.

    Returns 0 on success or cancellation, the HTTP error code otherwise.
    """
    todelete = _get_item_uuid("datacenters", args.name, "any")
    if not args.force:
        answer = input("Delete datacenter {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0
    url = "http://{}:{}/openmano/datacenters/{}".format(mano_host, mano_port, todelete)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return response.status_code
+
+
def datacenter_list(args):
    """List datacenters of the tenant (or of all tenants with --all); show one when a name is given."""
    tenant = 'any' if args.all else _get_tenant()

    if args.name:
        datacenter_uuid = _get_item_uuid("datacenters", args.name, tenant)
        url = "http://{}:{}/openmano/{}/datacenters/{}".format(mano_host, mano_port, tenant, datacenter_uuid)
    else:
        url = "http://{}:{}/openmano/{}/datacenters".format(mano_host, mano_port, tenant)
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        # a single element was asked for: show it with more detail
        args.verbose += 1
    return _print_verbose(response, args.verbose)
+
+
def datacenter_sdn_port_mapping_set(args):
    """Set the compute-node-to-SDN-switch port mapping of a datacenter.

    Reads the current mapping first; if one already exists, asks for
    confirmation (unless --force). The old mapping is then cleared and the
    new one, taken from the yaml/json file in args.file, is posted.

    Fix: removed a dead ``str(mano_response.json())`` statement whose result
    was discarded.

    Returns 0 on success, the HTTP error code otherwise. Raises
    OpenmanoCLIError if no file is given or the current mapping cannot be read.
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    if not args.file:
        raise OpenmanoCLIError(
            "No yaml/json has been provided specifying the SDN port mapping")
    sdn_port_mapping = _load_file_or_yaml(args.file)
    payload_req = json.dumps({"sdn_port_mapping": sdn_port_mapping})

    # read the current mapping
    URLrequest = "http://{}:{}/openmano/{}/datacenters/{}/sdn_mapping".format(mano_host, mano_port, tenant, datacenter)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    port_mapping = mano_response.json()
    if mano_response.status_code != 200:
        raise OpenmanoCLIError("openmano client error: {}".format(port_mapping['error']['description']))
    if len(port_mapping["sdn_port_mapping"]["ports_mapping"]) > 0:
        if not args.force:
            r = input("Datacenter {} already contains a port mapping. Overwrite? (y/N)? ".format(datacenter))
            if not (len(r) > 0 and r[0].lower() == "y"):
                return 0

    # clear the existing mapping
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    if mano_response.status_code != 200:
        return _print_verbose(mano_response, args.verbose)

    # set the new mapping
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
+
def datacenter_sdn_port_mapping_list(args):
    """Print the SDN port mapping of a datacenter (always at maximum verbosity)."""
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)
    url = "http://{}:{}/openmano/{}/datacenters/{}/sdn_mapping".format(mano_host, mano_port, tenant, datacenter)
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, 4)
+
+
def datacenter_sdn_port_mapping_clear(args):
    """Remove the whole SDN port mapping of a datacenter, with confirmation unless --force."""
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)

    if not args.force:
        answer = input("Clean SDN port mapping for datacenter {} (y/N)? ".format(datacenter))
        if not answer or answer[0].lower() != "y":
            return 0

    url = "http://{}:{}/openmano/{}/datacenters/{}/sdn_mapping".format(mano_host, mano_port, tenant, datacenter)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
+
def sdn_controller_create(args):
    """Register a new SDN controller for the current tenant.

    ip, port, dpid and type are all mandatory; an OpenmanoCLIError listing the
    missing ones is raised otherwise.
    """
    tenant = _get_tenant()
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    missing = [label for label, value in (("'ip'", args.ip), ("'port'", args.port),
                                          ("'dpid'", args.dpid), ("'type'", args.type)) if not value]
    if missing:
        raise OpenmanoCLIError("The following arguments are required: " + ",".join(missing))

    controller_dict = {
        'name': args.name,
        'ip': args.ip,
        'port': int(args.port),
        'dpid': args.dpid,
        'type': args.type,
    }
    if args.description is not None:
        controller_dict['description'] = args.description
    if args.user is not None:
        controller_dict['user'] = args.user
    if args.password is not None:
        controller_dict['password'] = args.password

    body = json.dumps({"sdn_controller": controller_dict})

    url = "http://{}:{}/openmano/{}/sdn_controllers".format(mano_host, mano_port, tenant)
    logger.debug("openmano request: %s", body)
    response = requests.post(url, headers=headers_req, data=body)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
+
def sdn_controller_edit(args):
    """Modify fields of an existing SDN controller, with confirmation unless --force.

    Raises OpenmanoCLIError if no field to change was supplied.
    """
    tenant = _get_tenant()
    controller_uuid = _get_item_uuid("sdn_controllers", args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    changes = {}
    if args.new_name:
        changes['name'] = args.new_name
    if args.port:
        changes['port'] = int(args.port)
    # straight copies of same-named, truthy arguments
    for field in ('ip', 'dpid', 'type', 'description', 'user', 'password'):
        value = getattr(args, field)
        if value:
            changes[field] = value

    if not changes:
        raise OpenmanoCLIError("At least one parameter must be edited")

    if not args.force:
        answer = input("Update SDN controller {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0

    body = json.dumps({"sdn_controller": changes})
    url = "http://{}:{}/openmano/{}/sdn_controllers/{}".format(mano_host, mano_port, tenant, controller_uuid)
    logger.debug("openmano request: %s", body)
    response = requests.put(url, headers=headers_req, data=body)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
+
def sdn_controller_list(args):
    """List SDN controllers of the tenant, or show one when a name/uuid is given.

    Fix: removed an unused ``headers_req`` local — this GET sends no headers.
    """
    tenant = _get_tenant()

    if args.name:
        toshow = _get_item_uuid("sdn_controllers", args.name, tenant)
        URLrequest = "http://{}:{}/openmano/{}/sdn_controllers/{}".format(mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://{}:{}/openmano/{}/sdn_controllers".format(mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        # a single element was asked for: show it with more detail
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
+
+
def sdn_controller_delete(args):
    """Delete an SDN controller, asking for confirmation unless --force."""
    tenant = _get_tenant()
    controller_uuid = _get_item_uuid("sdn_controllers", args.name, tenant)

    if not args.force:
        answer = input("Delete SDN controller {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0

    url = "http://{}:{}/openmano/{}/sdn_controllers/{}".format(mano_host, mano_port, tenant, controller_uuid)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
def vim_action(args):
    """Generic vim passthrough: list, delete or create an item (network,
    tenant, ...) directly on the datacenter's VIM through the openmano proxy.

    Returns _print_verbose's result for list/create, 0/HTTP-code for delete.
    """
    # print("datacenter-net-action",args)
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    if args.verbose==None:
        args.verbose=0
    if args.action=="list":
        URLrequest = "http://{}:{}/openmano/{}/vim/{}/{}s".format(mano_host, mano_port, tenant, datacenter, args.item)
        if args.name!=None:
            # a single element was asked for: show it with more detail
            args.verbose += 1
            URLrequest += "/" + args.name
        mano_response = requests.get(URLrequest)
        logger.debug("openmano response: %s", mano_response.text )
        return _print_verbose(mano_response, args.verbose)
    elif args.action=="delete":
        URLrequest = "http://{}:{}/openmano/{}/vim/{}/{}s/{}".format(mano_host, mano_port, tenant, datacenter, args.item, args.name)
        mano_response = requests.delete(URLrequest)
        logger.debug("openmano response: %s", mano_response.text )
        result = 0 if mano_response.status_code==200 else mano_response.status_code
        content = mano_response.json()
        # print(json.dumps(content, indent=4))
        if mano_response.status_code == 200:
            print(content['result'])
        else:
            print(content['error']['description'])
        return result
    elif args.action=="create":
        headers_req = {'content-type': 'application/yaml'}
        if args.file:
            create_dict = _load_file_or_yaml(args.file)
            # wrap the descriptor under the item key if the file provides it bare
            if args.item not in create_dict:
                create_dict = {args.item: create_dict}
        else:
            create_dict = {args.item:{}}
        if args.name:
            create_dict[args.item]['name'] = args.name
        #if args.description:
        #    create_dict[args.item]['description'] = args.description
        if args.item=="network":
            # network-only options
            if args.bind_net:
                create_dict[args.item]['bind_net'] = args.bind_net
            if args.type:
                create_dict[args.item]['type'] = args.type
            if args.shared:
                create_dict[args.item]['shared'] = args.shared
        if "name" not in create_dict[args.item]:
            print("You must provide a name in the descriptor file or with the --name option")
            # NOTE(review): this error path returns None while others return an
            # int status — looks unintended; confirm before changing
            return
        payload_req = yaml.safe_dump(create_dict, explicit_start=True, indent=4, default_flow_style=False, tags=False,
                                     allow_unicode=True)
        logger.debug("openmano request: %s", payload_req)
        URLrequest = "http://{}:{}/openmano/{}/vim/{}/{}s".format(mano_host, mano_port, tenant, datacenter, args.item)
        mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
        logger.debug("openmano response: %s", mano_response.text )
        if args.verbose==None:
            args.verbose=0
        return _print_verbose(mano_response, args.verbose)
+
+
def _get_items(item, item_name_id=None, datacenter=None, tenant=None):
    """GET a collection (or a single element) from the openmano server.

    Builds /openmano[/<tenant>][/vim/<datacenter>][/<item>s][/<item_name_id>]
    and returns the raw requests response.
    """
    segments = ["http://{}:{}/openmano".format(mano_host, mano_port)]
    if tenant:
        segments.append(tenant)
    if datacenter:
        segments.append("vim/" + datacenter)
    if item:
        segments.append(item + "s")
    if item_name_id:
        segments.append(item_name_id)
    response = requests.get("/".join(segments))
    logger.debug("openmano response: %s", response.text)
    return response
+
+
def vim_net_sdn_attach(args):
    """Attach a dataplane port to the SDN network associated to a vim network.

    Resolves args.vim_net in the vim (it must resolve to a single network),
    then POSTs the port (and optional vlan/mac) to .../network/<uuid>/attach.

    Fix: parse the server response with yaml.safe_load instead of the
    unsafe/deprecated yaml.load (no Loader argument), consistent with the rest
    of the file.
    """
    # Verify the network exists in the vim
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    result = _get_items('network', item_name_id=args.vim_net, datacenter=datacenter, tenant=tenant)
    content = yaml.safe_load(result.content)  # fixed: was yaml.load
    if 'networks' in content:
        raise OpenmanoCLIError('More than one network in the vim named ' + args.vim_net + '. Use uuid instead')
    if 'error' in content:
        raise OpenmanoCLIError(yaml.safe_dump(content))
    network_uuid = content['network']['id']

    # Make call to attach the dataplane port to the SDN network associated to the vim network
    headers_req = {'content-type': 'application/yaml'}
    payload_req = {'port': args.port}
    if args.vlan:
        payload_req['vlan'] = int(args.vlan)
    if args.mac:
        payload_req['mac'] = args.mac

    URLrequest = "http://{}:{}/openmano/{}/vim/{}/network/{}/attach".format(mano_host, mano_port, tenant, datacenter,
                                                                            network_uuid)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=json.dumps(payload_req))
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
+
def vim_net_sdn_detach(args):
    """Detach one (--id) or all (--all) dataplane ports from the SDN network
    associated to a vim network, asking for confirmation unless --force.

    Fix: parse the server response with yaml.safe_load instead of the
    unsafe/deprecated yaml.load (no Loader argument), consistent with the rest
    of the file.
    """
    if not args.all and not args.id:
        print("--all or --id must be used")
        return 1

    # Verify the network exists in the vim
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    result = _get_items('network', item_name_id=args.vim_net, datacenter=datacenter, tenant=tenant)
    content = yaml.safe_load(result.content)  # fixed: was yaml.load
    if 'networks' in content:
        raise OpenmanoCLIError('More than one network in the vim named ' + args.vim_net + '. Use uuid instead')
    if 'error' in content:
        raise OpenmanoCLIError(yaml.safe_dump(content))
    network_uuid = content['network']['id']

    if not args.force:
        r = input("Confirm action' (y/N)? ")
        if len(r) == 0 or r[0].lower() != "y":
            return 0

    if args.id:
        URLrequest = "http://{}:{}/openmano/{}/vim/{}/network/{}/detach/{}".format(
            mano_host, mano_port, tenant, datacenter, network_uuid, args.id)
    else:
        URLrequest = "http://{}:{}/openmano/{}/vim/{}/network/{}/detach".format(
            mano_host, mano_port, tenant, datacenter, network_uuid)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
+
def datacenter_net_action(args):
    """Deprecated wrapper mapping the old datacenter-net-* commands onto the
    newer datacenter-netmap-* ones, then delegating to datacenter_netmap_action."""
    if args.action == "net-update":
        # net-update == wipe all netmaps, then re-import them from the vim
        print("This command is deprecated, use 'openmano datacenter-netmap-delete --all' and 'openmano"
              " datacenter-netmap-import' instead!!!")
        print()
        args.action = "netmap-delete"
        args.netmap = None
        args.all = True
        result = datacenter_netmap_action(args)
        if result == 0:
            args.force = True
            args.action = "netmap-import"
            result = datacenter_netmap_action(args)
        return result

    # translate the remaining net-* sub-commands to their netmap-* equivalent
    if args.action == "net-edit":
        args.netmap = args.net
        args.name = None
    elif args.action == "net-list":
        args.netmap = None
    elif args.action == "net-delete":
        args.netmap = args.net
        args.all = False

    args.action = "netmap" + args.action[3:]
    args.vim_name = None
    args.vim_id = None
    print("This command is deprecated, use 'openmano datacenter-{}' instead!!!".format(args.action))
    print()
    return datacenter_netmap_action(args)
+
def datacenter_netmap_action(args):
    """Implement the datacenter-netmap-{list,delete,import,edit,create} commands.

    Builds the .../datacenters/<dc>/netmaps URL for the tenant/datacenter and
    dispatches on args.action. Returns _print_verbose's result, 1 on usage
    errors, 0 when the user declines a confirmation.
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    # print("datacenter_netmap_action",args)
    payload_req = None
    if args.verbose==None:
        args.verbose=0
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://{}:{}/openmano/{}/datacenters/{}/netmaps".format(mano_host, mano_port, tenant, datacenter)

    if args.action=="netmap-list":
        if args.netmap:
            # a single netmap was asked for: show it with more detail
            URLrequest += "/" + args.netmap
            args.verbose += 1
        mano_response = requests.get(URLrequest)

    elif args.action=="netmap-delete":
        if args.netmap and args.all:
            print("you can not use a netmap name and the option --all at the same time")
            return 1
        if args.netmap:
            force_text= "Delete default netmap '{}' from datacenter '{}' (y/N)? ".format(args.netmap, datacenter)
            URLrequest += "/" + args.netmap
        elif args.all:
            force_text="Delete all default netmaps from datacenter '{}' (y/N)? ".format(datacenter)
        else:
            print("you must specify a netmap name or the option --all")
            return 1
        if not args.force:
            r = input(force_text)
            if len(r)>0 and r[0].lower()=="y":
                pass
            else:
                return 0
        mano_response = requests.delete(URLrequest, headers=headers_req)
    elif args.action=="netmap-import":
        if not args.force:
            r = input("Create all the available networks from datacenter '{}' as default netmaps (y/N)? ".format(datacenter))
            if len(r)>0 and r[0].lower()=="y":
                pass
            else:
                return 0
        URLrequest += "/upload"
        mano_response = requests.post(URLrequest, headers=headers_req)
    elif args.action=="netmap-edit" or args.action=="netmap-create":
        # both commands share the payload construction
        if args.file:
            payload = _load_file_or_yaml(args.file)
        else:
            payload = {}
        if "netmap" not in payload:
            payload = {"netmap": payload}
        if args.name:
            payload["netmap"]["name"] = args.name
        if args.vim_id:
            payload["netmap"]["vim_id"] = args.vim_id
        if args.action=="netmap-create" and args.vim_name:
            payload["netmap"]["vim_name"] = args.vim_name
        payload_req = json.dumps(payload)
        logger.debug("openmano request: %s", payload_req)

        if args.action=="netmap-edit" and not args.force:
            if len(payload["netmap"]) == 0:
                print("You must supply some parameter to edit")
                return 1
            r = input("Edit default netmap '{}' from datacenter '{}' (y/N)? ".format(args.netmap, datacenter))
            if len(r)>0 and r[0].lower()=="y":
                pass
            else:
                return 0
            URLrequest += "/" + args.netmap
            mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
        else: #netmap-create
            # NOTE(review): a forced netmap-edit also falls into this branch
            # and is POSTed as a create — looks unintended; confirm upstream
            if "vim_name" not in payload["netmap"] and "vim_id" not in payload["netmap"]:
                print("You must supply either --vim-id or --vim-name option; or include one of them in the file"
                      " descriptor")
                return 1
            mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)

    logger.debug("openmano response: %s", mano_response.text )
    return _print_verbose(mano_response, args.verbose)
+
+
def element_edit(args):
    """Generic edit of a top-level collection element (tenants, datacenters, ...).

    args.element is the plural collection name; the payload comes from
    args.file and is wrapped under the singular key if the file gives it bare.

    Fix: the confirmation guard referenced a non-existent attribute
    ``args.filer`` (latent AttributeError); it is ``args.file``.
    """
    element = _get_item_uuid(args.element, args.name)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://{}:{}/openmano/{}/{}".format(mano_host, mano_port, args.element, element)
    payload = _load_file_or_yaml(args.file)
    if args.element[:-1] not in payload:
        payload = {args.element[:-1]: payload}
    payload_req = json.dumps(payload)

    if not args.force or (args.name is None and args.file is None):  # fixed: was args.filer
        r = input(" Edit " + args.element[:-1] + " " + args.name + " (y/N)? ")
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
+
+
def datacenter_edit(args):
    """Modify a datacenter: load new fields from a file and/or set or clear its
    SDN controller association.

    The literal string 'null' for --sdn-controller clears the association.
    Raises OpenmanoCLIError when neither --file nor --sdn-controller is given.

    Fix: the confirmation guard referenced a non-existent attribute
    ``args.filer`` (latent AttributeError); it is ``args.file``.
    """
    tenant = _get_tenant()
    element = _get_item_uuid('datacenters', args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://{}:{}/openmano/datacenters/{}".format(mano_host, mano_port, element)

    has_arguments = False
    if args.file is not None:
        has_arguments = True
        payload = _load_file_or_yaml(args.file)
    else:
        payload = {}

    if args.sdn_controller is not None:
        has_arguments = True
        if 'config' not in payload:
            payload['config'] = {}
        if 'sdn-controller' not in payload['config']:
            payload['config']['sdn-controller'] = {}
        if args.sdn_controller == 'null':
            # explicit clearing of the association
            payload['config']['sdn-controller'] = None
        else:
            payload['config']['sdn-controller'] = _get_item_uuid("sdn_controllers", args.sdn_controller, tenant)

    if not has_arguments:
        raise OpenmanoCLIError("At least one argument must be provided to modify the datacenter")

    if 'datacenter' not in payload:
        payload = {'datacenter': payload}
    payload_req = json.dumps(payload)

    if not args.force or (args.name is None and args.file is None):  # fixed: was args.filer
        r = input(" Edit datacenter " + args.name + " (y/N)? ")
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
+
+
+# WIM
+def wim_account_create(args):
+ tenant = _get_tenant()
+ wim = _get_wim(args.name)
+ headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+
+ wim_dict = {}
+ if args.account_name is not None:
+ wim_dict['name'] = args.account_name
+ if args.user is not None:
+ wim_dict['user'] = args.user
+ if args.password is not None:
+ wim_dict['password'] = args.password
+ if args.config is not None:
+ wim_dict["config"] = _load_file_or_yaml(args.config)
+
+ payload_req = json.dumps({"wim_account": wim_dict})
+
+ URLrequest = "http://{}:{}/openmano/{}/wims/{}".format(mano_host, mano_port, tenant, wim)
+ logger.debug("openmano request: %s", payload_req)
+ mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+ logger.debug("openmano response: %s", mano_response.text)
+ result = _print_verbose(mano_response, args.verbose)
+ # provide addional information if error
+ if mano_response.status_code != 200:
+ content = mano_response.json()
+ if "already in use for 'name'" in content['error']['description'] and \
+ "to database wim_tenants table" in content['error']['description']:
+ print("Try to specify a different name with --wim-tenant-name")
+ return result
+
+
def wim_account_delete(args):
    """Delete the WIM account linking a tenant (or all tenants) to a WIM."""
    tenant = "any" if args.all else _get_tenant()
    wim = _get_wim(args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://{}:{}/openmano/{}/wims/{}".format(mano_host, mano_port, tenant, wim)
    mano_response = requests.delete(URLrequest, headers=headers_req)
    logger.debug("openmano response: %s", mano_response.text)
    content = mano_response.json()
    if mano_response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return mano_response.status_code
+
+
def wim_account_edit(args):
    """Edit the WIM account that links the current tenant to a WIM.

    Fixed defects in the original:
    - every option test was inverted (``if not args.user: ... = args.user``),
      so supplied options were ignored while missing ones stored None or
      crashed (``_load_file_or_yaml(None)``);
    - the name branch read ``args.vim_tenant_name``, an attribute the WIM
      parser never defines, instead of ``args.account_name``;
    - the request used POST (the create verb on this same URL, see
      wim_account_create) instead of PUT for an edit.
    """
    tenant = _get_tenant()
    wim = _get_wim(args.name)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    # Include only the fields the user actually supplied.
    wim_dict = {}
    if args.account_name is not None:
        wim_dict['name'] = args.account_name
    if args.user is not None:
        wim_dict['user'] = args.user
    if args.password is not None:
        wim_dict['password'] = args.password
    if args.config is not None:
        wim_dict["config"] = _load_file_or_yaml(args.config)

    payload_req = json.dumps({"wim_account": wim_dict})

    URLrequest = "http://{}:{}/openmano/{}/wims/{}".format(mano_host, mano_port, tenant, wim)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    result = _print_verbose(mano_response, args.verbose)
    # provide addional information if error
    if mano_response.status_code != 200:
        content = mano_response.json()
        if "already in use for 'name'" in content['error']['description'] and \
                "to database wim_tenants table" in content['error']['description']:
            print("Try to specify a different name with --wim-tenant-name")
    return result
+
def wim_create(args):
    """Register a new WIM in openmano from the command-line options."""
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    wim_dict = {"name": args.name, "wim_url": args.url}
    # Optional descriptor fields, added only when given.
    for key, value in (("description", args.description), ("type", args.type)):
        if value is not None:
            wim_dict[key] = value
    if args.config is not None:
        wim_dict["config"] = _load_file_or_yaml(args.config)

    payload_req = json.dumps({"wim": wim_dict})

    URLrequest = "http://{}:{}/openmano/wims".format(mano_host, mano_port)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
+
def wim_edit(args):
    """Edit a WIM from a json/yaml descriptor given with --file.

    Asks for confirmation unless --force, then PUTs the wrapped descriptor.
    Returns the _print_verbose() result (0 on success, or an error code).
    """
    tenant = _get_tenant()
    element = _get_item_uuid('wims', args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://{}:{}/openmano/wims/{}".format(mano_host, mano_port, element)

    has_arguments = False
    if args.file is not None:
        has_arguments = True
        payload = _load_file_or_yaml(args.file)
    else:
        payload = {}

    if not has_arguments:
        raise OpenmanoCLIError("At least one argument must be provided to modify the wim")

    # The server expects the descriptor wrapped under the 'wim' key.
    if 'wim' not in payload:
        payload = {'wim': payload}
    payload_req = json.dumps(payload)

    # Prompt for confirmation unless --force was given.
    # Fixed: the original read the non-existent attribute args.filer
    # (typo for args.file), which would raise AttributeError if evaluated.
    if not args.force or (args.name is None and args.file is None):
        r = input(" Edit wim " + args.name + " (y/N)? ")
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    # When a concrete wim name was given, show one extra level of detail.
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
+
+
def wim_delete(args):
    """Delete a WIM, asking for confirmation unless --force is given."""
    todelete = _get_item_uuid("wims", args.name, "any")
    if not args.force:
        answer = input("Delete wim {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0
    URLrequest = "http://{}:{}/openmano/wims/{}".format(mano_host, mano_port, todelete)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    content = mano_response.json()
    if mano_response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return mano_response.status_code
+
+
def wim_list(args):
    """List WIMs of the current tenant (or all tenants with --all)."""
    tenant = 'any' if args.all else _get_tenant()

    base_url = "http://{}:{}/openmano/{}/wims".format(mano_host, mano_port, tenant)
    if args.name:
        toshow = _get_item_uuid("wims", args.name, tenant)
        URLrequest = base_url + "/" + toshow
    else:
        URLrequest = base_url
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    # When a concrete wim name was given, show one extra level of detail.
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
+
+
def wim_port_mapping_set(args):
    """Replace the port mapping of a WIM with the one from a json/yaml file.

    Reads the current mapping first; if one already exists the user is asked
    to confirm the overwrite (unless --force) and the old mapping is cleared
    before the new one is uploaded.

    Fixed defects in the original: a discarded ``str(mano_response.json())``
    dead statement, the identical URL being rebuilt three times, and the
    confirm/clear steps running even when no mapping existed (the code's own
    "TODO: check this if statement").
    """
    tenant = _get_tenant()
    wim = _get_wim(args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    if not args.file:
        raise OpenmanoCLIError(
            "No yaml/json has been provided specifying the WIM port mapping")
    wim_port_mapping = _load_file_or_yaml(args.file)

    payload_req = json.dumps({"wim_port_mapping": wim_port_mapping})

    # The same endpoint is used to read, clear and set the mapping.
    URLrequest = "http://{}:{}/openmano/{}/wims/{}/port_mapping".format(mano_host, mano_port, tenant, wim)

    # read the current mapping
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    port_mapping = mano_response.json()

    if mano_response.status_code != 200:
        raise OpenmanoCLIError("openmano client error: {}".format(port_mapping['error']['description']))

    if len(port_mapping["wim_port_mapping"]) > 0:
        if not args.force:
            r = input("WIM {} already contains a port mapping. Overwrite? (y/N)? ".format(wim))
            if not (len(r) > 0 and r[0].lower() == "y"):
                return 0

        # clear the existing mapping before uploading the new one
        mano_response = requests.delete(URLrequest)
        logger.debug("openmano response: %s", mano_response.text)
        if mano_response.status_code != 200:
            return _print_verbose(mano_response, args.verbose)

    # set the new mapping
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, 4)
+
+
def wim_port_mapping_list(args):
    """Show the port mapping currently stored for a WIM."""
    tenant = _get_tenant()
    wim = _get_wim(args.name, tenant)
    url = "http://{}:{}/openmano/{}/wims/{}/port_mapping".format(mano_host, mano_port, tenant, wim)
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, 4)
+
+
def wim_port_mapping_clear(args):
    """Delete the port mapping of a WIM, confirming first unless --force."""
    tenant = _get_tenant()
    wim = _get_wim(args.name, tenant)

    if not args.force:
        answer = input("Clear WIM port mapping for wim {} (y/N)? ".format(wim))
        if not answer or answer[0].lower() != "y":
            return 0

    url = "http://{}:{}/openmano/{}/wims/{}/port_mapping".format(mano_host, mano_port, tenant, wim)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return response.status_code
+
+
def version(args):
    """Query the openmano server for its version and print the raw reply."""
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    url = "http://{}:{}/openmano/version".format(mano_host, mano_port)
    response = requests.get(url, headers=headers_req)
    logger.debug("openmano response: %s", response.text)
    print(response.text)
+
+
+def main():
+ global mano_host
+ global mano_port
+ global mano_tenant
+ global logger
+ mano_tenant = os.getenv('OPENMANO_TENANT', None)
+ mano_host = os.getenv('OPENMANO_HOST',"localhost")
+ mano_port = os.getenv('OPENMANO_PORT',"9090")
+ mano_datacenter = os.getenv('OPENMANO_DATACENTER',None)
+ # WIM env variable for default WIM
+ mano_wim = os.getenv('OPENMANO_WIM', None)
+
+ main_parser = ThrowingArgumentParser(description='User program to interact with OPENMANO-SERVER (openmanod)')
+ main_parser.add_argument('--version', action='version', help="get version of this client",
+ version='%(prog)s client version ' + __version__ +
+ " (Note: use '%(prog)s version' to get server version)")
+
+ subparsers = main_parser.add_subparsers(help='commands')
+
+ parent_parser = argparse.ArgumentParser(add_help=False)
+ parent_parser.add_argument('--verbose', '-v', action='count', help="increase verbosity level. Use several times")
+ parent_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+
+ config_parser = subparsers.add_parser('config', parents=[parent_parser], help="prints configuration values")
+ config_parser.add_argument("-n", action="store_true", help="resolves tenant and datacenter names")
+ config_parser.set_defaults(func=config)
+
+ version_parser = subparsers.add_parser('version', parents=[parent_parser], help="get server version")
+ version_parser.set_defaults(func=version)
+
+ vnf_create_parser = subparsers.add_parser('vnf-create', parents=[parent_parser], help="adds a vnf into the catalogue")
+ vnf_create_parser.add_argument("file", action="store", help="location of the JSON file describing the VNF").completer = FilesCompleter
+ vnf_create_parser.add_argument("--name", action="store", help="name of the VNF (if it exists in the VNF descriptor, it is overwritten)")
+ vnf_create_parser.add_argument("--description", action="store", help="description of the VNF (if it exists in the VNF descriptor, it is overwritten)")
+ vnf_create_parser.add_argument("--image-path", action="store", help="change image path locations (overwritten)")
+ vnf_create_parser.add_argument("--image-name", action="store", help="change image name (overwritten)")
+ vnf_create_parser.add_argument("--image-checksum", action="store", help="change image checksum (overwritten)")
+ vnf_create_parser.set_defaults(func=vnf_create)
+
+ vnf_list_parser = subparsers.add_parser('vnf-list', parents=[parent_parser], help="lists information about a vnf")
+ vnf_list_parser.add_argument("name", nargs='?', help="name of the VNF")
+ vnf_list_parser.add_argument("-a", "--all", action="store_true", help="shows all vnfs, not only the owned or public ones")
+ #vnf_list_parser.add_argument('--descriptor', help="prints the VNF descriptor", action="store_true")
+ vnf_list_parser.set_defaults(func=vnf_list)
+
+ vnf_delete_parser = subparsers.add_parser('vnf-delete', parents=[parent_parser], help="deletes a vnf from the catalogue")
+ vnf_delete_parser.add_argument("name", action="store", help="name or uuid of the VNF to be deleted")
+ vnf_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+ vnf_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
+ vnf_delete_parser.set_defaults(func=vnf_delete)
+
+ scenario_create_parser = subparsers.add_parser('scenario-create', parents=[parent_parser], help="adds a scenario into the OPENMANO DB")
+ scenario_create_parser.add_argument("file", action="store", help="location of the YAML file describing the scenario").completer = FilesCompleter
+ scenario_create_parser.add_argument("--name", action="store", help="name of the scenario (if it exists in the YAML scenario, it is overwritten)")
+ scenario_create_parser.add_argument("--description", action="store", help="description of the scenario (if it exists in the YAML scenario, it is overwritten)")
+ scenario_create_parser.set_defaults(func=scenario_create)
+
+ scenario_list_parser = subparsers.add_parser('scenario-list', parents=[parent_parser], help="lists information about a scenario")
+ scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario")
+ #scenario_list_parser.add_argument('--descriptor', help="prints the scenario descriptor", action="store_true")
+ scenario_list_parser.add_argument("-a", "--all", action="store_true", help="shows all scenarios, not only the owned or public ones")
+ scenario_list_parser.set_defaults(func=scenario_list)
+
+ scenario_delete_parser = subparsers.add_parser('scenario-delete', parents=[parent_parser], help="deletes a scenario from the OPENMANO DB")
+ scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario to be deleted")
+ scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+ scenario_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
+ scenario_delete_parser.set_defaults(func=scenario_delete)
+
+ scenario_deploy_parser = subparsers.add_parser('scenario-deploy', parents=[parent_parser], help="deploys a scenario")
+ scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be deployed")
+ scenario_deploy_parser.add_argument("name", action="store", help="name of the instance")
+ scenario_deploy_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources")
+ scenario_deploy_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available")
+ scenario_deploy_parser.add_argument("--description", action="store", help="description of the instance")
+ scenario_deploy_parser.set_defaults(func=scenario_deploy)
+
+ scenario_deploy_parser = subparsers.add_parser('scenario-verify', help="verifies if a scenario can be deployed (deploys it and deletes it)")
+ scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be verified")
+ scenario_deploy_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+ scenario_deploy_parser.set_defaults(func=scenario_verify)
+
+ instance_scenario_create_parser = subparsers.add_parser('instance-scenario-create', parents=[parent_parser], help="deploys a scenario")
+ instance_scenario_create_parser.add_argument("file", nargs='?', help="descriptor of the instance. Must be a file or yaml/json text")
+ instance_scenario_create_parser.add_argument("--scenario", action="store", help="name or uuid of the scenario to be deployed")
+ instance_scenario_create_parser.add_argument("--name", action="store", help="name of the instance")
+ instance_scenario_create_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources")
+ instance_scenario_create_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available")
+ instance_scenario_create_parser.add_argument("--netmap-use", action="append", type=str, dest="netmap_use", help="indicates a datacenter network to map a scenario network 'scenario-network=datacenter-network'. Can be used several times")
+ instance_scenario_create_parser.add_argument("--netmap-create", action="append", type=str, dest="netmap_create", help="the scenario network must be created at datacenter 'scenario-network[=datacenter-network-name]' . Can be used several times")
+ instance_scenario_create_parser.add_argument("--keypair", action="append", type=str, dest="keypair", help="public key for ssh access. Format '[user:]key1[,key2...]'. Can be used several times")
+ instance_scenario_create_parser.add_argument("--keypair-auto", action="store_true", dest="keypair_auto", help="Inject the user ssh-keys found at $HOME/.ssh directory")
+ instance_scenario_create_parser.add_argument("--description", action="store", help="description of the instance")
+ instance_scenario_create_parser.set_defaults(func=instance_create)
+
+ instance_scenario_list_parser = subparsers.add_parser('instance-scenario-list', parents=[parent_parser], help="lists information about a scenario instance")
+ instance_scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario instance")
+ instance_scenario_list_parser.add_argument("-a", "--all", action="store_true", help="shows all instance-scenarios, not only the owned")
+ instance_scenario_list_parser.set_defaults(func=instance_scenario_list)
+
+ instance_scenario_delete_parser = subparsers.add_parser('instance-scenario-delete', parents=[parent_parser], help="deletes a scenario instance (and deletes all VM and net instances in VIM)")
+ instance_scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario instance to be deleted")
+ instance_scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+ instance_scenario_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
+ instance_scenario_delete_parser.set_defaults(func=instance_scenario_delete)
+
+ instance_scenario_action_parser = subparsers.add_parser('instance-scenario-action', parents=[parent_parser], help="invoke an action over part or the whole scenario instance")
+ instance_scenario_action_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
+ instance_scenario_action_parser.add_argument("action", action="store", type=str, \
+ choices=["start","pause","resume","shutoff","shutdown","forceOff","rebuild","reboot", "console", "add_public_key","vdu-scaling"],\
+ help="action to send")
+ instance_scenario_action_parser.add_argument("param", nargs='?', help="addional param of the action. e.g. console: novnc; reboot: type; vdu-scaling: '[{vdu-id: xxx, type: create|delete, count: 1}]'")
+ instance_scenario_action_parser.add_argument("--vnf", action="append", help="VNF to act on (can use several entries)")
+ instance_scenario_action_parser.add_argument("--vm", action="append", help="VM to act on (can use several entries)")
+ instance_scenario_action_parser.set_defaults(func=instance_scenario_action)
+
+ action_parser = subparsers.add_parser('action-list', parents=[parent_parser], help="get action over an instance status")
+ action_parser.add_argument("id", nargs='?', action="store", help="action id")
+ action_parser.add_argument("--instance", action="store", help="fitler by this instance_id")
+ action_parser.add_argument("--all", action="store", help="Not filter by tenant")
+ action_parser.set_defaults(func=get_action)
+
+ #instance_scenario_status_parser = subparsers.add_parser('instance-scenario-status', help="show the status of a scenario instance")
+ #instance_scenario_status_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
+ #instance_scenario_status_parser.set_defaults(func=instance_scenario_status)
+
+ tenant_create_parser = subparsers.add_parser('tenant-create', parents=[parent_parser], help="creates a new tenant")
+ tenant_create_parser.add_argument("name", action="store", help="name for the tenant")
+ tenant_create_parser.add_argument("--description", action="store", help="description of the tenant")
+ tenant_create_parser.set_defaults(func=tenant_create)
+
+ tenant_delete_parser = subparsers.add_parser('tenant-delete', parents=[parent_parser], help="deletes a tenant from the catalogue")
+ tenant_delete_parser.add_argument("name", action="store", help="name or uuid of the tenant to be deleted")
+ tenant_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+ tenant_delete_parser.set_defaults(func=tenant_delete)
+
+ tenant_list_parser = subparsers.add_parser('tenant-list', parents=[parent_parser], help="lists information about a tenant")
+ tenant_list_parser.add_argument("name", nargs='?', help="name or uuid of the tenant")
+ tenant_list_parser.set_defaults(func=tenant_list)
+
+ element_edit_parser = subparsers.add_parser('tenant-edit', parents=[parent_parser], help="edits one tenant")
+ element_edit_parser.add_argument("name", help="name or uuid of the tenant")
+ element_edit_parser.add_argument("file", help="json/yaml text or file with the changes").completer = FilesCompleter
+ element_edit_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+ element_edit_parser.set_defaults(func=element_edit, element='tenants')
+
+ datacenter_create_parser = subparsers.add_parser('datacenter-create', parents=[parent_parser], help="creates a new datacenter")
+ datacenter_create_parser.add_argument("name", action="store", help="name for the datacenter")
+ datacenter_create_parser.add_argument("url", action="store", help="url for the datacenter")
+ datacenter_create_parser.add_argument("--url_admin", action="store", help="url for administration for the datacenter")
+ datacenter_create_parser.add_argument("--type", action="store", help="datacenter type: openstack or openvim (default)")
+ datacenter_create_parser.add_argument("--config", action="store", help="aditional configuration in json/yaml format")
+ datacenter_create_parser.add_argument("--description", action="store", help="description of the datacenter")
+ datacenter_create_parser.add_argument("--sdn-controller", action="store", help="Name or uuid of the SDN controller to be used", dest='sdn_controller')
+ datacenter_create_parser.set_defaults(func=datacenter_create)
+
+ datacenter_delete_parser = subparsers.add_parser('datacenter-delete', parents=[parent_parser], help="deletes a datacenter from the catalogue")
+ datacenter_delete_parser.add_argument("name", action="store", help="name or uuid of the datacenter to be deleted")
+ datacenter_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+ datacenter_delete_parser.set_defaults(func=datacenter_delete)
+
+ datacenter_edit_parser = subparsers.add_parser('datacenter-edit', parents=[parent_parser], help="Edit datacenter")
+ datacenter_edit_parser.add_argument("name", help="name or uuid of the datacenter")
+ datacenter_edit_parser.add_argument("--file", help="json/yaml text or file with the changes").completer = FilesCompleter
+ datacenter_edit_parser.add_argument("--sdn-controller", action="store",
+ help="Name or uuid of the SDN controller to be used. Specify 'null' to clear entry", dest='sdn_controller')
+ datacenter_edit_parser.add_argument("-f", "--force", action="store_true", help="do not prompt for confirmation")
+ datacenter_edit_parser.set_defaults(func=datacenter_edit)
+
+ datacenter_list_parser = subparsers.add_parser('datacenter-list', parents=[parent_parser], help="lists information about a datacenter")
+ datacenter_list_parser.add_argument("name", nargs='?', help="name or uuid of the datacenter")
+ datacenter_list_parser.add_argument("-a", "--all", action="store_true", help="shows all datacenters, not only datacenters attached to tenant")
+ datacenter_list_parser.set_defaults(func=datacenter_list)
+
+ datacenter_attach_parser = subparsers.add_parser('datacenter-attach', parents=[parent_parser], help="associates a datacenter to the operating tenant")
+ datacenter_attach_parser.add_argument("name", help="name or uuid of the datacenter")
+ datacenter_attach_parser.add_argument('--vim-tenant-id', action='store', help="specify a datacenter tenant to use. A new one is created by default")
+ datacenter_attach_parser.add_argument('--vim-tenant-name', action='store', help="specify a datacenter tenant name.")
+ datacenter_attach_parser.add_argument("--user", action="store", help="user credentials for the datacenter")
+ datacenter_attach_parser.add_argument("--password", action="store", help="password credentials for the datacenter")
+ datacenter_attach_parser.add_argument("--config", action="store", help="aditional configuration in json/yaml format")
+ datacenter_attach_parser.set_defaults(func=datacenter_attach)
+
+ datacenter_edit_vim_tenant_parser = subparsers.add_parser('datacenter-edit-vim-tenant', parents=[parent_parser],
+ help="Edit the association of a datacenter to the operating tenant")
+ datacenter_edit_vim_tenant_parser.add_argument("name", help="name or uuid of the datacenter")
+ datacenter_edit_vim_tenant_parser.add_argument('--vim-tenant-id', action='store',
+ help="specify a datacenter tenant to use. A new one is created by default")
+ datacenter_edit_vim_tenant_parser.add_argument('--vim-tenant-name', action='store', help="specify a datacenter tenant name.")
+ datacenter_edit_vim_tenant_parser.add_argument("--user", action="store", help="user credentials for the datacenter")
+ datacenter_edit_vim_tenant_parser.add_argument("--password", action="store", help="password credentials for the datacenter")
+ datacenter_edit_vim_tenant_parser.add_argument("--config", action="store",
+ help="aditional configuration in json/yaml format")
+ datacenter_edit_vim_tenant_parser.set_defaults(func=datacenter_edit_vim_tenant)
+
+ datacenter_detach_parser = subparsers.add_parser('datacenter-detach', parents=[parent_parser], help="removes the association between a datacenter and the operating tenant")
+ datacenter_detach_parser.add_argument("name", help="name or uuid of the datacenter")
+ datacenter_detach_parser.add_argument("-a", "--all", action="store_true", help="removes all associations from this datacenter")
+ datacenter_detach_parser.set_defaults(func=datacenter_detach)
+
+ #=======================datacenter_sdn_port_mapping_xxx section=======================
+ #datacenter_sdn_port_mapping_set
+ datacenter_sdn_port_mapping_set_parser = subparsers.add_parser('datacenter-sdn-port-mapping-set',
+ parents=[parent_parser],
+ help="Load a file with the mapping of physical ports "
+ "and the ports of the dataplaneswitch controlled "
+ "by a datacenter")
+ datacenter_sdn_port_mapping_set_parser.add_argument("name", action="store", help="specifies the datacenter")
+ datacenter_sdn_port_mapping_set_parser.add_argument("file",
+ help="json/yaml text or file with the port mapping").completer = FilesCompleter
+ datacenter_sdn_port_mapping_set_parser.add_argument("-f", "--force", action="store_true",
+ help="forces overwriting without asking")
+ datacenter_sdn_port_mapping_set_parser.set_defaults(func=datacenter_sdn_port_mapping_set)
+
+ #datacenter_sdn_port_mapping_list
+ datacenter_sdn_port_mapping_list_parser = subparsers.add_parser('datacenter-sdn-port-mapping-list',
+ parents=[parent_parser],
+ help="Show the SDN port mapping in a datacenter")
+ datacenter_sdn_port_mapping_list_parser.add_argument("name", action="store", help="specifies the datacenter")
+ datacenter_sdn_port_mapping_list_parser.set_defaults(func=datacenter_sdn_port_mapping_list)
+
+ # datacenter_sdn_port_mapping_clear
+ datacenter_sdn_port_mapping_clear_parser = subparsers.add_parser('datacenter-sdn-port-mapping-clear',
+ parents=[parent_parser],
+ help="Clean the the SDN port mapping in a datacenter")
+ datacenter_sdn_port_mapping_clear_parser.add_argument("name", action="store",
+ help="specifies the datacenter")
+ datacenter_sdn_port_mapping_clear_parser.add_argument("-f", "--force", action="store_true",
+ help="forces clearing without asking")
+ datacenter_sdn_port_mapping_clear_parser.set_defaults(func=datacenter_sdn_port_mapping_clear)
+ # =======================
+
+ # =======================sdn_controller_xxx section=======================
+ # sdn_controller_create
+ sdn_controller_create_parser = subparsers.add_parser('sdn-controller-create', parents=[parent_parser],
+ help="Creates an SDN controller entity within RO")
+ sdn_controller_create_parser.add_argument("name", help="name of the SDN controller")
+ sdn_controller_create_parser.add_argument("--description", action="store", help="description of the SDN controller")
+ sdn_controller_create_parser.add_argument("--ip", action="store", help="IP of the SDN controller")
+ sdn_controller_create_parser.add_argument("--port", action="store", help="Port of the SDN controller")
+ sdn_controller_create_parser.add_argument("--dpid", action="store",
+ help="DPID of the dataplane switch controlled by this SDN controller")
+ sdn_controller_create_parser.add_argument("--type", action="store",
+ help="Specify the SDN controller type. Valid types are 'opendaylight' and 'floodlight'")
+ sdn_controller_create_parser.add_argument("--user", action="store", help="user credentials for the SDN controller")
+ sdn_controller_create_parser.add_argument("--passwd", action="store", dest='password',
+ help="password credentials for the SDN controller")
+ sdn_controller_create_parser.set_defaults(func=sdn_controller_create)
+
+ # sdn_controller_edit
+ sdn_controller_edit_parser = subparsers.add_parser('sdn-controller-edit', parents=[parent_parser],
+ help="Update one or more options of a SDN controller")
+ sdn_controller_edit_parser.add_argument("name", help="name or uuid of the SDN controller", )
+ sdn_controller_edit_parser.add_argument("--name", action="store", help="Update the name of the SDN controller",
+ dest='new_name')
+ sdn_controller_edit_parser.add_argument("--description", action="store", help="description of the SDN controller")
+ sdn_controller_edit_parser.add_argument("--ip", action="store", help="IP of the SDN controller")
+ sdn_controller_edit_parser.add_argument("--port", action="store", help="Port of the SDN controller")
+ sdn_controller_edit_parser.add_argument("--dpid", action="store",
+ help="DPID of the dataplane switch controlled by this SDN controller")
+ sdn_controller_edit_parser.add_argument("--type", action="store",
+ help="Specify the SDN controller type. Valid types are 'opendaylight' and 'floodlight'")
+ sdn_controller_edit_parser.add_argument("--user", action="store", help="user credentials for the SDN controller")
+ sdn_controller_edit_parser.add_argument("--password", action="store",
+ help="password credentials for the SDN controller", dest='password')
+ sdn_controller_edit_parser.add_argument("-f", "--force", action="store_true", help="do not prompt for confirmation")
+ #TODO: include option --file
+ sdn_controller_edit_parser.set_defaults(func=sdn_controller_edit)
+
+ #sdn_controller_list
+ sdn_controller_list_parser = subparsers.add_parser('sdn-controller-list',
+ parents=[parent_parser],
+ help="List the SDN controllers")
+ sdn_controller_list_parser.add_argument("name", nargs='?', help="name or uuid of the SDN controller")
+ sdn_controller_list_parser.set_defaults(func=sdn_controller_list)
+
+ # sdn_controller_delete
+ sdn_controller_delete_parser = subparsers.add_parser('sdn-controller-delete',
+ parents=[parent_parser],
+ help="Delete the the SDN controller")
+ sdn_controller_delete_parser.add_argument("name", help="name or uuid of the SDN controller")
+ sdn_controller_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+ sdn_controller_delete_parser.set_defaults(func=sdn_controller_delete)
+ # =======================
+
+ # WIM ======================= WIM section==================
+
+ # WIM create
+ wim_create_parser = subparsers.add_parser('wim-create',
+ parents=[parent_parser], help="creates a new wim")
+ wim_create_parser.add_argument("name", action="store",
+ help="name for the wim")
+ wim_create_parser.add_argument("url", action="store",
+ help="url for the wim")
+ wim_create_parser.add_argument("--type", action="store",
+ help="wim type: ietfl2vpn, dynpac, ...")
+ wim_create_parser.add_argument("--config", action="store",
+ help="additional configuration in json/yaml format")
+ wim_create_parser.add_argument("--description", action="store",
+ help="description of the wim")
+ wim_create_parser.set_defaults(func=wim_create)
+
+ # WIM delete
+ wim_delete_parser = subparsers.add_parser('wim-delete',
+ parents=[parent_parser], help="deletes a wim from the catalogue")
+ wim_delete_parser.add_argument("name", action="store",
+ help="name or uuid of the wim to be deleted")
+ wim_delete_parser.add_argument("-f", "--force", action="store_true",
+ help="forces deletion without asking")
+ wim_delete_parser.set_defaults(func=wim_delete)
+
+ # WIM edit
+ wim_edit_parser = subparsers.add_parser('wim-edit',
+ parents=[parent_parser], help="edits a wim")
+ wim_edit_parser.add_argument("name", help="name or uuid of the wim")
+ wim_edit_parser.add_argument("--file",
+ help="json/yaml text or file with the changes")\
+ .completer = FilesCompleter
+ wim_edit_parser.add_argument("-f", "--force", action="store_true",
+ help="do not prompt for confirmation")
+ wim_edit_parser.set_defaults(func=wim_edit)
+
+ # WIM list
+ wim_list_parser = subparsers.add_parser('wim-list',
+ parents=[parent_parser],
+ help="lists information about registered wims")
+ wim_list_parser.add_argument("name", nargs='?',
+ help="name or uuid of the wim")
+ wim_list_parser.add_argument("-a", "--all", action="store_true",
+ help="shows all wims, not only wims attached to tenant")
+ wim_list_parser.set_defaults(func=wim_list)
+
+ # WIM account create
+ wim_attach_parser = subparsers.add_parser('wim-account-create', parents=
+ [parent_parser], help="associates a wim account to the operating tenant")
+ wim_attach_parser.add_argument("name", help="name or uuid of the wim")
+ wim_attach_parser.add_argument('--account-name', action='store',
+ help="specify a name for the wim account.")
+ wim_attach_parser.add_argument("--user", action="store",
+ help="user credentials for the wim account")
+ wim_attach_parser.add_argument("--password", action="store",
+ help="password credentials for the wim account")
+ wim_attach_parser.add_argument("--config", action="store",
+ help="additional configuration in json/yaml format")
+ wim_attach_parser.set_defaults(func=wim_account_create)
+
+ # WIM account delete
+ wim_detach_parser = subparsers.add_parser('wim-account-delete',
+ parents=[parent_parser],
+ help="removes the association "
+ "between a wim account and the operating tenant")
+ wim_detach_parser.add_argument("name", help="name or uuid of the wim")
+ wim_detach_parser.add_argument("-a", "--all", action="store_true",
+ help="removes all associations from this wim")
+ wim_detach_parser.add_argument("-f", "--force", action="store_true",
+ help="forces delete without asking")
+ wim_detach_parser.set_defaults(func=wim_account_delete)
+
+ # WIM account edit
+ wim_attach_edit_parser = subparsers.add_parser('wim-account-edit', parents=
+ [parent_parser], help="modifies the association of a wim account to the operating tenant")
+ wim_attach_edit_parser.add_argument("name", help="name or uuid of the wim")
+ wim_attach_edit_parser.add_argument('--account-name', action='store',
+ help="specify a name for the wim account.")
+ wim_attach_edit_parser.add_argument("--user", action="store",
+ help="user credentials for the wim account")
+ wim_attach_edit_parser.add_argument("--password", action="store",
+ help="password credentials for the wim account")
+ wim_attach_edit_parser.add_argument("--config", action="store",
+ help="additional configuration in json/yaml format")
+ wim_attach_edit_parser.set_defaults(func=wim_account_edit)
+
+ # WIM port mapping set
+ wim_port_mapping_set_parser = subparsers.add_parser('wim-port-mapping-set',
+ parents=[parent_parser],
+ help="Load a file with the mappings "
+ "of ports of a WAN switch that is "
+ "connected to a PoP and the ports "
+ "of the switch controlled by the PoP")
+ wim_port_mapping_set_parser.add_argument("name", action="store",
+ help="specifies the wim")
+ wim_port_mapping_set_parser.add_argument("file",
+ help="json/yaml text or file with the wim port mapping")\
+ .completer = FilesCompleter
+ wim_port_mapping_set_parser.add_argument("-f", "--force",
+ action="store_true", help="forces overwriting without asking")
+ wim_port_mapping_set_parser.set_defaults(func=wim_port_mapping_set)
+
+ # WIM port mapping list
+ wim_port_mapping_list_parser = subparsers.add_parser('wim-port-mapping-list',
+ parents=[parent_parser], help="Show the port mappings for a wim")
+ wim_port_mapping_list_parser.add_argument("name", action="store",
+ help="specifies the wim")
+ wim_port_mapping_list_parser.set_defaults(func=wim_port_mapping_list)
+
+ # WIM port mapping clear
+ wim_port_mapping_clear_parser = subparsers.add_parser('wim-port-mapping-clear',
+ parents=[parent_parser], help="Clean the port mapping in a wim")
+ wim_port_mapping_clear_parser.add_argument("name", action="store",
+ help="specifies the wim")
+ wim_port_mapping_clear_parser.add_argument("-f", "--force",
+ action="store_true",
+ help="forces clearing without asking")
+ wim_port_mapping_clear_parser.set_defaults(func=wim_port_mapping_clear)
+
+ # =======================================================
+
+ action_dict={'net-update': 'retrieves external networks from datacenter',
+ 'net-edit': 'edits an external network',
+ 'net-delete': 'deletes an external network',
+ 'net-list': 'lists external networks from a datacenter'
+ }
+ for item in action_dict:
+ datacenter_action_parser = subparsers.add_parser('datacenter-'+item, parents=[parent_parser], help=action_dict[item])
+ datacenter_action_parser.add_argument("datacenter", help="name or uuid of the datacenter")
+ if item=='net-edit' or item=='net-delete':
+ datacenter_action_parser.add_argument("net", help="name or uuid of the datacenter net")
+ if item=='net-edit':
+ datacenter_action_parser.add_argument("file", help="json/yaml text or file with the changes").completer = FilesCompleter
+ if item!='net-list':
+ datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+ datacenter_action_parser.set_defaults(func=datacenter_net_action, action=item)
+
+
+ action_dict={'netmap-import': 'create network senario netmap base on the datacenter networks',
+ 'netmap-create': 'create a new network senario netmap',
+ 'netmap-edit': 'edit name of a network senario netmap',
+ 'netmap-delete': 'deletes a network scenario netmap (--all for clearing all)',
+ 'netmap-list': 'list/show network scenario netmaps'
+ }
+ for item in action_dict:
+ datacenter_action_parser = subparsers.add_parser('datacenter-'+item, parents=[parent_parser], help=action_dict[item])
+ datacenter_action_parser.add_argument("--datacenter", help="name or uuid of the datacenter")
+ #if item=='net-add':
+ # datacenter_action_parser.add_argument("net", help="name of the network")
+ if item=='netmap-delete':
+ datacenter_action_parser.add_argument("netmap", nargs='?',help="name or uuid of the datacenter netmap to delete")
+ datacenter_action_parser.add_argument("--all", action="store_true", help="delete all netmap of this datacenter")
+ datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+ if item=='netmap-edit':
+ datacenter_action_parser.add_argument("netmap", help="name or uuid of the datacenter netmap do edit")
+ datacenter_action_parser.add_argument("file", nargs='?', help="json/yaml text or file with the changes").completer = FilesCompleter
+ datacenter_action_parser.add_argument("--name", action='store', help="name to assign to the datacenter netmap")
+ datacenter_action_parser.add_argument('--vim-id', action='store', help="specify vim network uuid")
+ datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+ if item=='netmap-list':
+ datacenter_action_parser.add_argument("netmap", nargs='?',help="name or uuid of the datacenter netmap to show")
+ if item=='netmap-create':
+ datacenter_action_parser.add_argument("file", nargs='?', help="json/yaml text or file descriptor with the changes").completer = FilesCompleter
+ datacenter_action_parser.add_argument("--name", action='store', help="name to assign to the datacenter netmap, by default same as vim-name")
+ datacenter_action_parser.add_argument('--vim-id', action='store', help="specify vim network uuid")
+ datacenter_action_parser.add_argument('--vim-name', action='store', help="specify vim network name")
+ if item=='netmap-import':
+ datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+ datacenter_action_parser.set_defaults(func=datacenter_netmap_action, action=item)
+
+ # =======================vim_net_sdn_xxx section=======================
+ # vim_net_sdn_attach
+ vim_net_sdn_attach_parser = subparsers.add_parser('vim-net-sdn-attach',
+ parents=[parent_parser],
+ help="Specify the port to access to an external network using SDN")
+ vim_net_sdn_attach_parser.add_argument("vim_net", action="store",
+ help="Name/id of the network in the vim that will be used to connect to the external network")
+ vim_net_sdn_attach_parser.add_argument("port", action="store", help="Specifies the port in the dataplane switch to access to the external network")
+ vim_net_sdn_attach_parser.add_argument("--vlan", action="store", help="Specifies the vlan (if any) to use in the defined port")
+ vim_net_sdn_attach_parser.add_argument("--mac", action="store", help="Specifies the MAC (if known) of the physical device that will be reachable by this external port")
+ vim_net_sdn_attach_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+ vim_net_sdn_attach_parser.set_defaults(func=vim_net_sdn_attach)
+
+ # vim_net_sdn_detach
+ vim_net_sdn_detach_parser = subparsers.add_parser('vim-net-sdn-detach',
+ parents=[parent_parser],
+ help="Remove the port information to access to an external network using SDN")
+
+ vim_net_sdn_detach_parser.add_argument("vim_net", action="store", help="Name/id of the vim network")
+ vim_net_sdn_detach_parser.add_argument("--id", action="store",help="Specify the uuid of the external ports from this network to be detached")
+ vim_net_sdn_detach_parser.add_argument("--all", action="store_true", help="Detach all external ports from this network")
+ vim_net_sdn_detach_parser.add_argument("-f", "--force", action="store_true", help="forces clearing without asking")
+ vim_net_sdn_detach_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+ vim_net_sdn_detach_parser.set_defaults(func=vim_net_sdn_detach)
+ # =======================
+
+ for item in ("network", "tenant", "image"):
+ if item=="network":
+ command_name = 'vim-net'
+ else:
+ command_name = 'vim-'+item
+ vim_item_list_parser = subparsers.add_parser(command_name + '-list', parents=[parent_parser], help="list the vim " + item + "s")
+ vim_item_list_parser.add_argument("name", nargs='?', help="name or uuid of the " + item + "s")
+ vim_item_list_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+ vim_item_list_parser.set_defaults(func=vim_action, item=item, action="list")
+
+ vim_item_del_parser = subparsers.add_parser(command_name + '-delete', parents=[parent_parser], help="list the vim " + item + "s")
+ vim_item_del_parser.add_argument("name", help="name or uuid of the " + item + "s")
+ vim_item_del_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+ vim_item_del_parser.set_defaults(func=vim_action, item=item, action="delete")
+
+ if item == "network" or item == "tenant":
+ vim_item_create_parser = subparsers.add_parser(command_name + '-create', parents=[parent_parser], help="create a "+item+" at vim")
+ vim_item_create_parser.add_argument("file", nargs='?', help="descriptor of the {}. Must be a file or yaml/json text".format(item)).completer = FilesCompleter
+ vim_item_create_parser.add_argument("--name", action="store", help="name of the {}".format(item))
+ vim_item_create_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+ if item=="network":
+ vim_item_create_parser.add_argument("--type", action="store", help="type of network, data, ptp, bridge")
+ vim_item_create_parser.add_argument("--shared", action="store_true", help="Private or shared")
+ vim_item_create_parser.add_argument("--bind-net", action="store", help="For openvim datacenter type, net to be bind to, for vlan type, use sufix ':<vlan_tag>'")
+ else:
+ vim_item_create_parser.add_argument("--description", action="store", help="description of the {}".format(item))
+ vim_item_create_parser.set_defaults(func=vim_action, item=item, action="create")
+
+ argcomplete.autocomplete(main_parser)
+
+ try:
+ args = main_parser.parse_args()
+ #logging info
+ level = logging.CRITICAL
+ streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s"
+ if "debug" in args and args.debug:
+ level = logging.DEBUG
+ logging.basicConfig(format=streamformat, level= level)
+ logger = logging.getLogger('mano')
+ logger.setLevel(level)
+ # print("#TODO py3", args)
+ result = args.func(args)
+ if result == None:
+ result = 0
+ #for some reason it fails if call exit inside try instance. Need to call exit at the end !?
+ except (requests.exceptions.ConnectionError):
+ print("Connection error: not possible to contact OPENMANO-SERVER (openmanod)")
+ result = -2
+ except (KeyboardInterrupt):
+ print('Exiting openmano')
+ result = -3
+ except (SystemExit, ArgumentParserError):
+ result = -4
+ except (AttributeError):
+ print("Type '--help' for more information")
+ result = -4
+ except OpenmanoCLIError as e:
+ # print("#TODO py3", e)
+ print(e)
+ result = -5
+
+ # print(result)
+ exit(result)
+
+
# Script entry point: dispatch to the CLI main() defined above.
if __name__ == '__main__':
    main()
+
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+argcomplete
+requests==2.*
+PyYAML
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Copyright 2018 Telefonica S.A.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Packaging script for the OSM RO client (roclient) CLI package.
import os
from setuptools import setup

_name = "osm_roclient"
# version is at first line of osm_roclient/html_public/version
here = os.path.abspath(os.path.dirname(__file__))
# Ship the README as the package long description.
with open(os.path.join(here, 'README.rst')) as readme_file:
    README = readme_file.read()

setup(
    name=_name,
    description='OSM ro client',
    long_description=README,
    # Version is derived from "git describe" by the setuptools-version-command
    # plugin (declared in setup_requires below); no hard-coded version here.
    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
    # version=VERSION,
    # python_requires='>3.5.0',
    author='ETSI OSM',
    author_email='alfonso.tiernosepulveda@telefonica.com',
    maintainer='Alfonso Tierno',
    maintainer_email='alfonso.tiernosepulveda@telefonica.com',
    # NOTE(review): this URL points at the LCM repository although the package
    # is the RO client -- looks like a copy-paste leftover; confirm intended URL.
    url='https://osm.etsi.org/gitweb/?p=osm/LCM.git;a=summary',
    license='Apache 2.0',

    packages=[_name],
    include_package_data=True,
    # data_files=[('/etc/osm/', ['osm_roclient/lcm.cfg']),
    # ('/etc/systemd/system/', ['osm_roclient/osm-lcm.service']),
    # ],
    install_requires=[
        'PyYAML',
        'requests==2.*',
        'argcomplete',
    ],
    setup_requires=['setuptools-version-command'],
    entry_points={
        "console_scripts": [
            # installs an "openmano" command mapped to roclient.main()
            "openmano=osm_roclient.roclient:main"
        ]
    },
)
--- /dev/null
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Maintainer: Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>
+Depends3: python3-argcomplete, python3-requests, python3-yaml
--- /dev/null
+# Copyright 2018 Telefonica S.A.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[tox]
+envlist = flake8
+toxworkdir={toxinidir}/../.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_roclient --max-line-length 120 \
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_roclient.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+ setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+#include MANIFEST.in
+#include requirements.txt
include README.rst
include requirements.txt
recursive-include osm_ro *
+
--- /dev/null
+# Copyright 2018 Telefonica S.A.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: all test clean
+
+SHELL := /bin/bash
+
+BRANCH ?= master
+
+all: clean package
+
+clean:
+ rm -rf dist deb_dist osm_ro-*.tar.gz osm_ro.egg-info .eggs
+ find osm_ro -name '*.pyc' -delete
+ find osm_ro -name '*.pyo' -delete
+
+package:
+# apt-get install -y python-stdeb
+ python3 setup.py --command-packages=stdeb.command sdist_dsc
+ cp debian/python3-osm-ro.postinst deb_dist/osm-ro*/debian/
+ # cd deb_dist/osm-nbi*/debian && echo "cryptography python3-cryptography (>= 1.9)" > py3dist-overrides
+ cd deb_dist/osm-ro*/ && dpkg-buildpackage -rfakeroot -uc -us
+ # mkdir -p .build
+ # cp build/deb_dist/python-*.deb .build/
+
+clean_build:
+ rm -rf build
+ find osm_ro -name '*.pyc' -delete
+ find osm_ro -name '*.pyo' -delete
+
+prepare:
# pip install --user --upgrade setuptools
+ mkdir -p build/
+# VER1=$(shell git describe | sed -e 's/^v//' |cut -d- -f1); \
+# VER2=$(shell git describe | cut -d- -f2); \
+# VER3=$(shell git describe | cut -d- -f3); \
+# echo "$$VER1.dev$$VER2+$$VER3" > build/RO_VERSION
+ cp tox.ini build/
+ cp MANIFEST.in build/
+ cp requirements.txt build/
+ cp README.rst build/
+ cp setup.py build/
+ cp stdeb.cfg build/
+ cp -r osm_ro build/
+ cp openmano build/
+ cp openmanod build/
+ cp -r vnfs build/osm_ro
+ cp -r scenarios build/osm_ro
+ cp -r instance-scenarios build/osm_ro
+ cp -r scripts build/osm_ro
+ cp -r database_utils build/osm_ro
+ cp LICENSE build/osm_ro
+
+connectors: prepare
+ # python-novaclient is required for that
+ rm -f build/osm_ro/openmanolinkervimconn.py
+ cd build/osm_ro; for i in `ls vimconn_*.py |sed "s/\.py//"` ; do echo "import $$i" >> openmanolinkervimconn.py; done
+ python build/osm_ro/openmanolinkervimconn.py 2>&1
+ rm -f build/osm_ro/openmanolinkervimconn.py
+
+build: connectors prepare
+ python -m py_compile build/osm_ro/*.py
+# cd build && tox -e flake8
+
+lib-openvim:
+ $(shell git clone https://osm.etsi.org/gerrit/osm/openvim)
+ LIB_BRANCH=$(shell git -C openvim branch -a|grep -oP 'remotes/origin/\K$(BRANCH)'); \
+ [ -z "$$LIB_BRANCH" ] && LIB_BRANCH='master'; \
+ echo "BRANCH: $(BRANCH)"; \
+ echo "LIB_OPENVIM_BRANCH: $$LIB_BRANCH"; \
+ git -C openvim checkout $$LIB_BRANCH
+ make -C openvim clean lite
+
+osm-im:
+ $(shell git clone https://osm.etsi.org/gerrit/osm/IM)
+ make -C IM clean all
+
+snap:
+ echo "Nothing to be done yet"
+
+install: lib-openvim osm-im
+ dpkg -i IM/deb_dist/python-osm-im*.deb
+ dpkg -i openvim/.build/python-lib-osm-openvim*.deb
+ dpkg -i .build/python-osm-ro*.deb
+ cd .. && \
+ OSMLIBOVIM_PATH=`python -c 'import lib_osm_openvim; print lib_osm_openvim.__path__[0]'` || FATAL "lib-osm-openvim was not properly installed" && \
+ OSMRO_PATH=`python -c 'import osm_ro; print osm_ro.__path__[0]'` || FATAL "osm-ro was not properly installed" && \
+ USER=root DEBIAN_FRONTEND=noninteractive $$OSMRO_PATH/database_utils/install-db-server.sh --updatedb || FATAL "osm-ro db installation failed" && \
+ USER=root DEBIAN_FRONTEND=noninteractive $$OSMLIBOVIM_PATH/database_utils/install-db-server.sh -u mano -p manopw -d mano_vim_db --updatedb || FATAL "lib-osm-openvim db installation failed"
+ service osm-ro restart
+
+develop: prepare
+# pip install -r requirements.txt
+ cd build && ./setup.py develop
+
+test:
+ . ./test/basictest.sh -f --insert-bashrc --install-openvim --init-openvim
+ . ./test/basictest.sh -f reset add-openvim
+ ./test/test_RO.py deploy -n mgmt -t osm -i cirros034 -d local-openvim --timeout=30 --failfast
+ ./test/test_RO.py vim -t osm -d local-openvim --timeout=30 --failfast
+
+build-docker-from-source:
+ docker build -t osm/openmano -f docker/Dockerfile-local .
+
+run-docker:
+ docker-compose -f docker/openmano-compose.yml up
+
+stop-docker:
+ docker-compose -f docker/openmano-compose.yml down
+
+
--- /dev/null
+ Copyright 2018 Telefonica S.A.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+===========
+osm-ro
+===========
+
+osm-ro is the Resource Orchestrator for OSM, dealing with resource operations
+against different VIMs such as Openstack, VMware's vCloud Director, openvim
+and AWS.
+
--- /dev/null
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: OSM_TECH@list.etsi.org
+##
+
+echo "POST INSTALL OSM-RO"
+# Ensure a proper version of cryptography, needed by paramiko
+python3 -m pip install "cryptography>=2.5" -U
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+Implement like a proxy for TCP/IP in a separated thread.
+It creates two sockets to bypass the TCP/IP packets among the fix console
+server specified at class construction (console_host, console_port)
+and a client that connect against the (host, port) specified also at construction
+
+ --------------------- -------------------------------
+ | OPENMANO | | VIM |
+client 1 ----> | ConsoleProxyThread | ------> | Console server |
+client 2 ----> | (host, port) | ------> |(console_host, console_server)|
+ ... -------------------- ------------------------------
+'''
+__author__="Alfonso Tierno"
+__date__ ="$19-nov-2015 09:07:15$"
+
+import socket
+import select
+import threading
+import logging
+
+
class ConsoleProxyException(Exception):
    """Base class for errors raised by the console proxy."""
class ConsoleProxyExceptionPortUsed(ConsoleProxyException):
    """Raised when the requested local port is already in use."""
+
+class ConsoleProxyThread(threading.Thread):
+ buffer_size = 4096
+ check_finish = 1 #frequency to check if requested to end in seconds
+
+ def __init__(self, host, port, console_host, console_port, log_level=None):
+ try:
+ threading.Thread.__init__(self)
+ self.console_host = console_host
+ self.console_port = console_port
+ self.host = host
+ self.port = port
+ self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.server.bind((host, port))
+ self.server.listen(200)
+ #TODO timeout in a lock section can be used to autoterminate the thread
+ #when inactivity and timeout<time : set timeout=0 and terminate
+ #from outside, close class when timeout==0; set timeout=time+120 when adding a new console on this thread
+ #set self.timeout = time.time() + 120 at init
+ self.name = "ConsoleProxy " + console_host + ":" + str(console_port)
+ self.input_list = [self.server]
+ self.channel = {}
+ self.terminate = False #put at True from outside to force termination
+ self.logger = logging.getLogger('openmano.console')
+ if log_level:
+ self.logger.setLevel( getattr(logging, log_level) )
+
+ except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
+ if e is socket.error and e.errno==98:
+ raise ConsoleProxyExceptionPortUsed("socket.error " + str(e))
+ raise ConsoleProxyException(type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0])) )
+
+ def run(self):
+ while True:
+ try:
+ inputready, _, _ = select.select(self.input_list, [], [], self.check_finish)
+ except select.error as e:
+ self.logger.error("Exception on select %s: %s", type(e).__name__, str(e) )
+ self.on_terminate()
+
+ if self.terminate:
+ self.on_terminate()
+ self.logger.debug("Terminate because commanded")
+ break
+
+ for sock in inputready:
+ if sock == self.server:
+ self.on_accept()
+ else:
+ self.on_recv(sock)
+
+ def on_terminate(self):
+ while self.input_list:
+ if self.input_list[0] is self.server:
+ self.server.close()
+ del self.input_list[0]
+ else:
+ self.on_close(self.input_list[0], "Terminating thread")
+
    def on_accept(self):
        """Accept a new client and open the matching connection to the console server.

        On success both sockets are appended to input_list and cross-linked in
        self.channel so that on_recv/on_close can find each socket's peer.
        Returns True on success; returns False if either the accept or the
        outgoing connect fails (the client socket is closed in the latter case).
        """
        #accept
        try:
            clientsock, clientaddr = self.server.accept()
        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            self.logger.error("Exception on_accept %s: %s", type(e).__name__, str(e) )
            return False
        #print self.name, ": Accept new client ", clientaddr

        #connect
        try:
            forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            forward.connect((self.console_host, self.console_port))
            # "<client> => (<proxy in> => <proxy out>) => <console server>";
            # note: multiple *-unpackings in one call require Python >= 3.5
            name = "{}:{} => ({}:{} => {}:{}) => {}:{}".format(
                *clientsock.getpeername(), *clientsock.getsockname(), *forward.getsockname(), *forward.getpeername() )
            self.logger.warning("new connection " + name)

            self.input_list.append(clientsock)
            self.input_list.append(forward)
            # both sockets map to the same info dict so either side can look up its peer
            info = { "name": name,
                     "clientsock" : clientsock,
                     "serversock" : forward
            }
            self.channel[clientsock] = info
            self.channel[forward] = info
            return True
        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            self.logger.error("Exception on_connect to server %s:%d; %s: %s Close client side %s",
                self.console_host, self.console_port, type(e).__name__, str(e), str(clientaddr) )
            clientsock.close()
            return False
+
+ def on_close(self, sock, cause):
+ if sock not in self.channel:
+ return #can happen if there is data ready to received at both sides and the channel has been deleted. QUITE IMPROBABLE but just in case
+ info = self.channel[sock]
+ # debug info
+ sockname = "client" if sock is info["clientsock"] else "server"
+ self.logger.warning("del connection %s %s at %s side", info["name"], str(cause), str(sockname))
+ # close sockets
+ try:
+ # close the connection with client
+ info["clientsock"].close() # equivalent to do self.s.close()
+ except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
+ self.logger.error("Exception on_close client socket %s: %s", type(e).__name__, str(e))
+ try:
+ # close the connection with remote server
+ info["serversock"].close()
+ except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
+ self.logger.error("Exception on_close server socket %s: %s", type(e).__name__, str(e) )
+
+ # remove objects from input_list
+ self.input_list.remove(info["clientsock"])
+ self.input_list.remove(info["serversock"])
+ # delete both objects from channel dict
+ del self.channel[info["clientsock"]]
+ del self.channel[info["serversock"]]
+
    def on_recv(self, sock):
        """Relay pending data from 'sock' to the opposite end of its channel.

        A zero-length read means the remote peer closed; any socket error
        tears down the whole channel via on_close().
        """
        if sock not in self.channel:
            return # can happen if there is data ready to received at both sides and the channel has been deleted. QUITE IMPROBABLE but just in case
        info = self.channel[sock]
        peersock = info["serversock"] if sock is info["clientsock"] else info["clientsock"]
        try:
            data = sock.recv(self.buffer_size)
            if len(data) == 0:
                self.on_close(sock, "peer closed")
            else:
                # print self.data
                # deliberate: from here on a failure comes from send(), so point
                # 'sock' at the peer first so the except clause closes/reports
                # the endpoint that actually failed
                sock = peersock
                peersock.send(data)
        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            # print(self.name, ": Exception {}: {}".format(type(e).__name__, e))
            self.on_close(sock, "Exception {}: {}".format(type(e).__name__, e))
+
+
+
+ #def start_timeout(self):
+ # self.timeout = time.time() + 120
+
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+
# License banner prepended verbatim to every generated .sql file
LICENSE_HEAD='/**
* Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
* This file is part of openmano
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* For those usages not covered by the Apache License, Version 2.0 please
* contact with: nfvlabs@tid.es
**/
'

# Default database connection settings, overridable via command-line options
DBUSER="mano"
DBPASS=""
DBHOST="localhost"
DBPORT="3306"
DBNAME="mano_db"

# Detect paths
# NOTE(review): MYSQL/AWK/GREP are detected but never used below - candidates for removal
MYSQL=$(which mysql)
AWK=$(which awk)
GREP=$(which grep)
DIRNAME=`dirname $(readlink -f $0)`
+
# Print command-line help for this dump script
function usage(){
 echo -e "Usage: $0 OPTIONS"
 echo -e " Dumps openmano database content"
 echo -e " OPTIONS"
 echo -e " -u USER database user. '$DBUSER' by default. Prompts if DB access fails"
 echo -e " -p PASS database password. 'No password' by default. Prompts if DB access fails"
 echo -e " -P PORT database port. '$DBPORT' by default"
 echo -e " -h HOST database host. '$DBHOST' by default"
 echo -e " -d NAME database name. '$DBNAME' by default. Prompts if DB access fails"
 echo -e " --help shows this help"
}
+
# Parse command-line options.
# BUG FIX: 'd:' was missing from the getopts spec although the 'd)' case and
# the help text document '-d NAME'; without it '-d' was rejected as invalid.
while getopts ":u:p:P:h:d:-:" o; do
    case "${o}" in
        u)
            DBUSER="$OPTARG"
            ;;
        p)
            DBPASS="$OPTARG"
            ;;
        P)
            DBPORT="$OPTARG"
            ;;
        d)
            DBNAME="$OPTARG"
            ;;
        h)
            DBHOST="$OPTARG"
            ;;
        -)
            # the only long option supported is --help
            [ "${OPTARG}" == "help" ] && usage && exit 0
            echo "Invalid option: --$OPTARG" >&2 && usage >&2
            exit 1
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2 && usage >&2
            exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument." >&2 && usage >&2
            exit 1
            ;;
        *)
            # unreachable safety net; use a standard non-negative exit status
            usage >&2
            exit 1
            ;;
    esac
done
shift $((OPTIND-1))
+
#check and ask for database user password
# Build the mysql client option strings from the current settings
DBUSER_="-u$DBUSER"
DBPASS_=""
[ -n "$DBPASS" ] && DBPASS_="-p$DBPASS"
DBHOST_="-h$DBHOST"
DBPORT_="-P$DBPORT"
# Probe the connection with a no-op statement; on failure keep prompting for
# database name and credentials until a trial login succeeds (Ctrl+c aborts)
while ! echo ";" | mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ $DBNAME >/dev/null 2>&1
do
 [ -n "$logintry" ] && echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)"
 [ -z "$logintry" ] && echo -e "\nProvide database name and credentials"
 read -e -p "mysql database name($DBNAME): " KK
 [ -n "$KK" ] && DBNAME="$KK"
 read -e -p "mysql user($DBUSER): " KK
 [ -n "$KK" ] && DBUSER="$KK" && DBUSER_="-u$DBUSER"
 read -e -s -p "mysql password: " DBPASS
 [ -n "$DBPASS" ] && DBPASS_="-p$DBPASS"
 [ -z "$DBPASS" ] && DBPASS_=""
 logintry="yes"
 echo
done
+
+
+#echo structure, including the content of schema_version
+echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_structure.sql
+mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-data --add-drop-table --add-drop-database --routines --databases $DBNAME >> ${DIRNAME}/${DBNAME}_structure.sql
+echo -e "\n\n\n\n" >> ${DIRNAME}/${DBNAME}_structure.sql
+mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-create-info $DBNAME --tables schema_version 2>/dev/null >> ${DIRNAME}/${DBNAME}_structure.sql
+echo " ${DIRNAME}/${DBNAME}_structure.sql"
+
+#echo only data
+echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_data.sql #copy my own header
+mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-create-info $DBNAME >> ${DIRNAME}/${DBNAME}_data.sql
+echo " ${DIRNAME}/${DBNAME}_data.sql"
+
+#echo all
+echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_all.sql #copy my own header
+mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --add-drop-table --add-drop-database --routines --databases $DBNAME >> ${DIRNAME}/${DBNAME}_all.sql
+echo " ${DIRNAME}/${DBNAME}_all.sql"
+
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
# Default connection settings; DEFAULT_DBPASS is tried silently before
# prompting when no password was supplied
DBUSER="mano"
DBPASS=""
DEFAULT_DBPASS="manopw"
DBHOST=""
DBPORT="3306"
DBNAME="mano_db"
QUIET_MODE=""
CREATEDB=""

# Detect paths
# NOTE(review): MYSQL/AWK/GREP are detected but never used below - candidates for removal
MYSQL=$(which mysql)
AWK=$(which awk)
GREP=$(which grep)
DIRNAME=`dirname $(readlink -f $0)`
+
# Print command-line help.
# BUG FIX: a stray backslash after the second echo merged the third echo into
# its argument list, and left the '"If [version]..."' string on its own line
# to be executed as a command ("command not found"). Rewritten as four
# independent echo statements.
function usage(){
    echo -e "Usage: $0 OPTIONS [version]"
    echo -e " Inits openmano database; deletes previous one and loads from ${DBNAME}_structure.sql"
    echo -e " and data from host_ranking.sql, nets.sql, of_ports_pci_correspondece*.sql"
    echo -e " If [version] is not provided, it is upgraded to the last version"
    echo -e " OPTIONS"
    echo -e " -u USER database user. '$DBUSER' by default. Prompts if DB access fails"
    echo -e " -p PASS database password. If missing it tries without and '$DEFAULT_DBPASS' password before prompting"
    echo -e " -P PORT database port. '$DBPORT' by default"
    echo -e " -h HOST database host. 'localhost' by default"
    echo -e " -d NAME database name. '$DBNAME' by default. Prompts if DB access fails"
    echo -e " -q --quiet: Do not prompt for credentials and exit if cannot access to database"
    echo -e " --createdb forces the deletion and creation of the database"
    echo -e " --help shows this help"
}
+
# Parse command-line options (short options take arguments; long options are
# dispatched manually through the '-' case)
while getopts ":u:p:P:h:d:q-:" o; do
    case "${o}" in
        u)
            DBUSER="$OPTARG"
            ;;
        p)
            DBPASS="$OPTARG"
            ;;
        P)
            DBPORT="$OPTARG"
            ;;
        d)
            DBNAME="$OPTARG"
            ;;
        h)
            DBHOST="$OPTARG"
            ;;
        q)
            export QUIET_MODE="-q"
            ;;
        -)
            # long options: --help, --quiet, --createdb
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE="-q" && continue
            [ "${OPTARG}" == "createdb" ] && export CREATEDB=yes && continue
            echo "Invalid option: '--$OPTARG'. Type --help for more information" >&2
            exit 1
            ;;
        \?)
            echo "Invalid option: '-$OPTARG'. Type --help for more information" >&2
            exit 1
            ;;
        :)
            echo "Option '-$OPTARG' requires an argument. Type --help for more information" >&2
            exit 1
            ;;
        *)
            # unreachable safety net
            usage >&2
            exit 1
            ;;
    esac
done
shift $((OPTIND-1))
+
# Optional positional argument: target schema version (must be an integer)
DB_VERSION=$1

if [ -n "$DB_VERSION" ] ; then
 # check it is a number and an allowed one
 # '[ x -eq x ]' succeeds only for integers; the '! echo ... || exit 1'
 # chain prints the error and exits when the test fails
 [ "$DB_VERSION" -eq "$DB_VERSION" ] 2>/dev/null ||
 ! echo "parameter 'version' requires a integer value" >&2 || exit 1
fi
+
# Creating temporary file
# Credentials are passed to mysql via a 0600 defaults-extra-file so they never
# appear on the command line; removed automatically on exit
TEMPFILE="$(mktemp -q --tmpdir "initdb.XXXXXX")"
trap 'rm -f "$TEMPFILE"' EXIT
chmod 0600 "$TEMPFILE"
DEF_EXTRA_FILE_PARAM="--defaults-extra-file=$TEMPFILE"
echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"

if [ -n "${CREATEDB}" ] ; then
 FIRST_TRY="yes"
 # Validate admin credentials, retrying interactively unless in quiet mode
 while ! DB_ERROR=`mysqladmin "$DEF_EXTRA_FILE_PARAM" -s status 2>&1 >/dev/null` ; do
 # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
 [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
 echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
 continue
 echo "$DB_ERROR"
 [[ -n "$QUIET_MODE" ]] && echo -e "Invalid admin database credentials!!!" >&2 && exit 1
 echo -e "Provide database credentials (Ctrl+c to abort):"
 read -e -p " mysql user($DBUSER): " KK
 [ -n "$KK" ] && DBUSER="$KK"
 read -e -s -p " mysql password: " DBPASS
 echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
 FIRST_TRY=""
 echo
 done
 # echo " deleting previous database ${DBNAME} if it exists"
 mysqladmin $DEF_EXTRA_FILE_PARAM DROP "${DBNAME}" -f && echo "Previous database deleted"
 echo " creating database ${DBNAME}"
 mysqladmin $DEF_EXTRA_FILE_PARAM create "${DBNAME}" || exit 1
fi
+
# Check and ask for database user password
# Same retry pattern as above, but now validating access to the target
# database itself (name may also be corrected interactively)
FIRST_TRY="yes"
while ! DB_ERROR=`mysql "$DEF_EXTRA_FILE_PARAM" $DBNAME -e "quit" 2>&1 >/dev/null`
do
 # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
 [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
 echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
 continue
 echo "$DB_ERROR"
 [[ -n "$QUIET_MODE" ]] && echo -e "Invalid database credentials!!!" >&2 && exit 1
 echo -e "Provide database name and credentials (Ctrl+c to abort):"
 read -e -p " mysql database name($DBNAME): " KK
 [ -n "$KK" ] && DBNAME="$KK"
 read -e -p " mysql user($DBUSER): " KK
 [ -n "$KK" ] && DBUSER="$KK"
 read -e -s -p " mysql password: " DBPASS
 echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
 FIRST_TRY=""
 echo
done
+
# Build per-option argument strings to forward the credentials to the
# migration script (empty when the corresponding value is not set).
# Dead 'DBCMD' variable removed: it was assigned but never used.
DBUSER_="" && [ -n "$DBUSER" ] && DBUSER_="-u$DBUSER"
DBPASS_="" && [ -n "$DBPASS" ] && DBPASS_="-p$DBPASS"
DBHOST_="" && [ -n "$DBHOST" ] && DBHOST_="-h$DBHOST"
DBPORT_="-P$DBPORT"

# Load the database structure, substituting the database-name placeholder
echo " loading ${DIRNAME}/mano_db_structure.sql"
sed -e "s/{{mano_db}}/$DBNAME/" ${DIRNAME}/mano_db_structure.sql | mysql $DEF_EXTRA_FILE_PARAM ||
 ! echo "ERROR at init $DBNAME" || exit 1

# Upgrade the freshly loaded schema to the requested (or latest) version.
# BUG FIX: user-visible typo "migrage" -> "migrate".
echo " migrate database version"
# echo "${DIRNAME}/migrate_mano_db.sh $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ -d$DBNAME $QUIET_MODE $DB_VERSION"
${DIRNAME}/migrate_mano_db.sh $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ -d$DBNAME $QUIET_MODE $DB_VERSION
+
--- /dev/null
+#!/usr/bin/env bash
+
+##
+# Copyright Telefonica Investigacion y Desarrollo, S.A.U.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
# Defaults for the database to create and the two credential sets:
# DB_ADMIN_* is used to administer the server, DB_USER/DB_PASS is the
# application account that gets created/granted
DB_NAME='mano_db'
DB_ADMIN_USER="root"
DB_USER="mano"
DB_PASS="manopw"
DB_ADMIN_PASSWD=""
DB_PORT="3306"
DB_HOST=""
DB_HOST_PARAM=""
QUIET_MODE=""
FORCEDB=""
UPDATEDB=""
NO_PACKAGES=""
UNINSTALL=""
+
# Print command-line help.
# BUG FIX: the help text documented '--unistall' but the flag actually parsed
# by the option loop is '--uninstall'.
function usage(){
    echo -e "usage: sudo $0 [OPTIONS]"
    echo -e "Install openmano database server and the needed packages"
    echo -e " OPTIONS"
    echo -e " -U USER: database admin user. '$DB_ADMIN_USER' by default. Prompts if needed"
    echo -e " -P PASS: database admin password to be used or installed. Prompts if needed"
    echo -e " -d: database name, '$DB_NAME' by default"
    echo -e " -u: database user, '$DB_USER' by default"
    echo -e " -p: database pass, '$DB_PASS' by default"
    echo -e " -H: HOST database host. 'localhost' by default"
    echo -e " -T: PORT database port. '$DB_PORT' by default"
    echo -e " -q --quiet: install in unattended mode"
    echo -e " -h --help: show this help"
    echo -e " --forcedb: if database exists, it is dropped and a new one is created"
    echo -e " --updatedb: if database exists, it preserves the content and it is updated to the needed version"
    echo -e " --no-install-packages: use this option to skip updating and installing the requires packages. This avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
    echo -e " --uninstall: delete database"
}
+
function ask_user(){
    # Ask the user a yes/no question and parse the answer. Case insensitive.
    # Params: $1 text to ask; $2 default action for an empty answer: 'y' means
    #         yes, 'n' means no, anything else (or empty) forces a real answer
    # Return: true(0) for 'yes'/'y'; false(1) for 'no'/'n'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        if [ -z "$USER_CONFIRMATION" ] ; then
            # empty answer: apply the default, if one was given
            [ "$2" == 'y' ] && return 0
            [ "$2" == 'n' ] && return 1
        else
            case "${USER_CONFIRMATION,,}" in
                yes|y) return 0 ;;
                no|n)  return 1 ;;
            esac
        fi
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
+
function install_packages(){
    # Install the requested packages with whichever package manager exists,
    # then verify each one really got installed; abort on any failure.
    if [ -x /usr/bin/apt-get ] ; then
        apt-get install -y $*
    fi
    if [ -x /usr/bin/yum ] ; then
        yum install -y $*
    fi

    #check properly installed
    for PACKAGE in $*
    do
        PACKAGE_INSTALLED="no"
        if [ -x /usr/bin/apt-get ] ; then
            dpkg -l $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes"
        fi
        if [ -x /usr/bin/yum ] ; then
            yum list installed $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes"
        fi
        if [ "$PACKAGE_INSTALLED" = "no" ]
        then
            echo "failed to install package '$PACKAGE'. Revise network connectivity and try again" >&2
            exit 1
        fi
    done
}
+
function _install_mysql_package(){
 # Install the MySQL (Ubuntu) / MariaDB (CentOS/RedHat) server package,
 # start/enable the service, and optionally run the interactive hardening.
 echo '
 #################################################################
 ##### INSTALL REQUIRED PACKAGES #####
 #################################################################'
 [ "$_DISTRO" == "Ubuntu" ] && ! install_packages "mysql-server" && exit 1
 [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && ! install_packages "mariadb mariadb-server" && exit 1

 if [[ "$_DISTRO" == "Ubuntu" ]]
 then
 #start services. By default CentOS does not start services
 service mysql start >> /dev/null
 # try to set admin password, ignore if fails
 # BUG FIX: the guard tested the undefined variable $DBPASSWD, so the admin
 # password was never applied; test the real variable DB_ADMIN_PASSWD
 [[ -n $DB_ADMIN_PASSWD ]] && mysqladmin -u $DB_ADMIN_USER -s password $DB_ADMIN_PASSWD
 fi

 if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ]
 then
 #start services. By default CentOS does not start services
 service mariadb start
 service httpd start
 systemctl enable mariadb
 systemctl enable httpd
 ask_user "Do you want to configure mariadb (recommended if not done before) (Y/n)? " y &&
 mysql_secure_installation

 ask_user "Do you want to set firewall to grant web access port 80,443 (Y/n)? " y &&
 firewall-cmd --permanent --zone=public --add-service=http &&
 firewall-cmd --permanent --zone=public --add-service=https &&
 firewall-cmd --reload
 fi
}
+
function _create_db(){
 # Create the database, create/grant the application user, then delegate the
 # schema load to init_mano_db.sh. The '|| ! echo ... || exit 1' chains print
 # the error message and abort when the preceding command fails.
 echo '
 #################################################################
 ##### CREATE AND INIT DATABASE #####
 #################################################################'
 echo "mysqladmin --defaults-extra-file="$TEMPFILE" -s create ${DB_NAME}"
 mysqladmin --defaults-extra-file="$TEMPFILE" -s create ${DB_NAME} \
 || ! echo "Error creating ${DB_NAME} database" >&2 \
 || exit 1
 echo "CREATE USER $DB_USER@'localhost' IDENTIFIED BY '$DB_PASS';" | mysql --defaults-extra-file="$TEMPFILE" -s 2>/dev/null \
 || echo "Warning: User '$DB_USER' cannot be created at database. Probably exist" >&2
 echo "GRANT ALL PRIVILEGES ON ${DB_NAME}.* TO '$DB_USER'@'localhost';" | mysql --defaults-extra-file="$TEMPFILE" -s \
 || ! echo "Error: Granting privileges to user '$DB_USER' at database" >&2 \
 || exit 1
 echo " Database '${DB_NAME}' created, user '$DB_USER' password '$DB_PASS'"
 DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
 ${DIRNAME}/init_mano_db.sh -u"$DB_USER" -p"$DB_PASS" -d"$DB_NAME" -P"$DB_PORT" $DB_HOST_PARAM \
 || ! echo "Error initializing database '$DB_NAME'" >&2 \
 || exit 1
}
+
function _delete_db(){
    # Drop the database; DBDELETEPARAM may carry '-f' to skip confirmation.
    if ! mysqladmin --defaults-extra-file="$TEMPFILE" -s drop "${DB_NAME}" $DBDELETEPARAM ; then
        echo "Error: Could not delete '${DB_NAME}' database" >&2
        exit 1
    fi
}
+
function _update_db(){
 # Keep the existing database content: (re)create/grant the application user
 # and run the schema migration script to bring it to the needed version.
 echo '
 #################################################################
 ##### UPDATE DATABASE #####
 #################################################################'
 echo "CREATE USER $DB_USER@'localhost' IDENTIFIED BY '$DB_PASS';" | mysql --defaults-extra-file="$TEMPFILE" -s 2>/dev/null \
 || echo "Warning: User '$DB_USER' cannot be created at database. Probably exist" >&2
 echo "GRANT ALL PRIVILEGES ON ${DB_NAME}.* TO '$DB_USER'@'localhost';" | mysql --defaults-extra-file="$TEMPFILE" -s \
 || ! echo "Error: Granting privileges to user '$DB_USER' at database" >&2 \
 || exit 1
 echo " Granted privileges to user '$DB_USER' password '$DB_PASS' to existing database '${DB_NAME}'"
 DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
 ${DIRNAME}/migrate_mano_db.sh -u"$DB_USER" -p"$DB_PASS" -d"$DB_NAME" -P"$DB_PORT" $DB_HOST_PARAM \
 || ! echo "Error updating database '$DB_NAME'" >&2 \
 || exit 1
}
+
function _uninstall_db(){
    # Drop the whole database; force (-f) when running unattended.
    echo '
 #################################################################
 ##### DELETE DATABASE #####
 #################################################################'
    if [[ -n $QUIET_MODE ]] ; then
        DBDELETEPARAM="-f"
    else
        DBDELETEPARAM=""
    fi
    _delete_db
}
+
function db_exists(){ # (db_name, credential_file)
    # Exit the script outright when the credentials themselves are bad;
    # otherwise return the result of searching the server's database list
    # (grep -w -q: exact word match, status only).
    mysqlshow --defaults-extra-file="$2" >/dev/null || exit 1
    mysqlshow --defaults-extra-file="$2" | grep -v Wildcard | grep -w -q $1
}
+
# Parse command-line options.
# FIX: removed the stray 'i' from the getopts spec - there was no 'i)' case,
# so '-i' fell through to the generic '*)' branch instead of producing the
# proper "Invalid option: '-i'" message via '\?'.
while getopts ":U:P:d:u:p:H:T:hq-:" o; do
    case "${o}" in
        U)
            export DB_ADMIN_USER="$OPTARG"
            ;;
        P)
            export DB_ADMIN_PASSWD="$OPTARG"
            ;;
        d)
            export DB_NAME="$OPTARG"
            ;;
        u)
            export DB_USER="$OPTARG"
            ;;
        p)
            export DB_PASS="$OPTARG"
            ;;
        H)
            export DB_HOST="$OPTARG"
            export DB_HOST_PARAM="-h$DB_HOST"
            ;;
        T)
            export DB_PORT="$OPTARG"
            ;;
        q)
            export QUIET_MODE=yes
            export DEBIAN_FRONTEND=noninteractive
            ;;
        h)
            usage && exit 0
            ;;
        -)
            # long options
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "forcedb" ] && FORCEDB="y" && continue
            [ "${OPTARG}" == "updatedb" ] && UPDATEDB="y" && continue
            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
            exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
            exit 1
            ;;
        :)
            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
            exit 1
            ;;
        *)
            usage >&2
            exit 1
            ;;
    esac
done
# The two "existing database" strategies cannot be combined
if [ -n "$FORCEDB" ] && [ -n "$UPDATEDB" ] ; then
    echo "Error: options --forcedb and --updatedb are mutually exclusive" >&2
    exit 1
fi
+
# Discover Linux distribution
# try redhat type
[ -f /etc/redhat-release ] && _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut -d" " -f1)
# if not assuming ubuntu type
[ -f /etc/redhat-release ] || _DISTRO=$(lsb_release -is 2>/dev/null)

# Install the database server unless the caller opted out; needs root
if [[ -z "$NO_PACKAGES" ]]
then
 [ "$USER" != "root" ] && echo "Needed root privileges" >&2 && exit 1
 _install_mysql_package || exit 1
fi

# Creating temporary file for MYSQL installation and initialization"
# Admin credentials go in a 0600 defaults-extra-file, removed on exit
TEMPFILE="$(mktemp -q --tmpdir "installdb.XXXXXX")"
trap 'rm -f "$TEMPFILE"' EXIT
chmod 0600 "$TEMPFILE"
echo -e "[client]\n user='${DB_ADMIN_USER}'\n password='$DB_ADMIN_PASSWD'\n host='$DB_HOST'\n port='$DB_PORT'" > "$TEMPFILE"

#check and ask for database user password. Must be done after database installation
if [[ -z $QUIET_MODE ]]
then
 # BUG FIX: user-visible typo "Cheking" -> "Checking"
 echo -e "\nChecking database connection and ask for credentials"
 # echo "mysqladmin --defaults-extra-file=$TEMPFILE -s status >/dev/null"
 while ! mysqladmin --defaults-extra-file="$TEMPFILE" -s status >/dev/null
 do
 [ -n "$logintry" ] && echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)"
 [ -z "$logintry" ] && echo -e "\nProvide database credentials"
 read -e -p "database admin user? ($DB_ADMIN_USER) " DBUSER_
 [ -n "$DBUSER_" ] && DB_ADMIN_USER=$DBUSER_
 read -e -s -p "database admin password? (Enter for not using password) " DBPASSWD_
 [ -n "$DBPASSWD_" ] && DB_ADMIN_PASSWD="$DBPASSWD_"
 [ -z "$DBPASSWD_" ] && DB_ADMIN_PASSWD=""
 echo -e "[client]\n user='${DB_ADMIN_USER}'\n password='$DB_ADMIN_PASSWD'\n host='$DB_HOST'\n port='$DB_PORT'" > "$TEMPFILE"
 logintry="yes"
 done
fi

# --uninstall short-circuits everything else
if [[ ! -z "$UNINSTALL" ]]
then
 _uninstall_db
 exit
fi

# Create or update database
if db_exists $DB_NAME $TEMPFILE ; then
 if [[ -n $FORCEDB ]] ; then
 # DBDELETEPARAM=""
 # [[ -n $QUIET_MODE ]] && DBDELETEPARAM="-f"
 DBDELETEPARAM="-f"
 _delete_db
 _create_db
 elif [[ -n $UPDATEDB ]] ; then
 _update_db
 elif [[ -z $QUIET_MODE ]] ; then
 # interactive: let the user choose between reinstall and in-place update
 echo "database '$DB_NAME' exist. Reinstall it?"
 if ask_user "Type 'y' to drop and reinstall existing database (content will be lost), Type 'n' to update existing database (y/N)? " n ; then
 _delete_db
 _create_db
 else
 _update_db
 fi
 else
 echo "Database '$DB_NAME' exists. Use option '--forcedb' to force the deletion of the existing one, or '--updatedb' to use existing one and update it"
 exit 1
 fi
else
 _create_db
fi
+
--- /dev/null
+/**
+* Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
+* This file is part of openmano
+* All Rights Reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+*
+* For those usages not covered by the Apache License, Version 2.0 please
+* contact with: nfvlabs@tid.es
+**/
+
+-- MySQL dump 10.13 Distrib 5.7.30, for Linux (x86_64)
+--
+-- Host: localhost Database: {{mano_db}}
+-- ------------------------------------------------------
+-- Server version 5.7.27
+
-- Session save/restore preamble emitted by mysqldump; makes the import
-- independent of the client's charset/timezone/key-check settings.
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;

--
-- Current Database: `{{mano_db}}`
--

-- NOTE: `{{mano_db}}` is a placeholder substituted by init_mano_db.sh (sed)
-- with the actual database name before the file is piped to mysql.
/*!40000 DROP DATABASE IF EXISTS `{{mano_db}}`*/;

CREATE DATABASE /*!32312 IF NOT EXISTS*/ `{{mano_db}}` /*!40100 DEFAULT CHARACTER SET utf8 */;

USE `{{mano_db}}`;
+
--
-- Table structure for table `datacenter_nets`
--

-- External VIM networks registered per datacenter; rows are removed in
-- cascade with their datacenter (FK below).
DROP TABLE IF EXISTS `datacenter_nets`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `datacenter_nets` (
 `uuid` varchar(36) NOT NULL,
 `name` varchar(255) NOT NULL,
 `vim_net_id` varchar(300) NOT NULL,
 `datacenter_id` varchar(36) NOT NULL,
 `type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
 `multipoint` enum('true','false') NOT NULL DEFAULT 'true',
 `shared` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'If can be shared with serveral scenarios',
 `description` varchar(255) DEFAULT NULL,
 `created_at` double NOT NULL,
 `modified_at` double DEFAULT NULL,
 PRIMARY KEY (`uuid`),
 UNIQUE KEY `name_datacenter_id` (`name`,`datacenter_id`),
 KEY `FK_datacenter_nets_datacenters` (`datacenter_id`),
 CONSTRAINT `FK_datacenter_nets_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Contain the external nets of a datacenter';
/*!40101 SET character_set_client = @saved_cs_client */;
+
--
-- Table structure for table `datacenter_tenants`
--

-- Per-datacenter VIM tenant/credential records.
-- NOTE(review): the table COMMENT 'Scenarios defined by the user' looks
-- copy-pasted from another table; verify before relying on it.
DROP TABLE IF EXISTS `datacenter_tenants`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `datacenter_tenants` (
 `uuid` varchar(36) NOT NULL,
 `name` varchar(255) DEFAULT NULL,
 `datacenter_id` varchar(36) NOT NULL COMMENT 'Datacenter of this tenant',
 `vim_tenant_name` varchar(256) DEFAULT NULL,
 `vim_tenant_id` varchar(256) DEFAULT NULL COMMENT 'Tenant ID at VIM',
 `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Indicates if this tenant has been created by openmano, or it existed on VIM',
 `user` varchar(64) DEFAULT NULL,
 `passwd` varchar(64) DEFAULT NULL,
 `config` varchar(4000) DEFAULT NULL,
 `created_at` double NOT NULL,
 `modified_at` double DEFAULT NULL,
 PRIMARY KEY (`uuid`),
 KEY `FK_vim_tenants_datacenters` (`datacenter_id`),
 CONSTRAINT `FK_vim_tenants_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
/*!40101 SET character_set_client = @saved_cs_client */;
+
--
-- Table structure for table `datacenters`
--

-- Root table of the VIM hierarchy: one row per managed datacenter.
DROP TABLE IF EXISTS `datacenters`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `datacenters` (
 `uuid` varchar(36) NOT NULL,
 `name` varchar(255) NOT NULL,
 `description` varchar(255) DEFAULT NULL,
 `type` varchar(36) NOT NULL DEFAULT 'openvim',
 `vim_url` varchar(150) NOT NULL COMMENT 'URL of the VIM for the REST API',
 `vim_url_admin` varchar(150) DEFAULT NULL,
 `config` varchar(4000) DEFAULT NULL COMMENT 'extra config information in json',
 `created_at` double NOT NULL,
 `modified_at` double DEFAULT NULL,
 PRIMARY KEY (`uuid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Datacenters managed by the NFVO.';
/*!40101 SET character_set_client = @saved_cs_client */;
+
--
-- Table structure for table `datacenters_flavors`
--

-- Mapping of a logical flavor to its VIM-side id per datacenter tenant.
DROP TABLE IF EXISTS `datacenters_flavors`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `datacenters_flavors` (
 `id` int(11) NOT NULL AUTO_INCREMENT,
 `flavor_id` varchar(36) NOT NULL,
 `datacenter_vim_id` varchar(36) NOT NULL,
 `vim_id` varchar(300) NOT NULL,
 `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
 `vim_info` text,
 `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Indicates if it has been created by openmano, or already existed',
 `extended` varchar(2000) DEFAULT NULL COMMENT 'Extra description json format of additional devices',
 PRIMARY KEY (`id`),
 KEY `FK__flavors` (`flavor_id`),
 KEY `FK_datacenters_flavors_datacenter_tenants` (`datacenter_vim_id`),
 CONSTRAINT `FK__flavors` FOREIGN KEY (`flavor_id`) REFERENCES `flavors` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
 CONSTRAINT `FK_datacenters_flavors_datacenter_tenants` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
+
--
-- Table structure for table `datacenters_images`
--

-- Mapping of a logical image to its VIM-side id per datacenter tenant
-- (mirrors `datacenters_flavors`).
DROP TABLE IF EXISTS `datacenters_images`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `datacenters_images` (
 `id` int(11) NOT NULL AUTO_INCREMENT,
 `image_id` varchar(36) NOT NULL,
 `datacenter_vim_id` varchar(36) NOT NULL,
 `vim_id` varchar(300) NOT NULL,
 `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
 `vim_info` text,
 `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Indicates if it has been created by openmano, or already existed',
 PRIMARY KEY (`id`),
 KEY `FK__images` (`image_id`),
 KEY `FK_datacenters_images_datacenter_tenants` (`datacenter_vim_id`),
 CONSTRAINT `FK__images` FOREIGN KEY (`image_id`) REFERENCES `images` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
 CONSTRAINT `FK_datacenters_images_datacenter_tenants` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `flavors`
+--
+
+DROP TABLE IF EXISTS `flavors`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Compute flavor catalogue (disk GB / ram / vcpus); `extended` holds a JSON
+-- blob for NUMA/pinning and extra devices, per its column comment.
+CREATE TABLE `flavors` (
+ `uuid` varchar(36) NOT NULL,
+ `name` varchar(255) NOT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `disk` smallint(5) unsigned DEFAULT NULL,
+ `ram` mediumint(7) unsigned DEFAULT NULL,
+ `vcpus` smallint(5) unsigned DEFAULT NULL,
+ `extended` varchar(2000) DEFAULT NULL COMMENT 'Extra description json format of needed resources and pining, orginized in sets per numa',
+ PRIMARY KEY (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `images`
+--
+
+DROP TABLE IF EXISTS `images`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- VM image catalogue. An image is unique by `location` and also by the
+-- (`universal_name`, `checksum`) pair, so the same name may repeat with a
+-- different checksum.
+CREATE TABLE `images` (
+ `uuid` varchar(36) NOT NULL,
+ `name` varchar(255) NOT NULL,
+ `universal_name` varchar(255) DEFAULT NULL,
+ `checksum` varchar(32) DEFAULT NULL,
+ `location` varchar(200) DEFAULT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `metadata` varchar(2000) DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `location` (`location`),
+ UNIQUE KEY `universal_name_checksum` (`universal_name`,`checksum`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_actions`
+--
+
+DROP TABLE IF EXISTS `instance_actions`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Client actions over instances (per table comment below), with progress
+-- counters: number_tasks vs number_done/number_failed.
+-- NOTE(review): the KEY is named `FK_actions_tenants` but the CONSTRAINT is
+-- `FK_actions_tenant` (singular) — looks like a historical naming slip; both
+-- names are part of the live schema, so they are left as-is.
+CREATE TABLE `instance_actions` (
+ `uuid` varchar(36) NOT NULL,
+ `tenant_id` varchar(36) DEFAULT NULL,
+ `instance_id` varchar(36) DEFAULT NULL,
+ `description` varchar(64) DEFAULT NULL COMMENT 'CREATE, DELETE, SCALE OUT/IN, ...',
+ `number_tasks` smallint(6) NOT NULL DEFAULT '1',
+ `number_done` smallint(6) NOT NULL DEFAULT '0',
+ `number_failed` smallint(6) NOT NULL DEFAULT '0',
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_actions_tenants` (`tenant_id`),
+ CONSTRAINT `FK_actions_tenant` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Contains client actions over instances';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_classifications`
+--
+
+DROP TABLE IF EXISTS `instance_classifications`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Deployed SFC classifier instances. Deleting the scenario instance cascades
+-- here; deleting the descriptor match only nulls `sce_classifier_match_id`,
+-- so the runtime row survives. Datacenter FKs are restrict (no action clause).
+CREATE TABLE `instance_classifications` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_scenario_id` varchar(36) NOT NULL,
+ `vim_classification_id` varchar(300) DEFAULT NULL,
+ `sce_classifier_match_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) DEFAULT NULL,
+ `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+ `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `vim_info` text,
+ `related` varchar(36) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_classifications_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_classifications_sce_classifier_matches` (`sce_classifier_match_id`),
+ KEY `FK_instance_classifications_datacenters` (`datacenter_id`),
+ KEY `FK_instance_classifications_datacenter_tenants` (`datacenter_tenant_id`),
+ CONSTRAINT `FK_instance_classifications_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_classifications_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_classifications_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_classifications_sce_classifier_matches` FOREIGN KEY (`sce_classifier_match_id`) REFERENCES `sce_classifier_matches` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_interfaces`
+--
+
+DROP TABLE IF EXISTS `instance_interfaces`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Runtime attachment of a VM instance to a net instance, holding the VIM port
+-- id, MAC/IP, and SDN port-mapping data (sdn_port_id / compute_node / pci /
+-- vlan). Rows cascade away with their VM or net instance; deleting the
+-- descriptor interface only nulls `interface_id`.
+-- NOTE(review): `instance_wim_net_id` has no FK or index here — presumably a
+-- soft link to `instance_wim_nets`; confirm before relying on integrity.
+CREATE TABLE `instance_interfaces` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_vm_id` varchar(36) NOT NULL,
+ `instance_net_id` varchar(36) NOT NULL,
+ `instance_wim_net_id` varchar(36) DEFAULT NULL,
+ `interface_id` varchar(36) DEFAULT NULL,
+ `vim_interface_id` varchar(300) DEFAULT NULL,
+ `mac_address` varchar(32) DEFAULT NULL,
+ `ip_address` varchar(64) DEFAULT NULL,
+ `vim_info` text,
+ `type` enum('internal','external') NOT NULL COMMENT 'Indicates if this interface is external to a vnf, or internal',
+ `model` varchar(12) DEFAULT NULL,
+ `floating_ip` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'Indicates if a floating_ip must be associated to this interface',
+ `port_security` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled',
+ `sdn_port_id` varchar(36) DEFAULT NULL COMMENT 'Port id in ovim',
+ `compute_node` varchar(100) DEFAULT NULL COMMENT 'Compute node id used to specify the SDN port mapping',
+ `pci` varchar(50) DEFAULT NULL COMMENT 'PCI of the physical port in the host',
+ `vlan` smallint(5) unsigned DEFAULT NULL COMMENT 'VLAN tag used by the port',
+ `created_at` double DEFAULT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_vms` (`instance_vm_id`),
+ KEY `FK_instance_nets` (`instance_net_id`),
+ KEY `FK_instance_ids` (`interface_id`),
+ CONSTRAINT `FK_instance_ids` FOREIGN KEY (`interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_nets` FOREIGN KEY (`instance_net_id`) REFERENCES `instance_nets` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_vms` FOREIGN KEY (`instance_vm_id`) REFERENCES `instance_vms` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Table with all running associattion among VM instances and net instances';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_nets`
+--
+
+DROP TABLE IF EXISTS `instance_nets`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Deployed networks. `datacenter_tenant_id` is the only NOT NULL link (and is
+-- restrict); scenario/descriptor links are nullable — scenario deletion
+-- cascades, while nets/sce_nets deletion just nulls the reference. `created`
+-- distinguishes RO-created networks from pre-existing VIM networks.
+CREATE TABLE `instance_nets` (
+ `uuid` varchar(36) NOT NULL,
+ `osm_id` varchar(255) DEFAULT NULL,
+ `vim_net_id` varchar(300) DEFAULT NULL,
+ `vim_name` varchar(255) DEFAULT NULL,
+ `instance_scenario_id` varchar(36) DEFAULT NULL,
+ `sce_net_id` varchar(36) DEFAULT NULL,
+ `net_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) DEFAULT NULL,
+ `datacenter_tenant_id` varchar(36) NOT NULL,
+ `status` enum('ACTIVE','INACTIVE','DOWN','BUILD','ERROR','VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `vim_info` text,
+ `related` varchar(36) DEFAULT NULL,
+ `multipoint` enum('true','false') NOT NULL DEFAULT 'true',
+ `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at VIM',
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ `sdn_net_id` varchar(36) DEFAULT NULL COMMENT 'Network id in ovim',
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_nets_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_nets_sce_nets` (`sce_net_id`),
+ KEY `FK_instance_nets_nets` (`net_id`),
+ KEY `FK_instance_nets_datacenters` (`datacenter_id`),
+ KEY `FK_instance_nets_datacenter_tenants` (`datacenter_tenant_id`),
+ CONSTRAINT `FK_instance_nets_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_nets_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_nets_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_nets_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_nets_sce_nets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of networks';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_scenarios`
+--
+
+DROP TABLE IF EXISTS `instance_scenarios`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Root of every deployment: one row per scenario instance, pinned to a
+-- datacenter + datacenter tenant (both NOT NULL, restrict). The `scenario_id`
+-- descriptor link is nullable and SET NULL on descriptor deletion, so running
+-- instances outlive their template. Most instance_* tables cascade from here.
+CREATE TABLE `instance_scenarios` (
+ `uuid` varchar(36) NOT NULL,
+ `name` varchar(255) NOT NULL,
+ `tenant_id` varchar(36) DEFAULT NULL,
+ `scenario_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) NOT NULL,
+ `datacenter_tenant_id` varchar(36) NOT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ `cloud_config` mediumtext,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_scenarios_nfvo_tenants` (`tenant_id`),
+ KEY `FK_instance_scenarios_vim_tenants` (`datacenter_tenant_id`),
+ KEY `FK_instance_scenarios_datacenters` (`datacenter_id`),
+ KEY `FK_instance_scenarios_scenarios` (`scenario_id`),
+ CONSTRAINT `FK_instance_scenarios_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_scenarios_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_scenarios_nfvo_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_scenarios_scenarios` FOREIGN KEY (`scenario_id`) REFERENCES `scenarios` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of scenarios defined by the user';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_sfis`
+--
+
+DROP TABLE IF EXISTS `instance_sfis`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- The three SFC runtime tables below (instance_sfis, instance_sfps,
+-- instance_sfs) share one column layout; they differ only in the VIM id
+-- column (vim_sfi_id / vim_sfp_id / vim_sf_id) and the descriptor FK
+-- (sce_rsp_hops for sfis/sfs, sce_rsps for sfps). In all three, scenario
+-- instance deletion cascades and descriptor deletion nulls the link.
+CREATE TABLE `instance_sfis` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_scenario_id` varchar(36) NOT NULL,
+ `vim_sfi_id` varchar(300) DEFAULT NULL,
+ `sce_rsp_hop_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) DEFAULT NULL,
+ `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+ `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `vim_info` text,
+ `related` varchar(36) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_sfis_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_sfis_sce_rsp_hops` (`sce_rsp_hop_id`),
+ KEY `FK_instance_sfis_datacenters` (`datacenter_id`),
+ KEY `FK_instance_sfis_datacenter_tenants` (`datacenter_tenant_id`),
+ CONSTRAINT `FK_instance_sfis_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_sfis_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_sfis_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_sfis_sce_rsp_hops` FOREIGN KEY (`sce_rsp_hop_id`) REFERENCES `sce_rsp_hops` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_sfps`
+--
+
+DROP TABLE IF EXISTS `instance_sfps`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Deployed service function paths; see layout note above `instance_sfis`.
+CREATE TABLE `instance_sfps` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_scenario_id` varchar(36) NOT NULL,
+ `vim_sfp_id` varchar(300) DEFAULT NULL,
+ `sce_rsp_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) DEFAULT NULL,
+ `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+ `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `vim_info` text,
+ `related` varchar(36) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_sfps_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_sfps_sce_rsps` (`sce_rsp_id`),
+ KEY `FK_instance_sfps_datacenters` (`datacenter_id`),
+ KEY `FK_instance_sfps_datacenter_tenants` (`datacenter_tenant_id`),
+ CONSTRAINT `FK_instance_sfps_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_sfps_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_sfps_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_sfps_sce_rsps` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_sfs`
+--
+
+DROP TABLE IF EXISTS `instance_sfs`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Deployed service functions; see layout note above `instance_sfis`.
+CREATE TABLE `instance_sfs` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_scenario_id` varchar(36) NOT NULL,
+ `vim_sf_id` varchar(300) DEFAULT NULL,
+ `sce_rsp_hop_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) DEFAULT NULL,
+ `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+ `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `vim_info` text,
+ `related` varchar(36) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_sfs_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_sfs_sce_rsp_hops` (`sce_rsp_hop_id`),
+ KEY `FK_instance_sfs_datacenters` (`datacenter_id`),
+ KEY `FK_instance_sfs_datacenter_tenants` (`datacenter_tenant_id`),
+ CONSTRAINT `FK_instance_sfs_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_sfs_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_sfs_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_sfs_sce_rsp_hops` FOREIGN KEY (`sce_rsp_hop_id`) REFERENCES `sce_rsp_hops` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_vms`
+--
+
+DROP TABLE IF EXISTS `instance_vms`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Deployed VMs. Each row belongs to a VNF instance (cascade); the descriptor
+-- link `vm_id` is SET NULL on delete, so the running VM record survives
+-- descriptor removal. Status adds an 'ACTIVE:NoMgmtIP' variant not present in
+-- the other instance tables.
+CREATE TABLE `instance_vms` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_vnf_id` varchar(36) NOT NULL,
+ `vim_vm_id` varchar(300) DEFAULT NULL,
+ `vm_id` varchar(36) DEFAULT NULL,
+ `vim_name` varchar(255) DEFAULT NULL,
+ `status` enum('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `vim_info` text,
+ `related` varchar(36) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_vms_vms` (`vm_id`),
+ KEY `FK_instance_vms_instance_vnfs` (`instance_vnf_id`),
+ CONSTRAINT `FK_instance_vms_instance_vnfs` FOREIGN KEY (`instance_vnf_id`) REFERENCES `instance_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_vms_vms` FOREIGN KEY (`vm_id`) REFERENCES `vms` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of VMs as part of VNF instances';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_vnfs`
+--
+
+DROP TABLE IF EXISTS `instance_vnfs`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Deployed VNFs tying a scenario instance to a VNF descriptor. `vnf_id` is
+-- NOT NULL and restrict — a VNF descriptor cannot be deleted while instances
+-- of it exist; the sce_vnfs link is nullable and SET NULL instead.
+CREATE TABLE `instance_vnfs` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_scenario_id` varchar(36) NOT NULL,
+ `vnf_id` varchar(36) NOT NULL,
+ `sce_vnf_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) DEFAULT NULL,
+ `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_vnfs_vnfs` (`vnf_id`),
+ KEY `FK_instance_vnfs_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_vnfs_sce_vnfs` (`sce_vnf_id`),
+ KEY `FK_instance_vnfs_datacenters` (`datacenter_id`),
+ KEY `FK_instance_vnfs_datacenter_tenants` (`datacenter_tenant_id`),
+ CONSTRAINT `FK_instance_vnfs_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_vnfs_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_vnfs_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_vnfs_sce_vnfs` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_vnfs_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of VNFs as part of a scenario';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_wim_nets`
+--
+
+DROP TABLE IF EXISTS `instance_wim_nets`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- WAN-side counterpart of `instance_nets`: networks provisioned through a WIM
+-- account. Mirrors that table's lifecycle (scenario cascade, sce_net SET
+-- NULL) but uses WIM_ERROR/wim_info instead of the VIM equivalents and adds
+-- an `sdn` flag.
+CREATE TABLE `instance_wim_nets` (
+ `uuid` varchar(36) NOT NULL,
+ `wim_internal_id` varchar(300) DEFAULT NULL COMMENT 'Internal ID used by the WIM to refer to the network',
+ `instance_scenario_id` varchar(36) DEFAULT NULL,
+ `sce_net_id` varchar(36) DEFAULT NULL,
+ `wim_id` varchar(36) DEFAULT NULL,
+ `wim_account_id` varchar(36) NOT NULL,
+ `status` enum('ACTIVE','INACTIVE','DOWN','BUILD','ERROR','WIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `wim_info` text,
+ `related` varchar(36) DEFAULT NULL,
+ `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
+ `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at WIM',
+ `sdn` enum('true','false') NOT NULL DEFAULT 'false',
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_wim_nets_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_wim_nets_sce_nets` (`sce_net_id`),
+ KEY `FK_instance_wim_nets_wims` (`wim_id`),
+ KEY `FK_instance_wim_nets_wim_accounts` (`wim_account_id`),
+ CONSTRAINT `FK_instance_wim_nets_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_wim_nets_sce_nets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_wim_nets_wim_accounts` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`),
+ CONSTRAINT `FK_instance_wim_nets_wims` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of wim networks';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `interfaces`
+--
+
+DROP TABLE IF EXISTS `interfaces`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Descriptor-level VM interfaces. Internal names are unique per VM; both FKs
+-- cascade on delete, so interfaces vanish with their VM or internal net.
+CREATE TABLE `interfaces` (
+ `uuid` varchar(36) NOT NULL,
+ `internal_name` varchar(255) NOT NULL,
+ `external_name` varchar(255) DEFAULT NULL,
+ `vm_id` varchar(36) NOT NULL,
+ `net_id` varchar(36) DEFAULT NULL,
+ `type` enum('mgmt','bridge','data') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
+ `vpci` char(12) DEFAULT NULL,
+ `bw` mediumint(8) unsigned DEFAULT NULL COMMENT 'BW expressed in Mbits/s. Maybe this field is not necessary.',
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ `model` varchar(12) DEFAULT NULL,
+ `mac` char(18) DEFAULT NULL,
+ `ip_address` varchar(64) DEFAULT NULL,
+ `floating_ip` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'Indicates if a floating_ip must be associated to this interface',
+ `port_security` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled',
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `internal_name_vm_id` (`internal_name`,`vm_id`),
+ KEY `FK_interfaces_vms` (`vm_id`),
+ KEY `FK_interfaces_nets` (`net_id`),
+ CONSTRAINT `FK_interfaces_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`) ON DELETE CASCADE,
+ CONSTRAINT `FK_interfaces_vms` FOREIGN KEY (`vm_id`) REFERENCES `vms` (`uuid`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VM interfaces';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ip_profiles`
+--
+
+DROP TABLE IF EXISTS `ip_profiles`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- IP addressing parameters for a network. The three owner FKs (net, sce_net,
+-- instance_net) are all nullable with cascade delete; per the table comment a
+-- profile belongs to one of them — nothing in the DDL itself enforces that
+-- exactly one is set.
+CREATE TABLE `ip_profiles` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `net_id` varchar(36) DEFAULT NULL,
+ `sce_net_id` varchar(36) DEFAULT NULL,
+ `instance_net_id` varchar(36) DEFAULT NULL,
+ `ip_version` enum('IPv4','IPv6') NOT NULL DEFAULT 'IPv4',
+ `subnet_address` varchar(64) DEFAULT NULL,
+ `gateway_address` varchar(64) DEFAULT NULL,
+ `dns_address` varchar(255) DEFAULT NULL COMMENT 'dns ip list separated by semicolon',
+ `dhcp_enabled` enum('true','false') NOT NULL DEFAULT 'true',
+ `dhcp_start_address` varchar(64) DEFAULT NULL,
+ `dhcp_count` int(11) DEFAULT NULL,
+ `security_group` varchar(255) DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ KEY `FK_ipprofiles_nets` (`net_id`),
+ KEY `FK_ipprofiles_scenets` (`sce_net_id`),
+ KEY `FK_ipprofiles_instancenets` (`instance_net_id`),
+ CONSTRAINT `FK_ipprofiles_instancenets` FOREIGN KEY (`instance_net_id`) REFERENCES `instance_nets` (`uuid`) ON DELETE CASCADE,
+ CONSTRAINT `FK_ipprofiles_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`) ON DELETE CASCADE,
+ CONSTRAINT `FK_ipprofiles_scenets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Table containing the IP parameters of a network, either a net, a sce_net or and instance_net.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `logs`
+--
+
+DROP TABLE IF EXISTS `logs`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Free-standing log table: no foreign keys, so entries persist after the
+-- tenant or element they describe is deleted. `related`/`uuid` are soft
+-- references to the element being logged.
+CREATE TABLE `logs` (
+ `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
+ `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `nfvo_tenant_id` varchar(36) DEFAULT NULL,
+ `related` varchar(36) NOT NULL COMMENT 'Relevant element for the log',
+ `uuid` varchar(36) DEFAULT NULL COMMENT 'Uuid of vnf, scenario, etc. that log relates to',
+ `level` enum('panic','error','info','debug','verbose') NOT NULL,
+ `description` varchar(200) NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `nets`
+--
+
+DROP TABLE IF EXISTS `nets`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- VNF-internal networks (VM-to-VM inside one VNF, per the table comment).
+-- Name is unique within a VNF and rows cascade away with their VNF.
+CREATE TABLE `nets` (
+ `uuid` varchar(36) NOT NULL,
+ `osm_id` varchar(255) DEFAULT NULL,
+ `vnf_id` varchar(36) NOT NULL,
+ `name` varchar(255) NOT NULL,
+ `type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
+ `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
+ `description` varchar(255) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `vnf_id_name` (`vnf_id`,`name`),
+ CONSTRAINT `FK_nets_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Networks in a VNF definition. These are only the internal networks among VMs of the same VNF.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `nfvo_tenants`
+--
+
+DROP TABLE IF EXISTS `nfvo_tenants`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- NFVO tenants, unique by name, each holding an RO keypair (private half
+-- stored encrypted).
+-- NOTE(review): the table COMMENT says 'Scenarios defined by the user' —
+-- likely copied from another table; left untouched to keep the dump exact.
+CREATE TABLE `nfvo_tenants` (
+ `uuid` varchar(36) NOT NULL,
+ `name` varchar(255) NOT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `encrypted_RO_priv_key` varchar(2000) DEFAULT NULL,
+ `RO_pub_key` varchar(510) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `name` (`name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_classifier_matches`
+--
+
+DROP TABLE IF EXISTS `sce_classifier_matches`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Five-tuple match rules (proto, src/dst IP, src/dst port) belonging to a
+-- classifier; cascade-deleted with it.
+CREATE TABLE `sce_classifier_matches` (
+ `uuid` varchar(36) NOT NULL,
+ `ip_proto` varchar(2) NOT NULL,
+ `source_ip` varchar(16) NOT NULL,
+ `destination_ip` varchar(16) NOT NULL,
+ `source_port` varchar(5) NOT NULL,
+ `destination_port` varchar(5) NOT NULL,
+ `sce_classifier_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_classifiers_classifier_match` (`sce_classifier_id`),
+ CONSTRAINT `FK_sce_classifiers_classifier_match` FOREIGN KEY (`sce_classifier_id`) REFERENCES `sce_classifiers` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_classifiers`
+--
+
+DROP TABLE IF EXISTS `sce_classifiers`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Classifier descriptors: each binds a VNFFG, a rendered service path, a
+-- scenario VNF, and the interface the classification applies to. All four
+-- links are NOT NULL and cascade.
+CREATE TABLE `sce_classifiers` (
+ `uuid` varchar(36) NOT NULL,
+ `tenant_id` varchar(36) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `sce_vnffg_id` varchar(36) NOT NULL,
+ `sce_rsp_id` varchar(36) NOT NULL,
+ `sce_vnf_id` varchar(36) NOT NULL,
+ `interface_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_sce_vnffgs_classifier` (`sce_vnffg_id`),
+ KEY `FK_sce_rsps_classifier` (`sce_rsp_id`),
+ KEY `FK_sce_vnfs_classifier` (`sce_vnf_id`),
+ KEY `FK_interfaces_classifier` (`interface_id`),
+ CONSTRAINT `FK_interfaces_classifier` FOREIGN KEY (`interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_rsps_classifier` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_vnffgs_classifier` FOREIGN KEY (`sce_vnffg_id`) REFERENCES `sce_vnffgs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_vnfs_classifier` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_interfaces`
+--
+
+DROP TABLE IF EXISTS `sce_interfaces`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Scenario-level wiring: attaches a descriptor interface of a scenario VNF to
+-- a scenario net. VNF/net deletion cascades; the `interfaces` link is
+-- restrict (no action clause).
+CREATE TABLE `sce_interfaces` (
+ `uuid` varchar(36) NOT NULL,
+ `sce_vnf_id` varchar(36) NOT NULL,
+ `sce_net_id` varchar(36) DEFAULT NULL,
+ `interface_id` varchar(36) DEFAULT NULL,
+ `ip_address` varchar(64) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_sce_interfaces_sce_vnfs` (`sce_vnf_id`),
+ KEY `FK_sce_interfaces_sce_nets` (`sce_net_id`),
+ KEY `FK_sce_interfaces_interfaces` (`interface_id`),
+ CONSTRAINT `FK_sce_interfaces_interfaces` FOREIGN KEY (`interface_id`) REFERENCES `interfaces` (`uuid`),
+ CONSTRAINT `FK_sce_interfaces_sce_nets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_interfaces_sce_vnfs` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VNF interfaces in a scenario definition.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_nets`
+--
+
+DROP TABLE IF EXISTS `sce_nets`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Inter-VNF networks of a scenario (VNF-internal nets live in `nets`).
+-- `scenario_id` is nullable per its column comment (net shared by several
+-- scenarios); `external='true'` marks a net that already exists at the VIM.
+CREATE TABLE `sce_nets` (
+ `uuid` varchar(36) NOT NULL,
+ `osm_id` varchar(255) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `scenario_id` varchar(36) DEFAULT NULL COMMENT 'NULL if net is matched to several scenarios',
+ `type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
+ `multipoint` enum('true','false') NOT NULL DEFAULT 'true',
+ `external` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'If external, net is already present at VIM',
+ `description` varchar(255) DEFAULT NULL,
+ `vim_network_name` varchar(255) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ `graph` varchar(2000) DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_sce_nets_scenarios` (`scenario_id`),
+ CONSTRAINT `FK_sce_nets_scenarios` FOREIGN KEY (`scenario_id`) REFERENCES `scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Networks in a scenario definition. It only considers networks among VNFs. Networks among internal VMs are only considered in tble ''nets''.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_rsp_hops`
+--
+
+DROP TABLE IF EXISTS `sce_rsp_hops`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Ordered hops (`if_order`) of a rendered service path: each hop names the
+-- ingress/egress descriptor interfaces and the scenario VNF traversed.
+-- All FKs cascade on delete/update.
+CREATE TABLE `sce_rsp_hops` (
+ `uuid` varchar(36) NOT NULL,
+ `if_order` int(11) NOT NULL DEFAULT '0',
+ `ingress_interface_id` varchar(36) NOT NULL,
+ `egress_interface_id` varchar(36) NOT NULL,
+ `sce_vnf_id` varchar(36) NOT NULL,
+ `sce_rsp_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_interfaces_rsp_hop` (`ingress_interface_id`),
+ KEY `FK_sce_vnfs_rsp_hop` (`sce_vnf_id`),
+ KEY `FK_sce_rsps_rsp_hop` (`sce_rsp_id`),
+ KEY `FK_interfaces_rsp_hop_egress` (`egress_interface_id`),
+ CONSTRAINT `FK_interfaces_rsp_hop_egress` FOREIGN KEY (`egress_interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_interfaces_rsp_hop_ingress` FOREIGN KEY (`ingress_interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_rsps_rsp_hop` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_vnfs_rsp_hop` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_rsps`
+--
+
+DROP TABLE IF EXISTS `sce_rsps`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Rendered service path descriptors, one per VNFFG membership; deleted along
+-- with their VNFFG.
+CREATE TABLE `sce_rsps` (
+ `uuid` varchar(36) NOT NULL,
+ `tenant_id` varchar(36) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `sce_vnffg_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_sce_vnffgs_rsp` (`sce_vnffg_id`),
+ CONSTRAINT `FK_sce_vnffgs_rsp` FOREIGN KEY (`sce_vnffg_id`) REFERENCES `sce_vnffgs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_vnffgs`
+--
+
+DROP TABLE IF EXISTS `sce_vnffgs`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_vnffgs` (
+ `uuid` varchar(36) NOT NULL,
+ `tenant_id` varchar(36) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `vendor` varchar(255) DEFAULT NULL,
+ `scenario_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_scenarios_sce_vnffg` (`scenario_id`),
+ KEY `FK_scenarios_vnffg` (`tenant_id`),
+ CONSTRAINT `FK_scenarios_vnffg` FOREIGN KEY (`tenant_id`) REFERENCES `scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_vnfs`
+--
+-- NOTE: FK_sce_vnfs_vnfs carries no ON DELETE action (InnoDB default
+-- RESTRICT), so a VNF referenced by any scenario cannot be deleted; deleting
+-- a scenario, by contrast, cascades to its sce_vnfs rows.
+
+DROP TABLE IF EXISTS `sce_vnfs`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_vnfs` (
+ `uuid` varchar(36) NOT NULL,
+ `member_vnf_index` varchar(255) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `scenario_id` varchar(36) NOT NULL,
+ `vnf_id` varchar(36) NOT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ `graph` varchar(2000) DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `name_scenario_id` (`name`,`scenario_id`),
+ KEY `FK_sce_vnfs_scenarios` (`scenario_id`),
+ KEY `FK_sce_vnfs_vnfs` (`vnf_id`),
+ CONSTRAINT `FK_sce_vnfs_scenarios` FOREIGN KEY (`scenario_id`) REFERENCES `scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_vnfs_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VNFs in scenario definitions. This table also contains the Physical Network Functions and the external elements such as MAN, Core, etc.\r\n';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `scenarios`
+--
+
+DROP TABLE IF EXISTS `scenarios`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `scenarios` (
+ `uuid` varchar(36) NOT NULL,
+ `osm_id` varchar(255) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `short_name` varchar(255) DEFAULT NULL,
+ `tenant_id` varchar(36) DEFAULT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `vendor` varchar(255) DEFAULT NULL,
+ `public` enum('true','false') NOT NULL DEFAULT 'false',
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ `descriptor` text COMMENT 'Original text descriptor used for create the scenario',
+ `cloud_config` mediumtext,
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `osm_id_tenant_id` (`osm_id`,`tenant_id`),
+ KEY `FK_scenarios_nfvo_tenants` (`tenant_id`),
+ CONSTRAINT `FK_scenarios_nfvo_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `schema_version`
+--
+-- Bookkeeping table read/written by the migration script to decide which
+-- upgrade/downgrade steps to run.
+
+DROP TABLE IF EXISTS `schema_version`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `schema_version` (
+ `version_int` int(11) NOT NULL COMMENT 'version as a number. Must not contain gaps',
+ `version` varchar(20) NOT NULL COMMENT 'version as a text',
+ `openmano_ver` varchar(20) NOT NULL COMMENT 'openmano version',
+ `comments` varchar(2000) DEFAULT NULL COMMENT 'changes to database',
+ `date` date DEFAULT NULL,
+ PRIMARY KEY (`version_int`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='database schema control version';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `tenants_datacenters`
+--
+-- Association table mapping NFVO tenants to datacenters and their VIM tenant.
+-- NOTE(review): the table COMMENT below ('Scenarios defined by the user') is a
+-- copy/paste leftover from `scenarios`; it does not describe this table.
+
+DROP TABLE IF EXISTS `tenants_datacenters`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `tenants_datacenters` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `nfvo_tenant_id` varchar(36) NOT NULL,
+ `datacenter_id` varchar(36) NOT NULL,
+ `datacenter_tenant_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `datacenter_nfvo_tenant` (`datacenter_id`,`nfvo_tenant_id`),
+ KEY `FK_nfvo_tenants_datacenters` (`datacenter_id`),
+ KEY `FK_nfvo_tenants_vim_tenants` (`datacenter_tenant_id`),
+ KEY `FK_tenants_datacenters_nfvo_tenants` (`nfvo_tenant_id`),
+ CONSTRAINT `FK_tenants_datacenters_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_tenants_datacenters_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_tenants_datacenters_nfvo_tenants` FOREIGN KEY (`nfvo_tenant_id`) REFERENCES `nfvo_tenants` (`uuid`)
+) ENGINE=InnoDB AUTO_INCREMENT=11 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user'
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `uuids`
+--
+
+DROP TABLE IF EXISTS `uuids`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `uuids` (
+ `uuid` varchar(36) NOT NULL,
+ `root_uuid` varchar(36) DEFAULT NULL COMMENT 'Some related UUIDs can be grouped by this field, so that they can be deleted at once',
+ `created_at` double NOT NULL,
+ `used_at` varchar(36) DEFAULT NULL COMMENT 'Table that uses this UUID',
+ PRIMARY KEY (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Table with all unique IDs used to avoid UUID repetitions among different elements';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `vim_wim_actions`
+--
+-- Work queue of individual VIM/WIM tasks. Note the composite primary key
+-- order (task_index first): tasks are addressed per instance_action.
+
+DROP TABLE IF EXISTS `vim_wim_actions`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `vim_wim_actions` (
+ `instance_action_id` varchar(36) NOT NULL,
+ `task_index` int(6) NOT NULL,
+ `worker` varchar(64) DEFAULT NULL,
+ `related` varchar(36) DEFAULT NULL,
+ `datacenter_vim_id` varchar(36) DEFAULT NULL,
+ `vim_id` varchar(300) DEFAULT NULL,
+ `wim_account_id` varchar(36) DEFAULT NULL,
+ `wim_internal_id` varchar(64) DEFAULT NULL,
+ `action` varchar(36) NOT NULL COMMENT 'CREATE,DELETE,START,STOP...',
+ `item` enum('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_sfis','instance_sfs','instance_classifications','instance_sfps','instance_wim_nets') NOT NULL COMMENT 'table where the item is stored',
+ `item_id` varchar(36) DEFAULT NULL COMMENT 'uuid of the entry in the table',
+ `status` enum('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED','FINISHED') NOT NULL DEFAULT 'SCHEDULED',
+ `extra` text COMMENT 'json with params:, depends_on: for the task',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`task_index`,`instance_action_id`),
+ KEY `FK_actions_instance_actions` (`instance_action_id`),
+ KEY `FK_actions_vims` (`datacenter_vim_id`),
+ KEY `item_type_id` (`item`,`item_id`),
+ KEY `FK_actions_wims` (`wim_account_id`),
+ CONSTRAINT `FK_actions_instance_actions` FOREIGN KEY (`instance_action_id`) REFERENCES `instance_actions` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_actions_vims` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_actions_wims` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Table with the individual VIM actions.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `vms`
+--
+-- VDU catalogue. `image_id` is nullable so that PDUs (physical units without
+-- a VIM image) can be modelled — see schema_version entry 36.
+
+DROP TABLE IF EXISTS `vms`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `vms` (
+ `uuid` varchar(36) NOT NULL,
+ `osm_id` varchar(255) DEFAULT NULL,
+ `pdu_type` varchar(255) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `vnf_id` varchar(36) NOT NULL,
+ `count` smallint(6) NOT NULL DEFAULT '1',
+ `flavor_id` varchar(36) NOT NULL COMMENT 'Link to flavor table',
+ `image_id` varchar(36) DEFAULT NULL COMMENT 'Link to image table',
+ `image_list` text COMMENT 'Alternative images',
+ `image_path` varchar(100) DEFAULT NULL COMMENT 'Path where the image of the VM is located',
+ `boot_data` text,
+ `description` varchar(255) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ `availability_zone` varchar(255) DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `name_vnf_id` (`name`,`vnf_id`),
+ KEY `FK_vms_vnfs` (`vnf_id`),
+ KEY `FK_vms_images` (`image_id`),
+ KEY `FK_vms_flavors` (`flavor_id`),
+ CONSTRAINT `FK_vms_flavors` FOREIGN KEY (`flavor_id`) REFERENCES `flavors` (`uuid`),
+ CONSTRAINT `FK_vms_images` FOREIGN KEY (`image_id`) REFERENCES `images` (`uuid`),
+ CONSTRAINT `FK_vms_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VM definitions. It contains the set of VMs used by the VNF definitions.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `vnfs`
+--
+-- VNF catalogue. Deleting a tenant sets tenant_id to NULL (ON DELETE SET
+-- NULL) rather than removing the VNF.
+
+DROP TABLE IF EXISTS `vnfs`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `vnfs` (
+ `uuid` varchar(36) NOT NULL,
+ `osm_id` varchar(255) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `short_name` varchar(255) DEFAULT NULL,
+ `tenant_id` varchar(36) DEFAULT NULL,
+ `physical` enum('true','false') NOT NULL DEFAULT 'false',
+ `public` enum('true','false') NOT NULL DEFAULT 'false',
+ `description` varchar(255) DEFAULT NULL,
+ `vendor` varchar(255) DEFAULT NULL,
+ `mgmt_access` varchar(2000) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ `class` varchar(36) DEFAULT 'MISC',
+ `descriptor` text COMMENT 'Original text descriptor used for create the VNF',
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `osm_id_tenant_id` (`osm_id`,`tenant_id`),
+ KEY `FK_vnfs_nfvo_tenants` (`tenant_id`),
+ CONSTRAINT `FK_vnfs_nfvo_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VNF definitions. This is the catalogue of VNFs. It also includes Physical Network Functions or Physical Elements.\r\n';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wim_accounts`
+--
+
+DROP TABLE IF EXISTS `wim_accounts`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `wim_accounts` (
+ `uuid` varchar(36) NOT NULL,
+ `name` varchar(255) DEFAULT NULL,
+ `wim_id` varchar(36) NOT NULL,
+ `sdn` enum('true','false') NOT NULL DEFAULT 'false',
+ `user` varchar(64) DEFAULT NULL,
+ `password` varchar(64) DEFAULT NULL,
+ `config` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_wim_accounts_wims` (`wim_id`),
+ CONSTRAINT `FK_wim_accounts_wims` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIM accounts by the user';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wim_nfvo_tenants`
+--
+-- NOTE: the FK_wims_nfvo_tenants index on (wim_id) is redundant with the
+-- leading column of the unique key wim_nfvo_tenant (wim_id, nfvo_tenant_id);
+-- harmless, kept as dumped.
+
+DROP TABLE IF EXISTS `wim_nfvo_tenants`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `wim_nfvo_tenants` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `nfvo_tenant_id` varchar(36) NOT NULL,
+ `wim_id` varchar(36) NOT NULL,
+ `wim_account_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `wim_nfvo_tenant` (`wim_id`,`nfvo_tenant_id`),
+ KEY `FK_wims_nfvo_tenants` (`wim_id`),
+ KEY `FK_wim_accounts_nfvo_tenants` (`wim_account_id`),
+ KEY `FK_nfvo_tenants_wim_accounts` (`nfvo_tenant_id`),
+ CONSTRAINT `FK_nfvo_tenants_wim_accounts` FOREIGN KEY (`nfvo_tenant_id`) REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_wim_accounts_nfvo_tenants` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_wims_nfvo_tenants` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIM accounts mapping to NFVO tenants';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wim_port_mappings`
+--
+-- NOTE(review): index FK_wims_wim_physical_connections and constraint
+-- FK_wims_wim_port_mappings both cover wim_id under different names —
+-- presumably a historical rename; kept as dumped.
+
+DROP TABLE IF EXISTS `wim_port_mappings`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `wim_port_mappings` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `wim_id` varchar(36) NOT NULL,
+ `datacenter_id` varchar(36) NOT NULL,
+ `device_id` varchar(64) DEFAULT NULL,
+ `device_interface_id` varchar(64) DEFAULT NULL,
+ `service_endpoint_id` varchar(256) NOT NULL,
+ `switch_dpid` varchar(64) DEFAULT NULL,
+ `switch_port` varchar(64) DEFAULT NULL,
+ `service_mapping_info` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `unique_wim_port_mapping` (`wim_id`,`service_endpoint_id`),
+ KEY `FK_wims_wim_physical_connections` (`wim_id`),
+ KEY `FK_datacenters_wim_port_mappings` (`datacenter_id`),
+ CONSTRAINT `FK_datacenters_wim_port_mappings` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_wims_wim_port_mappings` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='WIM port mappings managed by the WIM.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wims`
+--
+
+DROP TABLE IF EXISTS `wims`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `wims` (
+ `uuid` varchar(36) NOT NULL,
+ `name` varchar(255) NOT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `type` varchar(36) NOT NULL DEFAULT 'odl',
+ `wim_url` varchar(150) NOT NULL,
+ `config` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIMs managed by the NFVO.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Dumping routines for database 'mano_db'
+--
+-- Restore the session variables saved at the top of the dump.
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2020-07-15 22:32:14
+
+
+
+
+
+-- MySQL dump 10.13 Distrib 5.7.30, for Linux (x86_64)
+--
+-- Host: localhost Database: {{mano_db}}
+-- ------------------------------------------------------
+-- Server version 5.7.27
+-- NOTE: {{mano_db}} is a template placeholder, presumably substituted with
+-- the real database name before this dump is loaded — confirm against the
+-- install/deploy scripts.
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Dumping data for table `schema_version`
+--
+-- One row per applied migration; version_int must stay gapless (see the
+-- column comment in the table definition). This seed records up to 41; the
+-- migrate script (LAST_DB_VERSION=42) performs the remaining upgrade.
+-- NOTE(review): the 'Chagnes' typo and the '6.0.4' version string in row 40
+-- are historical data already present in deployed databases — do not edit.
+
+LOCK TABLES `schema_version` WRITE;
+/*!40000 ALTER TABLE `schema_version` DISABLE KEYS */;
+INSERT INTO `schema_version` VALUES
+(0,'0.0','0.0.0','Database in init process','2015-05-08'),
+(1,'0.1','0.2.2','insert schema_version','2015-05-08'),
+(2,'0.2','0.2.5','new tables images,flavors','2015-07-13'),
+(3,'0.3','0.3.3','alter vim_tenant tables','2015-07-28'),
+(4,'0.4','0.3.5','enlarge graph field at sce_vnfs/nets','2015-10-20'),
+(5,'0.5','0.4.1','Add mac address for bridge interfaces','2015-12-14'),
+(6,'0.6','0.4.2','Adding VIM status info','2015-12-22'),
+(7,'0.7','0.4.3','Changing created_at time at database','2016-01-25'),
+(8,'0.8','0.4.32','Enlarging name at database','2016-02-01'),
+(9,'0.9','0.4.33','Add ACTIVE:NoMgmtIP to instance_vms table','2016-02-05'),
+(10,'0.10','0.4.36','tenant management of vnfs,scenarios','2016-03-08'),
+(11,'0.11','0.4.43','remove unique name at scenarios,instance_scenarios','2016-07-18'),
+(12,'0.12','0.4.46','create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to interfaces and sce_interfaces','2016-08-29'),
+(13,'0.13','0.4.47','insert cloud-config at scenarios,instance_scenarios','2016-08-30'),
+(14,'0.14','0.4.57','remove unique index vim_net_id, instance_scenario_id','2016-09-26'),
+(15,'0.15','0.4.59','add columns universal_name and checksum at table images, add unique index universal_name_checksum, and change location to allow NULL; change column image_path in table vms to allow NULL','2016-09-27'),
+(16,'0.16','0.5.2','enlarge vim_tenant_name and id. New config at datacenter_tenants','2016-10-11'),
+(17,'0.17','0.5.3','Extra description json format of additional devices in datacenter_flavors','2016-12-20'),
+(18,'0.18','0.5.4','Add columns \'floating_ip\' and \'port_security\' at tables \'interfaces\' and \'instance_interfaces\'','2017-01-09'),
+(19,'0.19','0.5.5','Extra Boot-data content at VNFC (vms)','2017-01-11'),
+(20,'0.20','0.5.9','Added columns to store dataplane connectivity info','2017-03-13'),
+(21,'0.21','0.5.15','Edit instance_nets to allow instance_scenario_id=None and enlarge column dns_address at table ip_profiles','2017-06-02'),
+(22,'0.22','0.5.16','Changed type of ram in flavors from SMALLINT to MEDIUMINT','2017-06-02'),
+(23,'0.23','0.5.20','Changed type of ram in flavors from SMALLINT to MEDIUMINT','2017-08-29'),
+(24,'0.24','0.5.21','Added vnfd fields','2017-08-29'),
+(25,'0.25','0.5.22','Added osm_id to vnfs,scenarios','2017-09-01'),
+(26,'0.26','0.5.23','Several changes','2017-09-09'),
+(27,'0.27','0.5.25','Added encrypted_RO_priv_key,RO_pub_key to table nfvo_tenants','2017-09-29'),
+(28,'0.28','0.5.28','Adding VNFFG-related tables','2017-11-20'),
+(29,'0.29','0.5.59','Change member_vnf_index to str accordingly to the model','2018-04-11'),
+(30,'0.30','0.5.60','Add image_list to vms','2018-04-24'),
+(31,'0.31','0.5.61','Add vim_network_name to sce_nets','2018-05-03'),
+(32,'0.32','0.5.70','Add vim_name to instance vms','2018-06-28'),
+(33,'0.33','0.5.82','Add pdu information to vms','2018-11-13'),
+(34,'0.34','0.6.00','Added WIM tables','2018-09-10'),
+(35,'0.35','0.6.02','Adding ingress and egress ports for RSPs','2018-12-11'),
+(36,'0.36','0.6.03','Allow vm without image_id for PDUs','2018-12-19'),
+(37,'0.37','0.6.09','Adding the enum tags for SFC','2019-02-07'),
+(38,'0.38','0.6.11','Adding related to vim_wim_actions','2019-03-07'),
+(39,'0.39','0.6.20','Enlarge vim_id to 300 at all places','2019-05-23'),
+(40,'0.40','6.0.4','Chagnes to SDN ','2019-10-23'),
+(41,'0.41','8.0.0','Removing unique name for wims/wim_accounts','2020-07-16');
+/*!40000 ALTER TABLE `schema_version` ENABLE KEYS */;
+UNLOCK TABLES;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2020-07-15 22:32:14
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#
+# Upgrade/Downgrade the openmano database, preserving its content
+#
+# Directory containing this script (and, presumably, the per-version
+# migration helpers it invokes further down).
+DBUTILS="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+# Default connection settings; overridable via the command-line options below.
+DBUSER="mano"
+DBPASS=""
+DEFAULT_DBPASS="manopw"  # tried silently before prompting when -p is not given
+DBHOST=""
+DBPORT="3306"
+DBNAME="mano_db"
+QUIET_MODE=""
+BACKUP_DIR=""
+BACKUP_FILE=""
+# TODO update it with the last database version
+LAST_DB_VERSION=42
+
+# Detect paths
+# NOTE(review): MYSQL/AWK/GREP are resolved here but the code visible below
+# invokes the bare command names; presumably these variables are used later in
+# the script — confirm before removing.
+MYSQL=$(which mysql)
+AWK=$(which awk)
+GREP=$(which grep)
+
+# Print command-line help to stdout. Defaults shown are the values of the
+# corresponding variables at the time of the call.
+function usage(){
+ echo -e "Usage: $0 OPTIONS [version]"
+ echo -e " Upgrades/Downgrades openmano database preserving the content."\
+ "If [version] is not provided, it is upgraded to the last version"
+ echo -e " OPTIONS"
+ echo -e " -u USER database user. '$DBUSER' by default. Prompts if DB access fails"
+ echo -e " -p PASS database password. If missing it tries without and '$DEFAULT_DBPASS' password before prompting"
+ echo -e " -P PORT database port. '$DBPORT' by default"
+ echo -e " -h HOST database host. 'localhost' by default"
+ echo -e " -d NAME database name. '$DBNAME' by default. Prompts if DB access fails"
+ echo -e " -b DIR backup folder where to create rollback backup file"
+ echo -e " -q --quiet: Do not prompt for credentials and exit if cannot access to database"
+ echo -e " --help shows this help"
+}
+
+# Parse command-line options. Long options are funneled through the getopts
+# "-" pseudo-option: "--help" and "--quiet" arrive with OPTARG set to the
+# long name; anything else long is rejected.
+while getopts ":u:p:b:P:h:d:q-:" o; do
+ case "${o}" in
+ u)
+ DBUSER="$OPTARG"
+ ;;
+ p)
+ DBPASS="$OPTARG"
+ ;;
+ P)
+ DBPORT="$OPTARG"
+ ;;
+ d)
+ DBNAME="$OPTARG"
+ ;;
+ h)
+ DBHOST="$OPTARG"
+ ;;
+ b)
+ BACKUP_DIR="$OPTARG"
+ ;;
+ q)
+ export QUIET_MODE=yes
+ ;;
+ -)
+ [ "${OPTARG}" == "help" ] && usage && exit 0
+ [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && continue
+ echo "Invalid option: '--$OPTARG'. Type --help for more information" >&2
+ exit 1
+ ;;
+ \?)
+ echo "Invalid option: '-$OPTARG'. Type --help for more information" >&2
+ exit 1
+ ;;
+ :)
+ echo "Option '-$OPTARG' requires an argument. Type --help for more information" >&2
+ exit 1
+ ;;
+ *)
+ usage >&2
+ exit 1
+ ;;
+ esac
+done
+# Drop the parsed options so $1 is the optional target version.
+shift $((OPTIND-1))
+
+# Optional positional argument: the target schema version.
+DB_VERSION=$1
+
+if [ -n "$DB_VERSION" ] ; then
+ # check it is a number and an allowed one
+ # "[ N -eq N ]" only succeeds when $DB_VERSION is an integer; otherwise the
+ # "! echo" trick prints the error (echo's 0 status negated to 1) so the
+ # final "|| exit 1" fires.
+ [ "$DB_VERSION" -eq "$DB_VERSION" ] 2>/dev/null ||
+ ! echo "parameter 'version' requires a integer value" >&2 || exit 1
+ if [ "$DB_VERSION" -lt 0 ] || [ "$DB_VERSION" -gt "$LAST_DB_VERSION" ] ; then
+ echo "parameter 'version' requires a valid database version between '0' and '$LAST_DB_VERSION'"\
+ "If you need an upper version, get a newer version of this script '$0'" >&2
+ exit 1
+ fi
+else
+ # no version given: migrate up to the newest version this script knows
+ DB_VERSION="$LAST_DB_VERSION"
+fi
+
+# Creating temporary file
+TEMPFILE="$(mktemp -q --tmpdir "migratemanodb.XXXXXX")"
+trap 'rm -f "$TEMPFILE"' EXIT
+chmod 0600 "$TEMPFILE"
+DEF_EXTRA_FILE_PARAM="--defaults-extra-file=$TEMPFILE"
+echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
+
+# Check and ask for database user password
+# Loop until a trivial "quit" statement succeeds against $DBNAME.
+# First failed attempt with no -p password: silently retry with the default
+# password. After that: abort in quiet mode, otherwise prompt interactively
+# for name/user/password and rewrite the credentials file.
+FIRST_TRY="yes"
+while ! DB_ERROR=`mysql "$DEF_EXTRA_FILE_PARAM" $DBNAME -e "quit" 2>&1 >/dev/null`
+do
+ # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
+ [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
+ echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
+ continue
+ echo "$DB_ERROR"
+ [[ -n "$QUIET_MODE" ]] && echo -e "Invalid database credentials!!!" >&2 && exit 1
+ echo -e "Provide database name and credentials (Ctrl+c to abort):"
+ read -e -p " mysql database name($DBNAME): " KK
+ [ -n "$KK" ] && DBNAME="$KK"
+ read -e -p " mysql user($DBUSER): " KK
+ [ -n "$KK" ] && DBUSER="$KK"
+ read -e -s -p " mysql password: " DBPASS
+ echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
+ FIRST_TRY=""
+ echo
+done
+
+# Canonical mysql invocation used by the rest of the script.
+DBCMD="mysql $DEF_EXTRA_FILE_PARAM $DBNAME"
+#echo DBCMD $DBCMD
+
+#check that the database seems a openmano database
+# Probe two tables every openmano DB must have; failure means we are pointed
+# at the wrong schema, so bail out before touching anything.
+if ! echo -e "show create table vnfs;\nshow create table scenarios" | $DBCMD >/dev/null 2>&1
+then
+ echo " database $DBNAME does not seem to be an openmano database" >&2
+ exit 1;
+fi
+
+#GET DATABASE TARGET VERSION
+#DB_VERSION=0
+#[ $OPENMANO_VER_NUM -ge 2002 ] && DB_VERSION=1 #0.2.2 => 1
+#[ $OPENMANO_VER_NUM -ge 2005 ] && DB_VERSION=2 #0.2.5 => 2
+#[ $OPENMANO_VER_NUM -ge 3003 ] && DB_VERSION=3 #0.3.3 => 3
+#[ $OPENMANO_VER_NUM -ge 3005 ] && DB_VERSION=4 #0.3.5 => 4
+#[ $OPENMANO_VER_NUM -ge 4001 ] && DB_VERSION=5 #0.4.1 => 5
+#[ $OPENMANO_VER_NUM -ge 4002 ] && DB_VERSION=6 #0.4.2 => 6
+#[ $OPENMANO_VER_NUM -ge 4003 ] && DB_VERSION=7 #0.4.3 => 7
+#[ $OPENMANO_VER_NUM -ge 4032 ] && DB_VERSION=8 #0.4.32=> 8
+#[ $OPENMANO_VER_NUM -ge 4033 ] && DB_VERSION=9 #0.4.33=> 9
+#[ $OPENMANO_VER_NUM -ge 4036 ] && DB_VERSION=10 #0.4.36=> 10
+#[ $OPENMANO_VER_NUM -ge 4043 ] && DB_VERSION=11 #0.4.43=> 11
+#[ $OPENMANO_VER_NUM -ge 4046 ] && DB_VERSION=12 #0.4.46=> 12
+#[ $OPENMANO_VER_NUM -ge 4047 ] && DB_VERSION=13 #0.4.47=> 13
+#[ $OPENMANO_VER_NUM -ge 4057 ] && DB_VERSION=14 #0.4.57=> 14
+#[ $OPENMANO_VER_NUM -ge 4059 ] && DB_VERSION=15 #0.4.59=> 15
+#[ $OPENMANO_VER_NUM -ge 5002 ] && DB_VERSION=16 #0.5.2 => 16
+#[ $OPENMANO_VER_NUM -ge 5003 ] && DB_VERSION=17 #0.5.3 => 17
+#[ $OPENMANO_VER_NUM -ge 5004 ] && DB_VERSION=18 #0.5.4 => 18
+#[ $OPENMANO_VER_NUM -ge 5005 ] && DB_VERSION=19 #0.5.5 => 19
+#[ $OPENMANO_VER_NUM -ge 5009 ] && DB_VERSION=20 #0.5.9 => 20
+#[ $OPENMANO_VER_NUM -ge 5015 ] && DB_VERSION=21 #0.5.15 => 21
+#[ $OPENMANO_VER_NUM -ge 5016 ] && DB_VERSION=22 #0.5.16 => 22
+#[ $OPENMANO_VER_NUM -ge 5020 ] && DB_VERSION=23 #0.5.20 => 23
+#[ $OPENMANO_VER_NUM -ge 5021 ] && DB_VERSION=24 #0.5.21 => 24
+#[ $OPENMANO_VER_NUM -ge 5022 ] && DB_VERSION=25 #0.5.22 => 25
+#[ $OPENMANO_VER_NUM -ge 5024 ] && DB_VERSION=26 #0.5.24 => 26
+#[ $OPENMANO_VER_NUM -ge 5025 ] && DB_VERSION=27 #0.5.25 => 27
+#[ $OPENMANO_VER_NUM -ge 5052 ] && DB_VERSION=28 #0.5.52 => 28
+#[ $OPENMANO_VER_NUM -ge 5059 ] && DB_VERSION=29 #0.5.59 => 29
+#[ $OPENMANO_VER_NUM -ge 5060 ] && DB_VERSION=30 #0.5.60 => 30
+#[ $OPENMANO_VER_NUM -ge 5061 ] && DB_VERSION=31 #0.5.61 => 31
+#[ $OPENMANO_VER_NUM -ge 5070 ] && DB_VERSION=32 #0.5.70 => 32
+#[ $OPENMANO_VER_NUM -ge 5082 ] && DB_VERSION=33 #0.5.82 => 33
+#[ $OPENMANO_VER_NUM -ge 6000 ] && DB_VERSION=34 #0.6.00 => 34
+#[ $OPENMANO_VER_NUM -ge 6001 ] && DB_VERSION=35 #0.6.01 => 35
+#[ $OPENMANO_VER_NUM -ge 6003 ] && DB_VERSION=36 #0.6.03 => 36
+#[ $OPENMANO_VER_NUM -ge 6009 ] && DB_VERSION=37 #0.6.09 => 37
+#[ $OPENMANO_VER_NUM -ge 6011 ] && DB_VERSION=38 #0.6.11 => 38
+#[ $OPENMANO_VER_NUM -ge 6020 ] && DB_VERSION=39 #0.6.20 => 39
+#[ $OPENMANO_VER_NUM -ge 6000004 ] && DB_VERSION=40 #6.0.4 => 40
+#[ $OPENMANO_VER_NUM -ge 8000000 ] && DB_VERSION=41 #8.0.0 => 41
+#[ $OPENMANO_VER_NUM -ge 8000002 ] && DB_VERSION=42 #8.0.2 => 42
+# TODO ... put next versions here
+
+# Migration 0 -> 1: create the schema_version bookkeeping table and record the
+# first version row. Relies on the sql() helper (presumably defined further
+# down in this script) to execute the statement against $DBNAME.
+function upgrade_to_1(){
+ # echo " upgrade database from version 0.0 to version 0.1"
+ echo " CREATE TABLE \`schema_version\`"
+ sql "CREATE TABLE \`schema_version\` (
+ \`version_int\` INT NOT NULL COMMENT 'version as a number. Must not contain gaps',
+ \`version\` VARCHAR(20) NOT NULL COMMENT 'version as a text',
+ \`openmano_ver\` VARCHAR(20) NOT NULL COMMENT 'openmano version',
+ \`comments\` VARCHAR(2000) NULL COMMENT 'changes to database',
+ \`date\` DATE NULL,
+ PRIMARY KEY (\`version_int\`)
+ )
+ COMMENT='database schema control version'
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ sql "INSERT INTO \`schema_version\` (\`version_int\`, \`version\`, \`openmano_ver\`, \`comments\`, \`date\`)
+ VALUES (1, '0.1', '0.2.2', 'insert schema_version', '2015-05-08');"
+}
+# Migration 1 -> 0: drop the schema_version bookkeeping table (inverse of
+# upgrade_to_1).
+function downgrade_from_1(){
+ # echo " downgrade database from version 0.1 to version 0.0"
+ echo " DROP TABLE IF EXISTS \`schema_version\`"
+ sql "DROP TABLE IF EXISTS \`schema_version\`;"
+}
+function upgrade_to_2(){
+ # Migration 0.1 -> 0.2: add VIM credentials to vim_tenants, introduce the
+ # images/flavors tables (plus their per-datacenter link tables), migrate the
+ # image/flavor references currently embedded in 'vms' into them, and repoint
+ # vms at the new tables via foreign keys.
+ # echo " upgrade database from version 0.1 to version 0.2"
+ echo " Add columns user/passwd to table 'vim_tenants'"
+ sql "ALTER TABLE vim_tenants ADD COLUMN user VARCHAR(36) NULL COMMENT 'Credentials for vim' AFTER created,
+ ADD COLUMN passwd VARCHAR(50) NULL COMMENT 'Credentials for vim' AFTER user;"
+ echo " Add table 'images' and 'datacenters_images'"
+ sql "CREATE TABLE images (
+ uuid VARCHAR(36) NOT NULL,
+ name VARCHAR(50) NOT NULL,
+ location VARCHAR(200) NOT NULL,
+ description VARCHAR(100) NULL,
+ metadata VARCHAR(400) NULL,
+ PRIMARY KEY (uuid),
+ UNIQUE INDEX location (location) )
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ sql "CREATE TABLE datacenters_images (
+ id INT NOT NULL AUTO_INCREMENT,
+ image_id VARCHAR(36) NOT NULL,
+ datacenter_id VARCHAR(36) NOT NULL,
+ vim_id VARCHAR(36) NOT NULL,
+ PRIMARY KEY (id),
+ CONSTRAINT FK__images FOREIGN KEY (image_id) REFERENCES images (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ CONSTRAINT FK__datacenters_i FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE CASCADE ON DELETE CASCADE )
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ echo " migrate data from table 'vms' into 'images'"
+ sql "INSERT INTO images (uuid, name, location) SELECT DISTINCT vim_image_id, vim_image_id, image_path FROM vms;"
+ # NOTE: 'JOIN datacenters' with no ON clause is a cartesian product — each
+ # distinct image is registered against every datacenter.
+ sql "INSERT INTO datacenters_images (image_id, datacenter_id, vim_id)
+ SELECT DISTINCT vim_image_id, datacenters.uuid, vim_image_id FROM vms JOIN datacenters;"
+ echo " Add table 'flavors' and 'datacenter_flavors'"
+ sql "CREATE TABLE flavors (
+ uuid VARCHAR(36) NOT NULL,
+ name VARCHAR(50) NOT NULL,
+ description VARCHAR(100) NULL,
+ disk SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
+ ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
+ vcpus SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
+ extended VARCHAR(2000) NULL DEFAULT NULL COMMENT 'Extra description json format of needed resources and pining, orginized in sets per numa',
+ PRIMARY KEY (uuid) )
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ sql "CREATE TABLE datacenters_flavors (
+ id INT NOT NULL AUTO_INCREMENT,
+ flavor_id VARCHAR(36) NOT NULL,
+ datacenter_id VARCHAR(36) NOT NULL,
+ vim_id VARCHAR(36) NOT NULL,
+ PRIMARY KEY (id),
+ CONSTRAINT FK__flavors FOREIGN KEY (flavor_id) REFERENCES flavors (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ CONSTRAINT FK__datacenters_f FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE CASCADE ON DELETE CASCADE )
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ echo " migrate data from table 'vms' into 'flavors'"
+ sql "INSERT INTO flavors (uuid, name) SELECT DISTINCT vim_flavor_id, vim_flavor_id FROM vms;"
+ sql "INSERT INTO datacenters_flavors (flavor_id, datacenter_id, vim_id)
+ SELECT DISTINCT vim_flavor_id, datacenters.uuid, vim_flavor_id FROM vms JOIN datacenters;"
+ # DROP DEFAULT first, then rename the columns and attach the new FKs.
+ sql "ALTER TABLE vms ALTER vim_flavor_id DROP DEFAULT, ALTER vim_image_id DROP DEFAULT;
+ ALTER TABLE vms CHANGE COLUMN vim_flavor_id flavor_id VARCHAR(36) NOT NULL COMMENT 'Link to flavor table' AFTER vnf_id,
+ CHANGE COLUMN vim_image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER flavor_id,
+ ADD CONSTRAINT FK_vms_images FOREIGN KEY (image_id) REFERENCES images (uuid),
+ ADD CONSTRAINT FK_vms_flavors FOREIGN KEY (flavor_id) REFERENCES flavors (uuid);"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (2, '0.2', '0.2.5', 'new tables images,flavors', '2015-07-13');"
+
+}
+
+function downgrade_from_2(){
+ # Migration 0.2 -> 0.1: rename vms.flavor_id/image_id back to their vim_*
+ # names, drop the foreign keys, remove the vim_tenants credential columns and
+ # drop the images/flavors tables (link tables first, to satisfy the FKs).
+ # echo " downgrade database from version 0.2 to version 0.1"
+ echo " migrate back data from 'datacenters_images' 'datacenters_flavors' into 'vms'"
+ sql "ALTER TABLE vms ALTER image_id DROP DEFAULT, ALTER flavor_id DROP DEFAULT;
+ ALTER TABLE vms CHANGE COLUMN flavor_id vim_flavor_id VARCHAR(36) NOT NULL COMMENT 'Flavor ID in the VIM DB' AFTER vnf_id,
+ CHANGE COLUMN image_id vim_image_id VARCHAR(36) NOT NULL COMMENT 'Image ID in the VIM DB' AFTER vim_flavor_id,
+ DROP FOREIGN KEY FK_vms_flavors, DROP INDEX FK_vms_flavors,
+ DROP FOREIGN KEY FK_vms_images, DROP INDEX FK_vms_images;"
+# NOTE(review): the reverse data migration below was never implemented — the
+# vim_id values are simply lost on downgrade (left commented out by the author).
+# echo "UPDATE v SET v.vim_image_id=di.vim_id
+# FROM vms as v INNER JOIN images as i ON v.vim_image_id=i.uuid
+# INNER JOIN datacenters_images as di ON i.uuid=di.image_id;"
+ echo " Delete columns 'user/passwd' from 'vim_tenants'"
+ sql "ALTER TABLE vim_tenants DROP COLUMN user, DROP COLUMN passwd; "
+ echo " delete tables 'datacenter_images', 'images'"
+ sql "DROP TABLE IF EXISTS \`datacenters_images\`;"
+ sql "DROP TABLE IF EXISTS \`images\`;"
+ echo " delete tables 'datacenter_flavors', 'flavors'"
+ sql "DROP TABLE IF EXISTS \`datacenters_flavors\`;"
+ sql "DROP TABLE IF EXISTS \`flavors\`;"
+ sql "DELETE FROM schema_version WHERE version_int='2';"
+}
+
+function upgrade_to_3(){
+ # Migration 0.2 -> 0.3: relax logs/uuids enum columns to VARCHAR, add a
+ # 'created' flag to the datacenter link tables, allow NULL vim_interface_id,
+ # add datacenters.config, and attach each vim_tenant to a datacenter.
+ # echo " upgrade database from version 0.2 to version 0.3"
+ echo " Change table 'logs', 'uuids"
+ sql "ALTER TABLE logs CHANGE COLUMN related related VARCHAR(36) NOT NULL COMMENT 'Relevant element for the log' AFTER nfvo_tenant_id;"
+ sql "ALTER TABLE uuids CHANGE COLUMN used_at used_at VARCHAR(36) NULL DEFAULT NULL COMMENT 'Table that uses this UUID' AFTER created_at;"
+ echo " Add column created to table 'datacenters_images' and 'datacenters_flavors'"
+ for table in datacenters_images datacenters_flavors
+ do
+ sql "ALTER TABLE $table ADD COLUMN created ENUM('true','false') NOT NULL DEFAULT 'false'
+ COMMENT 'Indicates if it has been created by openmano, or already existed' AFTER vim_id;"
+ done
+ sql "ALTER TABLE images CHANGE COLUMN metadata metadata VARCHAR(2000) NULL DEFAULT NULL AFTER description;"
+ echo " Allow null to column 'vim_interface_id' in 'instance_interfaces'"
+ sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'vim identity for that interface' AFTER interface_id; "
+ echo " Add column config to table 'datacenters'"
+ sql "ALTER TABLE datacenters ADD COLUMN config VARCHAR(4000) NULL DEFAULT NULL COMMENT 'extra config information in json' AFTER vim_url_admin;
+ "
+ echo " Add column datacenter_id to table 'vim_tenants'"
+ sql "ALTER TABLE vim_tenants ADD COLUMN datacenter_id VARCHAR(36) NULL COMMENT 'Datacenter of this tenant' AFTER uuid,
+ DROP INDEX name, DROP INDEX vim_tenant_id;"
+ sql "ALTER TABLE vim_tenants CHANGE COLUMN name vim_tenant_name VARCHAR(36) NULL DEFAULT NULL COMMENT 'tenant name at VIM' AFTER datacenter_id,
+ CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
+ # BUGFIX: this statement was only echoed, never executed. Without running it
+ # every datacenter_id stays NULL and the DELETE below would wipe the whole
+ # vim_tenants table.
+ sql "UPDATE vim_tenants as vt LEFT JOIN tenants_datacenters as td ON vt.uuid=td.vim_tenant_id
+ SET vt.datacenter_id=td.datacenter_id;"
+ # Orphan tenants (no datacenter association) cannot satisfy the NOT NULL +
+ # FK constraints added next, so they are removed.
+ sql "DELETE FROM vim_tenants WHERE datacenter_id is NULL;"
+ sql "ALTER TABLE vim_tenants ALTER datacenter_id DROP DEFAULT;
+ ALTER TABLE vim_tenants
+ CHANGE COLUMN datacenter_id datacenter_id VARCHAR(36) NOT NULL COMMENT 'Datacenter of this tenant' AFTER uuid;"
+ sql "ALTER TABLE vim_tenants ADD CONSTRAINT FK_vim_tenants_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid)
+ ON UPDATE CASCADE ON DELETE CASCADE;"
+
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (3, '0.3', '0.3.3', 'alter vim_tenant tables', '2015-07-28');"
+}
+
+
+function downgrade_from_3(){
+ # Migration 0.3 -> 0.2: restore the enum columns in logs/uuids, drop the
+ # 'created' flags, re-require vim_interface_id, drop datacenters.config and
+ # detach vim_tenants from datacenters (renaming the columns back).
+ # echo " downgrade database from version 0.3 to version 0.2"
+ echo " Change back table 'logs', 'uuids'"
+ sql "ALTER TABLE logs CHANGE COLUMN related related ENUM('nfvo_tenants','datacenters','vim_tenants','tenants_datacenters','vnfs','vms','interfaces','nets','scenarios','sce_vnfs','sce_interfaces','sce_nets','instance_scenarios','instance_vnfs','instance_vms','instance_nets','instance_interfaces') NOT NULL COMMENT 'Relevant element for the log' AFTER nfvo_tenant_id;"
+ sql "ALTER TABLE uuids CHANGE COLUMN used_at used_at ENUM('nfvo_tenants','datacenters','vim_tenants','vnfs','vms','interfaces','nets','scenarios','sce_vnfs','sce_interfaces','sce_nets','instance_scenarios','instance_vnfs','instance_vms','instance_nets','instance_interfaces') NULL DEFAULT NULL COMMENT 'Table that uses this UUID' AFTER created_at;"
+ echo " Delete column created from table 'datacenters_images' and 'datacenters_flavors'"
+ for table in datacenters_images datacenters_flavors
+ do
+ sql "ALTER TABLE $table DROP COLUMN created;"
+ done
+ sql "ALTER TABLE images CHANGE COLUMN metadata metadata VARCHAR(400) NULL DEFAULT NULL AFTER description;"
+ echo " Deny back null to column 'vim_interface_id' in 'instance_interfaces'"
+ sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NOT NULL COMMENT 'vim identity for that interface' AFTER interface_id; "
+ echo " Delete column config to table 'datacenters'"
+ sql "ALTER TABLE datacenters DROP COLUMN config;"
+ echo " Delete column datacenter_id to table 'vim_tenants'"
+ sql "ALTER TABLE vim_tenants DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_vim_tenants_datacenters;"
+ sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name name VARCHAR(36) NULL DEFAULT NULL COMMENT '' AFTER uuid"
+ sql "ALTER TABLE vim_tenants ALTER name DROP DEFAULT;"
+ # The '|| ! echo ...' idiom prints a warning when the statement fails while
+ # still yielding a non-zero status (echo succeeds, '!' inverts it), so the
+ # failure is visible to any caller checking exit codes.
+ sql "ALTER TABLE vim_tenants CHANGE COLUMN name name VARCHAR(36) NOT NULL AFTER uuid" || ! echo "Warning changing column name at vim_tenants!"
+ sql "ALTER TABLE vim_tenants ADD UNIQUE INDEX name (name);" || ! echo "Warning add unique index name at vim_tenants!"
+ sql "ALTER TABLE vim_tenants ALTER vim_tenant_id DROP DEFAULT;"
+ sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NOT NULL COMMENT 'Tenant ID in the VIM DB' AFTER name;" ||
+ ! echo "Warning changing column vim_tenant_id at vim_tenants!"
+ sql "ALTER TABLE vim_tenants ADD UNIQUE INDEX vim_tenant_id (vim_tenant_id);" ||
+ ! echo "Warning add unique index vim_tenant_id at vim_tenants!"
+ sql "DELETE FROM schema_version WHERE version_int='3';"
+}
+
+function upgrade_to_4(){
+ # Migration 0.3 -> 0.4: enlarge the 'graph' columns and widen
+ # datacenters.type from an ENUM to free-form VARCHAR (new VIM types).
+ # echo " upgrade database from version 0.3 to version 0.4"
+ echo " Enlarge graph field at tables 'sce_vnfs', 'sce_nets'"
+ for table in sce_vnfs sce_nets
+ do
+ sql "ALTER TABLE $table CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
+ done
+ sql "ALTER TABLE datacenters CHANGE COLUMN type type VARCHAR(36) NOT NULL DEFAULT 'openvim' AFTER description;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (4, '0.4', '0.3.5', 'enlarge graph field at sce_vnfs/nets', '2015-10-20');"
+}
+
+function downgrade_from_4(){
+ # Migration 0.4 -> 0.3: restore datacenters.type to the original ENUM.
+ # echo " downgrade database from version 0.4 to version 0.3"
+ echo " Shorten back graph field at tables 'sce_vnfs', 'sce_nets'"
+ for table in sce_vnfs sce_nets
+ do
+ # NOTE(review): despite the message, this keeps VARCHAR(2000) — the
+ # pre-0.4 length is unknown here, so the column is not actually shortened.
+ sql "ALTER TABLE $table CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
+ done
+ sql "ALTER TABLE datacenters CHANGE COLUMN type type ENUM('openvim','openstack') NOT NULL DEFAULT 'openvim' AFTER description;"
+ sql "DELETE FROM schema_version WHERE version_int='4';"
+}
+
+function upgrade_to_5(){
+ # Migration 0.4 -> 0.5: add a MAC-address column to 'interfaces'.
+ # echo " upgrade database from version 0.4 to version 0.5"
+ echo " Add 'mac' field for bridge interfaces in table 'interfaces'"
+ sql "ALTER TABLE interfaces ADD COLUMN mac CHAR(18) NULL DEFAULT NULL AFTER model;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (5, '0.5', '0.4.1', 'Add mac address for bridge interfaces', '2015-12-14');"
+}
+function downgrade_from_5(){
+ # Migration 0.5 -> 0.4: drop the MAC-address column from 'interfaces'.
+ # echo " downgrade database from version 0.5 to version 0.4"
+ echo " Remove 'mac' field for bridge interfaces in table 'interfaces'"
+ sql "ALTER TABLE interfaces DROP COLUMN mac;"
+ sql "DELETE FROM schema_version WHERE version_int='5';"
+}
+
+function upgrade_to_6(){
+ # Migration 0.5 -> 0.6: store original descriptors on vnfs/scenarios, add
+ # VIM status columns (error_msg, vim_info, richer status enums) to the
+ # instance_* tables, and link instance rows back to their scenario elements
+ # and to the datacenter/tenant they were deployed on.
+ # echo " upgrade database from version 0.5 to version 0.6"
+ echo " Add 'descriptor' field text to 'vnfd', 'scenarios'"
+ sql "ALTER TABLE vnfs ADD COLUMN descriptor TEXT NULL DEFAULT NULL COMMENT 'Original text descriptor used for create the VNF' AFTER class;"
+ sql "ALTER TABLE scenarios ADD COLUMN descriptor TEXT NULL DEFAULT NULL COMMENT 'Original text descriptor used for create the scenario' AFTER modified_at;"
+ # NOTE: log message says 'last_error' but the column is named 'error_msg'.
+ echo " Add 'last_error', 'vim_info' to 'instance_vms', 'instance_nets'"
+ sql "ALTER TABLE instance_vms ADD COLUMN error_msg VARCHAR(1024) NULL DEFAULT NULL AFTER status;"
+ sql "ALTER TABLE instance_vms ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER error_msg;"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD' AFTER vim_vm_id;"
+ sql "ALTER TABLE instance_nets ADD COLUMN error_msg VARCHAR(1024) NULL DEFAULT NULL AFTER status;"
+ sql "ALTER TABLE instance_nets ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER error_msg;"
+ sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR','VIM_ERROR','INACTIVE','DELETED') NOT NULL DEFAULT 'BUILD' AFTER instance_scenario_id;"
+ echo " Add 'mac_address', 'ip_address', 'vim_info' to 'instance_interfaces'"
+ sql "ALTER TABLE instance_interfaces ADD COLUMN mac_address VARCHAR(32) NULL DEFAULT NULL AFTER vim_interface_id, ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac_address, ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER ip_address;"
+ echo " Add 'sce_vnf_id','datacenter_id','vim_tenant_id' field to 'instance_vnfs'"
+ sql "ALTER TABLE instance_vnfs ADD COLUMN sce_vnf_id VARCHAR(36) NULL DEFAULT NULL AFTER vnf_id, ADD CONSTRAINT FK_instance_vnfs_sce_vnfs FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+ sql "ALTER TABLE instance_vnfs ADD COLUMN vim_tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER sce_vnf_id, ADD CONSTRAINT FK_instance_vnfs_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+ sql "ALTER TABLE instance_vnfs ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER vim_tenant_id, ADD CONSTRAINT FK_instance_vnfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+ echo " Add 'sce_net_id','net_id','datacenter_id','vim_tenant_id' field to 'instance_nets'"
+ sql "ALTER TABLE instance_nets ADD COLUMN sce_net_id VARCHAR(36) NULL DEFAULT NULL AFTER instance_scenario_id, ADD CONSTRAINT FK_instance_nets_sce_nets FOREIGN KEY (sce_net_id) REFERENCES sce_nets (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+ sql "ALTER TABLE instance_nets ADD COLUMN net_id VARCHAR(36) NULL DEFAULT NULL AFTER sce_net_id, ADD CONSTRAINT FK_instance_nets_nets FOREIGN KEY (net_id) REFERENCES nets (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+ sql "ALTER TABLE instance_nets ADD COLUMN vim_tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER net_id, ADD CONSTRAINT FK_instance_nets_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+ sql "ALTER TABLE instance_nets ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER vim_tenant_id, ADD CONSTRAINT FK_instance_nets_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (6, '0.6', '0.4.2', 'Adding VIM status info', '2015-12-22');"
+}
+function downgrade_from_6(){
+ # Migration 0.6 -> 0.5: remove the descriptors, the VIM status columns and
+ # the scenario/datacenter/tenant back-references added by upgrade_to_6.
+ # echo " downgrade database from version 0.6 to version 0.5"
+ echo " Remove 'descriptor' field from 'vnfd', 'scenarios' tables"
+ sql "ALTER TABLE vnfs DROP COLUMN descriptor;"
+ sql "ALTER TABLE scenarios DROP COLUMN descriptor;"
+ echo " Remove 'last_error', 'vim_info' from 'instance_vms', 'instance_nets'"
+ sql "ALTER TABLE instance_vms DROP COLUMN error_msg, DROP COLUMN vim_info;"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE','PAUSED','INACTIVE','CREATING','ERROR','DELETING') NOT NULL DEFAULT 'CREATING' AFTER vim_vm_id;"
+ sql "ALTER TABLE instance_nets DROP COLUMN error_msg, DROP COLUMN vim_info;"
+ sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR') NOT NULL DEFAULT 'BUILD' AFTER instance_scenario_id;"
+ echo " Remove 'mac_address', 'ip_address', 'vim_info' from 'instance_interfaces'"
+ sql "ALTER TABLE instance_interfaces DROP COLUMN mac_address, DROP COLUMN ip_address, DROP COLUMN vim_info;"
+ echo " Remove 'sce_vnf_id','datacenter_id','vim_tenant_id' field from 'instance_vnfs'"
+ sql "ALTER TABLE instance_vnfs DROP COLUMN sce_vnf_id, DROP FOREIGN KEY FK_instance_vnfs_sce_vnfs;"
+ sql "ALTER TABLE instance_vnfs DROP COLUMN vim_tenant_id, DROP FOREIGN KEY FK_instance_vnfs_vim_tenants;"
+ sql "ALTER TABLE instance_vnfs DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_instance_vnfs_datacenters;"
+ echo " Remove 'sce_net_id','net_id','datacenter_id','vim_tenant_id' field from 'instance_nets'"
+ sql "ALTER TABLE instance_nets DROP COLUMN sce_net_id, DROP FOREIGN KEY FK_instance_nets_sce_nets;"
+ sql "ALTER TABLE instance_nets DROP COLUMN net_id, DROP FOREIGN KEY FK_instance_nets_nets;"
+ sql "ALTER TABLE instance_nets DROP COLUMN vim_tenant_id, DROP FOREIGN KEY FK_instance_nets_vim_tenants;"
+ sql "ALTER TABLE instance_nets DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_instance_nets_datacenters;"
+ sql "DELETE FROM schema_version WHERE version_int='6';"
+}
+
+function upgrade_to_7(){
+ # Migration 0.6 -> 0.7: convert created_at/modified_at on all tables from
+ # TIMESTAMP to a unix-epoch DOUBLE, copying the existing values across via a
+ # temporary created_at_ column.
+ # echo " upgrade database from version 0.6 to version 0.7"
+ echo " Change created_at, modified_at from timestamp to unix float at all database"
+ for table in datacenters datacenter_nets instance_nets instance_scenarios instance_vms instance_vnfs interfaces nets nfvo_tenants scenarios sce_interfaces sce_nets sce_vnfs tenants_datacenters vim_tenants vms vnfs uuids
+ do
+ echo -en " $table \r"
+ sql "ALTER TABLE $table ADD COLUMN created_at_ DOUBLE NOT NULL after created_at;"
+ # BUGFIX: this UPDATE was previously only echoed, never executed, so the
+ # old timestamps were never copied into the new column before the old one
+ # was dropped.
+ sql "UPDATE $table SET created_at_=unix_timestamp(created_at);"
+ sql "ALTER TABLE $table DROP COLUMN created_at, CHANGE COLUMN created_at_ created_at DOUBLE NOT NULL;"
+ # 'uuids' has no modified_at column, so it is skipped here.
+ [[ $table == uuids ]] || sql "ALTER TABLE $table CHANGE COLUMN modified_at modified_at DOUBLE NULL DEFAULT NULL;"
+ done
+
+ # (removed a stray log line copied from upgrade_to_6 that announced a
+ # 'descriptor' change this migration does not perform)
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (7, '0.7', '0.4.3', 'Changing created_at time at database', '2016-01-25');"
+}
+function downgrade_from_7(){
+ # Migration 0.7 -> 0.6: convert created_at/modified_at back from unix-epoch
+ # DOUBLE to TIMESTAMP, copying values via a temporary created_at_ column.
+ # echo " downgrade database from version 0.7 to version 0.6"
+ echo " Change back created_at, modified_at from unix float to timestamp at all database"
+ for table in datacenters datacenter_nets instance_nets instance_scenarios instance_vms instance_vnfs interfaces nets nfvo_tenants scenarios sce_interfaces sce_nets sce_vnfs tenants_datacenters vim_tenants vms vnfs uuids
+ do
+ echo -en " $table \r"
+ sql "ALTER TABLE $table ADD COLUMN created_at_ TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP after created_at;"
+ # BUGFIX: this UPDATE was previously only echoed, never executed, so the
+ # epoch values were never converted back before the old column was dropped.
+ sql "UPDATE $table SET created_at_=from_unixtime(created_at);"
+ sql "ALTER TABLE $table DROP COLUMN created_at, CHANGE COLUMN created_at_ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP;"
+ # 'uuids' has no modified_at column, so it is skipped here.
+ [[ $table == uuids ]] || sql "ALTER TABLE $table CHANGE COLUMN modified_at modified_at TIMESTAMP NULL DEFAULT NULL;"
+ done
+ # (removed a stray log line copied from downgrade_from_6 that announced a
+ # 'descriptor' change this migration does not perform)
+ sql "DELETE FROM schema_version WHERE version_int='7';"
+}
+
+function upgrade_to_8(){
+ # Migration 0.7 -> 0.8: enlarge name/description columns to 255 across the
+ # schema, and widen interface and vim_tenants credential columns.
+ # echo " upgrade database from version 0.7 to version 0.8"
+ # NOTE: 'enalarge' typo is in the original log message; left untouched.
+ echo " Change enalarge name, description to 255 at all database"
+ for table in datacenters datacenter_nets flavors images instance_scenarios nets nfvo_tenants scenarios sce_nets sce_vnfs vms vnfs
+ do
+ echo -en " $table \r"
+ sql "ALTER TABLE $table CHANGE COLUMN name name VARCHAR(255) NOT NULL;"
+ sql "ALTER TABLE $table CHANGE COLUMN description description VARCHAR(255) NULL DEFAULT NULL;"
+ done
+ echo -en " interfaces \r"
+ sql "ALTER TABLE interfaces CHANGE COLUMN internal_name internal_name VARCHAR(255) NOT NULL, CHANGE COLUMN external_name external_name VARCHAR(255) NULL DEFAULT NULL;"
+ sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL;"
+ echo -en " vim_tenants \r"
+ sql "ALTER TABLE vim_tenants CHANGE COLUMN user user VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN passwd passwd VARCHAR(64) NULL DEFAULT NULL;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (8, '0.8', '0.4.32', 'Enlarging name at database', '2016-02-01');"
+}
+function downgrade_from_8(){
+ # Migration 0.8 -> 0.7: shrink the name/description columns back to their
+ # pre-0.8 lengths.
+ # echo " downgrade database from version 0.8 to version 0.7"
+ echo " Change back name,description to shorter length at all database"
+ for table in datacenters datacenter_nets flavors images instance_scenarios nets nfvo_tenants scenarios sce_nets sce_vnfs vms vnfs
+ do
+ # flavors/images originally used VARCHAR(50); every other table VARCHAR(36).
+ # The '||' chain keeps 50 when either test matches, otherwise sets 36.
+ name_length=50
+ [[ $table == flavors ]] || [[ $table == images ]] || name_length=36
+ echo -en " $table \r"
+ sql "ALTER TABLE $table CHANGE COLUMN name name VARCHAR($name_length) NOT NULL;"
+ sql "ALTER TABLE $table CHANGE COLUMN description description VARCHAR(100) NULL DEFAULT NULL;"
+ done
+ echo -en " interfaces \r"
+ sql "ALTER TABLE interfaces CHANGE COLUMN internal_name internal_name VARCHAR(25) NOT NULL, CHANGE COLUMN external_name external_name VARCHAR(25) NULL DEFAULT NULL;"
+ echo -en " vim_tenants \r"
+ sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(36) NULL DEFAULT NULL;"
+ sql "ALTER TABLE vim_tenants CHANGE COLUMN user user VARCHAR(36) NULL DEFAULT NULL, CHANGE COLUMN passwd passwd VARCHAR(50) NULL DEFAULT NULL;"
+ sql "DELETE FROM schema_version WHERE version_int='8';"
+}
+function upgrade_to_9(){
+ # Migration 0.8 -> 0.9: add the 'ACTIVE:NoMgmtIP' value to the
+ # instance_vms.status enum.
+ # echo " upgrade database from version 0.8 to version 0.9"
+ echo " Add more status to 'instance_vms'"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (9, '0.9', '0.4.33', 'Add ACTIVE:NoMgmtIP to instance_vms table', '2016-02-05');"
+}
+function downgrade_from_9(){
+ # Migration 0.9 -> 0.8: remove 'ACTIVE:NoMgmtIP' from the instance_vms.status
+ # enum. NOTE(review): the log message below says "Add" but this removes it.
+ # echo " downgrade database from version 0.9 to version 0.8"
+ echo " Add more status to 'instance_vms'"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
+ sql "DELETE FROM schema_version WHERE version_int='9';"
+}
+function upgrade_to_10(){
+ # Migration 0.9 -> 0.10: make vnfs/scenarios/instance_scenarios tenant-owned
+ # (tenant_id columns + FKs) and rename 'vim_tenants' to 'datacenter_tenants',
+ # repointing every referencing table's FK at the renamed table.
+ # echo " upgrade database from version 0.9 to version 0.10"
+ echo " add tenant to 'vnfs'"
+ sql "ALTER TABLE vnfs ADD COLUMN tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER name, ADD CONSTRAINT FK_vnfs_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid) ON UPDATE CASCADE ON DELETE SET NULL, CHANGE COLUMN public public ENUM('true','false') NOT NULL DEFAULT 'false' AFTER physical, DROP INDEX name, DROP INDEX path, DROP COLUMN path;"
+ sql "ALTER TABLE scenarios DROP FOREIGN KEY FK_scenarios_nfvo_tenants;"
+ sql "ALTER TABLE scenarios CHANGE COLUMN nfvo_tenant_id tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_scenarios_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid);"
+ sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_nfvo_tenants;"
+ sql "ALTER TABLE instance_scenarios CHANGE COLUMN nfvo_tenant_id tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_instance_scenarios_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid);"
+ echo " rename 'vim_tenants' table to 'datacenter_tenants'"
+ # BUGFIX: the RENAME was previously only echoed, never executed; the FK
+ # constraints added in the loop below reference datacenter_tenants, which
+ # would not exist without actually running it.
+ sql "RENAME TABLE vim_tenants TO datacenter_tenants;"
+ for table in tenants_datacenters instance_scenarios instance_vnfs instance_nets
+ do
+ # instance_vnfs allows a NULL datacenter tenant; the other tables do not.
+ NULL="NOT NULL"
+ [[ $table == instance_vnfs ]] && NULL="NULL DEFAULT NULL"
+ sql "ALTER TABLE ${table} DROP FOREIGN KEY FK_${table}_vim_tenants;"
+ sql "ALTER TABLE ${table} ALTER vim_tenant_id DROP DEFAULT;"
+ sql "ALTER TABLE ${table} CHANGE COLUMN vim_tenant_id datacenter_tenant_id VARCHAR(36) ${NULL} AFTER datacenter_id, ADD CONSTRAINT FK_${table}_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid); "
+ done
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (10, '0.10', '0.4.36', 'tenant management of vnfs,scenarios', '2016-03-08');"
+}
+
+function downgrade_from_10(){
+ # Migration 0.10 -> 0.9: undo tenant ownership of vnfs/scenarios and rename
+ # 'datacenter_tenants' back to 'vim_tenants', repointing the FKs.
+ # echo " downgrade database from version 0.10 to version 0.9"
+ echo " remove tenant from 'vnfs'"
+ sql "ALTER TABLE vnfs DROP COLUMN tenant_id, DROP FOREIGN KEY FK_vnfs_nfvo_tenants, ADD UNIQUE INDEX name (name), ADD COLUMN path VARCHAR(100) NULL DEFAULT NULL COMMENT 'Path where the YAML descriptor of the VNF can be found. NULL if it is a physical network function.' AFTER name, ADD UNIQUE INDEX path (path), CHANGE COLUMN public public ENUM('true','false') NOT NULL DEFAULT 'true' AFTER physical;"
+ sql "ALTER TABLE scenarios DROP FOREIGN KEY FK_scenarios_nfvo_tenants;"
+ sql "ALTER TABLE scenarios CHANGE COLUMN tenant_id nfvo_tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_scenarios_nfvo_tenants FOREIGN KEY (nfvo_tenant_id) REFERENCES nfvo_tenants (uuid);"
+ sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_nfvo_tenants;"
+ sql "ALTER TABLE instance_scenarios CHANGE COLUMN tenant_id nfvo_tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_instance_scenarios_nfvo_tenants FOREIGN KEY (nfvo_tenant_id) REFERENCES nfvo_tenants (uuid);"
+ echo " rename back 'datacenter_tenants' table to 'vim_tenants'"
+ # BUGFIX: the RENAME was previously only echoed, never executed; the FK
+ # constraints added in the loop below reference vim_tenants, which would not
+ # exist without actually running it.
+ sql "RENAME TABLE datacenter_tenants TO vim_tenants;"
+ for table in tenants_datacenters instance_scenarios instance_vnfs instance_nets
+ do
+ sql "ALTER TABLE ${table} DROP FOREIGN KEY FK_${table}_datacenter_tenants;"
+ # instance_vnfs allows a NULL datacenter tenant; the other tables do not.
+ NULL="NOT NULL"
+ [[ $table == instance_vnfs ]] && NULL="NULL DEFAULT NULL"
+ sql "ALTER TABLE ${table} ALTER datacenter_tenant_id DROP DEFAULT;"
+ sql "ALTER TABLE ${table} CHANGE COLUMN datacenter_tenant_id vim_tenant_id VARCHAR(36) $NULL AFTER datacenter_id, ADD CONSTRAINT FK_${table}_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid); "
+ done
+ sql "DELETE FROM schema_version WHERE version_int='10';"
+}
+
+function upgrade_to_11(){
+ # Migration 0.10 -> 0.11: drop the unique-name constraints so scenarios and
+ # instance_scenarios may share names.
+ # echo " upgrade database from version 0.10 to version 0.11"
+ echo " remove unique name at 'scenarios', 'instance_scenarios'"
+ sql "ALTER TABLE scenarios DROP INDEX name;"
+ sql "ALTER TABLE instance_scenarios DROP INDEX name;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (11, '0.11', '0.4.43', 'remove unique name at scenarios,instance_scenarios', '2016-07-18');"
+}
+function downgrade_from_11(){
+ # Migration 0.11 -> 0.10: restore the unique-name indexes (fails if
+ # duplicate names were created while at version 0.11).
+ # echo " downgrade database from version 0.11 to version 0.10"
+ echo " add unique name at 'scenarios', 'instance_scenarios'"
+ sql "ALTER TABLE scenarios ADD UNIQUE INDEX name (name);"
+ sql "ALTER TABLE instance_scenarios ADD UNIQUE INDEX name (name);"
+ sql "DELETE FROM schema_version WHERE version_int='11';"
+}
+
+function upgrade_to_12(){
+ # Migration 0.11 -> 0.12: add the ip_profiles table (one row may belong to a
+ # net, a sce_net or an instance_net, each via its own nullable FK) and an
+ # ip_address column on interfaces and sce_interfaces.
+ # echo " upgrade database from version 0.11 to version 0.12"
+ echo " create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to 'interfaces' and 'sce_interfaces'"
+ sql "CREATE TABLE IF NOT EXISTS ip_profiles (
+ id INT(11) NOT NULL AUTO_INCREMENT,
+ net_id VARCHAR(36) NULL DEFAULT NULL,
+ sce_net_id VARCHAR(36) NULL DEFAULT NULL,
+ instance_net_id VARCHAR(36) NULL DEFAULT NULL,
+ ip_version ENUM('IPv4','IPv6') NOT NULL DEFAULT 'IPv4',
+ subnet_address VARCHAR(64) NULL DEFAULT NULL,
+ gateway_address VARCHAR(64) NULL DEFAULT NULL,
+ dns_address VARCHAR(64) NULL DEFAULT NULL,
+ dhcp_enabled ENUM('true','false') NOT NULL DEFAULT 'true',
+ dhcp_start_address VARCHAR(64) NULL DEFAULT NULL,
+ dhcp_count INT(11) NULL DEFAULT NULL,
+ PRIMARY KEY (id),
+ CONSTRAINT FK_ipprofiles_nets FOREIGN KEY (net_id) REFERENCES nets (uuid) ON DELETE CASCADE,
+ CONSTRAINT FK_ipprofiles_scenets FOREIGN KEY (sce_net_id) REFERENCES sce_nets (uuid) ON DELETE CASCADE,
+ CONSTRAINT FK_ipprofiles_instancenets FOREIGN KEY (instance_net_id) REFERENCES instance_nets (uuid) ON DELETE CASCADE )
+ COMMENT='Table containing the IP parameters of a network, either a net, a sce_net or and instance_net.'
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ sql "ALTER TABLE interfaces ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac;"
+ sql "ALTER TABLE sce_interfaces ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER interface_id;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (12, '0.12', '0.4.46', 'create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to interfaces and sce_interfaces', '2016-08-29');"
+}
+function downgrade_from_12(){
+ # Migration 0.12 -> 0.11: drop ip_profiles and the ip_address columns.
+ # echo " downgrade database from version 0.12 to version 0.11"
+ echo " delete ip_profiles table, and remove ip_address column in 'interfaces' and 'sce_interfaces'"
+ sql "DROP TABLE IF EXISTS ip_profiles;"
+ sql "ALTER TABLE interfaces DROP COLUMN ip_address;"
+ sql "ALTER TABLE sce_interfaces DROP COLUMN ip_address;"
+ sql "DELETE FROM schema_version WHERE version_int='12';"
+}
+
+function upgrade_to_13(){
+ # Migration 0.12 -> 0.13: add cloud_config (cloud-init content) columns to
+ # scenarios and instance_scenarios.
+ # echo " upgrade database from version 0.12 to version 0.13"
+ echo " add cloud_config at 'scenarios', 'instance_scenarios'"
+ sql "ALTER TABLE scenarios ADD COLUMN cloud_config MEDIUMTEXT NULL DEFAULT NULL AFTER descriptor;"
+ sql "ALTER TABLE instance_scenarios ADD COLUMN cloud_config MEDIUMTEXT NULL DEFAULT NULL AFTER modified_at;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (13, '0.13', '0.4.47', 'insert cloud-config at scenarios,instance_scenarios', '2016-08-30');"
+}
+function downgrade_from_13(){
+ # Migration 0.13 -> 0.12: drop the cloud_config columns.
+ # echo " downgrade database from version 0.13 to version 0.12"
+ echo " remove cloud_config at 'scenarios', 'instance_scenarios'"
+ sql "ALTER TABLE scenarios DROP COLUMN cloud_config;"
+ sql "ALTER TABLE instance_scenarios DROP COLUMN cloud_config;"
+ sql "DELETE FROM schema_version WHERE version_int='13';"
+}
+
+function upgrade_to_14(){
+ # Migration 0.13 -> 0.14: drop the unique (vim_net_id, instance_scenario_id)
+ # index and rename instance_nets.external to 'created' (inverted meaning).
+ # echo " upgrade database from version 0.13 to version 0.14"
+ echo " remove unique index vim_net_id, instance_scenario_id at table 'instance_nets'"
+ sql "ALTER TABLE instance_nets DROP INDEX vim_net_id_instance_scenario_id;"
+ sql "ALTER TABLE instance_nets CHANGE COLUMN external created ENUM('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at VIM' AFTER multipoint;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (14, '0.14', '0.4.57', 'remove unique index vim_net_id, instance_scenario_id', '2016-09-26');"
+}
+function downgrade_from_14(){
+ # Migration 0.14 -> 0.13: restore the unique (vim_net_id,
+ # instance_scenario_id) index and rename 'created' back to 'external'.
+ # echo " downgrade database from version 0.14 to version 0.13"
+ # BUGFIX: the log line previously said "remove cloud_config at 'scenarios',
+ # 'instance_scenarios'" (copy-pasted from downgrade_from_13); it now
+ # describes what this function actually does.
+ echo " add back unique index vim_net_id, instance_scenario_id at table 'instance_nets'"
+ sql "ALTER TABLE instance_nets ADD UNIQUE INDEX vim_net_id_instance_scenario_id (vim_net_id, instance_scenario_id);"
+ sql "ALTER TABLE instance_nets CHANGE COLUMN created external ENUM('true','false') NOT NULL DEFAULT 'false' COMMENT 'If external, means that it already exists at VIM' AFTER multipoint;"
+ sql "DELETE FROM schema_version WHERE version_int='14';"
+}
+
+function upgrade_to_15(){
+ # Migration 0.14 -> 0.15: images become identifiable by (universal_name,
+ # checksum) instead of only by location; location and vms.image_path turn
+ # nullable.
+ # echo " upgrade database from version 0.14 to version 0.15"
+ echo " add columns 'universal_name' and 'checksum' at table 'images', add unique index universal_name_checksum, and change location to allow NULL; change column 'image_path' in table 'vms' to allow NULL"
+ sql "ALTER TABLE images ADD COLUMN checksum VARCHAR(32) NULL DEFAULT NULL AFTER name;"
+ sql "ALTER TABLE images ALTER location DROP DEFAULT;"
+ sql "ALTER TABLE images ADD COLUMN universal_name VARCHAR(255) NULL AFTER name, CHANGE COLUMN location location VARCHAR(200) NULL AFTER checksum, ADD UNIQUE INDEX universal_name_checksum (universal_name, checksum);"
+ sql "ALTER TABLE vms ALTER image_path DROP DEFAULT;"
+ sql "ALTER TABLE vms CHANGE COLUMN image_path image_path VARCHAR(100) NULL COMMENT 'Path where the image of the VM is located' AFTER image_id;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (15, '0.15', '0.4.59', 'add columns universal_name and checksum at table images, add unique index universal_name_checksum, and change location to allow NULL; change column image_path in table vms to allow NULL', '2016-09-27');"
+}
+function downgrade_from_15(){
+ # Migration 0.15 -> 0.14: drop universal_name/checksum (index first, to
+ # release the columns) and make location and vms.image_path NOT NULL again.
+ # echo " downgrade database from version 0.15 to version 0.14"
+ echo " remove columns 'universal_name' and 'checksum' from table 'images', remove index universal_name_checksum, change location NOT NULL; change column 'image_path' in table 'vms' to NOT NULL"
+ sql "ALTER TABLE images DROP INDEX universal_name_checksum;"
+ sql "ALTER TABLE images ALTER location DROP DEFAULT;"
+ sql "ALTER TABLE images CHANGE COLUMN location location VARCHAR(200) NOT NULL AFTER checksum;"
+ sql "ALTER TABLE images DROP COLUMN universal_name;"
+ sql "ALTER TABLE images DROP COLUMN checksum;"
+ sql "ALTER TABLE vms ALTER image_path DROP DEFAULT;"
+ sql "ALTER TABLE vms CHANGE COLUMN image_path image_path VARCHAR(100) NOT NULL COMMENT 'Path where the image of the VM is located' AFTER image_id;"
+ sql "DELETE FROM schema_version WHERE version_int='15';"
+}
+
+function upgrade_to_16(){
+ # Schema 0.15 -> 0.16: per-tenant VIM config blob and wider
+ # vim_tenant_name/vim_tenant_id columns on 'datacenter_tenants'.
+ echo " add column 'config' at table 'datacenter_tenants', enlarge 'vim_tenant_name/id'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE datacenter_tenants ADD COLUMN config VARCHAR(4000) NULL DEFAULT NULL AFTER passwd;" \
+ "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(256) NULL DEFAULT NULL AFTER datacenter_id;" \
+ "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(256) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;" \
+ "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (16, '0.16', '0.5.2', 'enlarge vim_tenant_name and id. New config at datacenter_tenants', '2016-10-11');"
+ do
+ sql "$stmt"
+ done
+}
+function downgrade_from_16(){
+ # Schema 0.16 -> 0.15: drop datacenter_tenants.config and shrink
+ # vim_tenant_name/vim_tenant_id back to their pre-0.16 sizes.
+ # Fix: the progress message used to read "lenght".
+ echo " remove column 'config' at table 'datacenter_tenants', restoring length 'vim_tenant_name/id'"
+ sql "ALTER TABLE datacenter_tenants DROP COLUMN config;"
+ sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL AFTER datacenter_id;"
+ sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
+ sql "DELETE FROM schema_version WHERE version_int='16';"
+}
+
+function upgrade_to_17(){
+ # Schema 0.16 -> 0.17: JSON description of extra devices on
+ # 'datacenters_flavors'.
+ echo " add column 'extended' at table 'datacenter_flavors'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE datacenters_flavors ADD extended varchar(2000) NULL COMMENT 'Extra description json format of additional devices';" \
+ "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (17, '0.17', '0.5.3', 'Extra description json format of additional devices in datacenter_flavors', '2016-12-20');"
+ do
+ sql "$stmt"
+ done
+}
+function downgrade_from_17(){
+ # Schema 0.17 -> 0.16: drop the 'extended' column added by upgrade_to_17.
+ echo " remove column 'extended' from table 'datacenter_flavors'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE datacenters_flavors DROP COLUMN extended;" \
+ "DELETE FROM schema_version WHERE version_int='17';"
+ do
+ sql "$stmt"
+ done
+}
+
+function upgrade_to_18(){
+ # Schema 0.17 -> 0.18: add 'floating_ip' (default off) and 'port_security'
+ # (default on) to both the template table 'interfaces' and the runtime
+ # table 'instance_interfaces'. Same pair of ALTERs per table.
+ echo " add columns 'floating_ip' and 'port_security' at tables 'interfaces' and 'instance_interfaces'"
+ local table
+ for table in interfaces instance_interfaces
+ do
+ sql "ALTER TABLE $table ADD floating_ip BOOL DEFAULT 0 NOT NULL COMMENT 'Indicates if a floating_ip must be associated to this interface';"
+ sql "ALTER TABLE $table ADD port_security BOOL DEFAULT 1 NOT NULL COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled';"
+ done
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (18, '0.18', '0.5.4', 'Add columns \'floating_ip\' and \'port_security\' at tables \'interfaces\' and \'instance_interfaces\'', '2017-01-09');"
+}
+function downgrade_from_18(){
+ # Schema 0.18 -> 0.17: drop the 'floating_ip'/'port_security' pair from
+ # both tables touched by upgrade_to_18.
+ echo " remove columns 'floating_ip' and 'port_security' from tables 'interfaces' and 'instance_interfaces'"
+ local table
+ for table in interfaces instance_interfaces
+ do
+ sql "ALTER TABLE $table DROP COLUMN floating_ip;"
+ sql "ALTER TABLE $table DROP COLUMN port_security;"
+ done
+ sql "DELETE FROM schema_version WHERE version_int='18';"
+}
+
+function upgrade_to_19(){
+ # Schema 0.18 -> 0.19: free-form TEXT column 'boot_data' on 'vms'.
+ echo " add column 'boot_data' at table 'vms'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE vms ADD COLUMN boot_data TEXT NULL DEFAULT NULL AFTER image_path;" \
+ "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (19, '0.19', '0.5.5', 'Extra Boot-data content at VNFC (vms)', '2017-01-11');"
+ do
+ sql "$stmt"
+ done
+}
+function downgrade_from_19(){
+ # Schema 0.19 -> 0.18: drop the 'boot_data' column added by upgrade_to_19.
+ echo " remove column 'boot_data' from table 'vms'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE vms DROP COLUMN boot_data;" \
+ "DELETE FROM schema_version WHERE version_int='19';"
+ do
+ sql "$stmt"
+ done
+}
+
+function upgrade_to_20(){
+ # Schema 0.19 -> 0.20: dataplane/SDN connectivity bookkeeping — ovim
+ # network id on 'instance_nets' plus ovim port id, compute node, PCI and
+ # VLAN on 'instance_interfaces'.
+ echo " add column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE instance_nets ADD sdn_net_id varchar(36) DEFAULT NULL NULL COMMENT 'Network id in ovim';" \
+ "ALTER TABLE instance_interfaces ADD sdn_port_id varchar(36) DEFAULT NULL NULL COMMENT 'Port id in ovim';" \
+ "ALTER TABLE instance_interfaces ADD compute_node varchar(100) DEFAULT NULL NULL COMMENT 'Compute node id used to specify the SDN port mapping';" \
+ "ALTER TABLE instance_interfaces ADD pci varchar(12) DEFAULT NULL NULL COMMENT 'PCI of the physical port in the host';" \
+ "ALTER TABLE instance_interfaces ADD vlan SMALLINT UNSIGNED DEFAULT NULL NULL COMMENT 'VLAN tag used by the port';" \
+ "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (20, '0.20', '0.5.9', 'Added columns to store dataplane connectivity info', '2017-03-13');"
+ do
+ sql "$stmt"
+ done
+}
+function downgrade_from_20(){
+ # Schema 0.20 -> 0.19: drop the SDN-related columns added by upgrade_to_20
+ # (instance_interfaces columns removed in reverse order of addition).
+ echo " remove column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE instance_nets DROP COLUMN sdn_net_id;" \
+ "ALTER TABLE instance_interfaces DROP COLUMN vlan;" \
+ "ALTER TABLE instance_interfaces DROP COLUMN pci;" \
+ "ALTER TABLE instance_interfaces DROP COLUMN compute_node;" \
+ "ALTER TABLE instance_interfaces DROP COLUMN sdn_port_id;" \
+ "DELETE FROM schema_version WHERE version_int='20';"
+ do
+ sql "$stmt"
+ done
+}
+
+function upgrade_to_21(){
+ # Schema 0.20 -> 0.21: instance_nets rows may now exist without an
+ # instance_scenario_id, and ip_profiles.dns_address grows to 255 chars.
+ echo " edit 'instance_nets' to allow instance_scenario_id=None"
+ sql "ALTER TABLE instance_nets MODIFY COLUMN instance_scenario_id varchar(36) NULL;"
+ echo " enlarge column 'dns_address' at table 'ip_profiles'"
+ sql "ALTER TABLE ip_profiles MODIFY dns_address varchar(255) DEFAULT NULL NULL comment 'dns ip list separated by semicolon';"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (21, '0.21', '0.5.15', 'Edit instance_nets to allow instance_scenario_id=None and enlarge column dns_address at table ip_profiles', '2017-06-02');"
+}
+function downgrade_from_21(){
+ # Schema 0.21 -> 0.20: make instance_nets.instance_scenario_id mandatory
+ # again and shrink ip_profiles.dns_address back to 64 chars.
+ echo " edit 'instance_nets' to disallow instance_scenario_id=None"
+ # Rows holding a NULL instance_scenario_id cannot survive the NOT NULL
+ # constraint below, so purge them first.
+ sql "DELETE FROM instance_nets WHERE instance_scenario_id IS NULL;"
+ sql "ALTER TABLE instance_nets MODIFY COLUMN instance_scenario_id varchar(36) NOT NULL;"
+ echo " shorten column 'dns_address' at table 'ip_profiles'"
+ sql "ALTER TABLE ip_profiles MODIFY dns_address varchar(64) DEFAULT NULL NULL;"
+ sql "DELETE FROM schema_version WHERE version_int='21';"
+}
+
+function upgrade_to_22(){
+ # Schema 0.21 -> 0.22: widen flavors.ram from SMALLINT to MEDIUMINT.
+ echo " Changed type of ram in 'flavors' from SMALLINT to MEDIUMINT"
+ local stmt
+ for stmt in \
+ "ALTER TABLE flavors CHANGE COLUMN ram ram MEDIUMINT(7) UNSIGNED NULL DEFAULT NULL AFTER disk;" \
+ "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (22, '0.22', '0.5.16', 'Changed type of ram in flavors from SMALLINT to MEDIUMINT', '2017-06-02');"
+ do
+ sql "$stmt"
+ done
+}
+function downgrade_from_22(){
+ # Schema 0.22 -> 0.21: shrink flavors.ram back to SMALLINT.
+ echo " Changed type of ram in 'flavors' from MEDIUMINT to SMALLINT"
+ local stmt
+ for stmt in \
+ "ALTER TABLE flavors CHANGE COLUMN ram ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL AFTER disk;" \
+ "DELETE FROM schema_version WHERE version_int='22';"
+ do
+ sql "$stmt"
+ done
+}
+
+function upgrade_to_23(){
+ # Schema 0.22 -> 0.23: optional per-VM availability zone.
+ echo " add column 'availability_zone' at table 'vms'"
+ sql "ALTER TABLE vms ADD COLUMN availability_zone VARCHAR(255) NULL AFTER modified_at;"
+ # Fix: the version-history comment was a copy-paste of v22 ("Changed type
+ # of ram in flavors..."); record what this migration actually does.
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (23, '0.23', '0.5.20',"\
+ "'Added column availability_zone at table vms', '2017-08-29');"
+}
+function downgrade_from_23(){
+ # Schema 0.23 -> 0.22: drop the 'availability_zone' column from 'vms'.
+ echo " remove column 'availability_zone' from table 'vms'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE vms DROP COLUMN availability_zone;" \
+ "DELETE FROM schema_version WHERE version_int='23';"
+ do
+ sql "$stmt"
+ done
+}
+
+function upgrade_to_24(){
+ # Schema 0.23 -> 0.24: per-VNFC replica count on 'vms', default 1.
+ echo " Add 'count' to table 'vms'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE vms ADD COLUMN count SMALLINT NOT NULL DEFAULT '1' AFTER vnf_id;" \
+ "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (24, '0.24', '0.5.21', 'Added vnfd fields', '2017-08-29');"
+ do
+ sql "$stmt"
+ done
+}
+function downgrade_from_24(){
+ # Schema 0.24 -> 0.23: drop the 'count' column added by upgrade_to_24.
+ echo " Remove 'count' from table 'vms'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE vms DROP COLUMN count;" \
+ "DELETE FROM schema_version WHERE version_int='24';"
+ do
+ sql "$stmt"
+ done
+}
+function upgrade_to_25(){
+ # Schema 0.24 -> 0.25: OSM descriptor identity fields — osm_id (unique per
+ # tenant), short_name and vendor on 'vnfs'/'scenarios', mgmt_access on
+ # 'vnfs', osm_id on 'vms', member_vnf_index on 'sce_vnfs', and a
+ # security_group on 'ip_profiles'.
+ echo " Add 'osm_id','short_name','vendor' to tables 'vnfs', 'scenarios'"
+ local t
+ for t in vnfs scenarios; do
+ sql "ALTER TABLE $t ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid, ADD UNIQUE INDEX osm_id_tenant_id (osm_id, tenant_id), ADD COLUMN short_name VARCHAR(255) NULL AFTER name, ADD COLUMN vendor VARCHAR(255) NULL AFTER description;"
+ done
+ sql "ALTER TABLE vnfs ADD COLUMN mgmt_access VARCHAR(2000) NULL AFTER vendor;"
+ sql "ALTER TABLE vms ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+ sql "ALTER TABLE sce_vnfs ADD COLUMN member_vnf_index SMALLINT(6) NULL DEFAULT NULL AFTER uuid;"
+ echo " Add 'security_group' to table 'ip_profiles'"
+ sql "ALTER TABLE ip_profiles ADD COLUMN security_group VARCHAR(255) NULL DEFAULT NULL AFTER dhcp_count;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (25, '0.25', '0.5.22', 'Added osm_id to vnfs,scenarios', '2017-09-01');"
+}
+function downgrade_from_25(){
+ # Schema 0.25 -> 0.24: strip the OSM descriptor fields added by
+ # upgrade_to_25 from all five affected tables.
+ echo " Remove 'osm_id','short_name','vendor' from tables 'vnfs', 'scenarios'"
+ local t
+ for t in vnfs scenarios; do
+ sql "ALTER TABLE $t DROP INDEX osm_id_tenant_id, DROP COLUMN osm_id, DROP COLUMN short_name, DROP COLUMN vendor;"
+ done
+ sql "ALTER TABLE vnfs DROP COLUMN mgmt_access;"
+ sql "ALTER TABLE vms DROP COLUMN osm_id;"
+ sql "ALTER TABLE sce_vnfs DROP COLUMN member_vnf_index;"
+ echo " Remove 'security_group' from table 'ip_profiles'"
+ sql "ALTER TABLE ip_profiles DROP COLUMN security_group;"
+ sql "DELETE FROM schema_version WHERE version_int='25';"
+}
+
+function upgrade_to_26(){
+ # Schema 0.25 -> 0.26: copy datacenter names into datacenter_tenants, add
+ # SCHEDULED_* states, repoint datacenters_flavors/images at
+ # datacenter_tenants, relax several FKs to ON DELETE SET NULL so instance
+ # rows survive template deletion, and create the instance_actions /
+ # vim_actions task tables. Statement order matters throughout.
+ echo " Add name to table datacenter_tenants"
+ sql "ALTER TABLE datacenter_tenants ADD COLUMN name VARCHAR(255) NULL AFTER uuid;"
+ # Backfill the new column from the owning datacenter's name.
+ sql "UPDATE datacenter_tenants as dt join datacenters as d on dt.datacenter_id = d.uuid set dt.name=d.name;"
+ echo " Add 'SCHEDULED' to 'status' at tables 'instance_nets', 'instance_vms'"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD',"\
+ "'ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') "\
+ "NOT NULL DEFAULT 'BUILD';"
+ sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','DOWN','BUILD','ERROR',"\
+ "'VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD';"
+ echo " Enlarge pci at instance_interfaces to allow extended pci for SDN por mapping"
+ sql "ALTER TABLE instance_interfaces CHANGE COLUMN pci pci VARCHAR(50) NULL DEFAULT NULL COMMENT 'PCI of the "\
+ "physical port in the host' AFTER compute_node;"
+
+ # For each of datacenters_flavors/datacenters_images: add the new
+ # datacenter_vim_id (+status/vim_info), backfill it from
+ # datacenter_tenants, drop rows that could not be mapped, then swap the FK
+ # from datacenters to datacenter_tenants and drop the old datacenter_id.
+ for t in flavor image; do
+ echo " Change 'datacenters_${t}s' to point to datacenter_tenant, add status, vim_info"
+ sql "ALTER TABLE datacenters_${t}s ADD COLUMN datacenter_vim_id VARCHAR(36) NULL DEFAULT NULL AFTER "\
+ "datacenter_id, ADD COLUMN status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED',"\
+ "'SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD' AFTER vim_id, ADD COLUMN vim_info "\
+ "TEXT NULL AFTER status;"
+ sql "UPDATE datacenters_${t}s as df left join datacenter_tenants as dt on dt.datacenter_id=df.datacenter_id "\
+ "set df.datacenter_vim_id=dt.uuid;"
+ sql "DELETE FROM datacenters_${t}s WHERE datacenter_vim_id is NULL;"
+ sql "ALTER TABLE datacenters_${t}s CHANGE COLUMN datacenter_vim_id datacenter_vim_id VARCHAR(36) NOT NULL;"
+ sql "ALTER TABLE datacenters_${t}s ADD CONSTRAINT FK_datacenters_${t}s_datacenter_tenants FOREIGN KEY "\
+ "(datacenter_vim_id) REFERENCES datacenter_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE;"
+ # ${t:0:1} is the first letter of $t, so this drops the legacy constraints
+ # FK__datacenters_f / FK__datacenters_i.
+ sql "ALTER TABLE datacenters_${t}s DROP FOREIGN KEY FK__datacenters_${t:0:1};"
+ sql "ALTER TABLE datacenters_${t}s DROP COLUMN datacenter_id;"
+ done
+
+ echo " Decoupling 'instance_interfaces' from scenarios/vnfs to allow scale actions"
+ sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(128) NULL DEFAULT NULL;"
+ sql "ALTER TABLE instance_interfaces CHANGE COLUMN interface_id interface_id VARCHAR(36) NULL DEFAULT NULL;"
+ # NOTE(review): this statement lacks the trailing ';' other statements
+ # carry — presumably harmless if sql() runs it as a single statement;
+ # confirm against the sql() helper defined earlier in this script.
+ sql "ALTER TABLE instance_interfaces DROP FOREIGN KEY FK_instance_ids"
+ sql "ALTER TABLE instance_interfaces ADD CONSTRAINT FK_instance_ids FOREIGN KEY (interface_id) "\
+ "REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+
+ echo " Decoupling 'instance_vms' from scenarios/vnfs to allow scale actions"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(128) NULL DEFAULT NULL;"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN vm_id vm_id VARCHAR(36) NULL DEFAULT NULL;"
+ sql "ALTER TABLE instance_vms DROP FOREIGN KEY FK_instance_vms_vms;"
+ sql "ALTER TABLE instance_vms ADD CONSTRAINT FK_instance_vms_vms FOREIGN KEY (vm_id) "\
+ "REFERENCES vms (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+
+ echo " Decoupling 'instance_nets' from scenarios/vnfs to allow scale actions"
+ sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(128) NULL DEFAULT NULL;"
+
+ echo " Decoupling 'instance_scenarios' from scenarios"
+ sql "ALTER TABLE instance_scenarios CHANGE COLUMN scenario_id scenario_id VARCHAR(36) NULL DEFAULT NULL;"
+ sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_scenarios;"
+ sql "ALTER TABLE instance_scenarios ADD CONSTRAINT FK_instance_scenarios_scenarios FOREIGN KEY (scenario_id) "\
+ "REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+
+ echo " Create table instance_actions, vim_actions"
+ sql "CREATE TABLE IF NOT EXISTS instance_actions (
+ uuid VARCHAR(36) NOT NULL,
+ tenant_id VARCHAR(36) NULL DEFAULT NULL,
+ instance_id VARCHAR(36) NULL DEFAULT NULL,
+ description VARCHAR(64) NULL DEFAULT NULL COMMENT 'CREATE, DELETE, SCALE OUT/IN, ...',
+ number_tasks SMALLINT(6) NOT NULL DEFAULT '1',
+ number_done SMALLINT(6) NOT NULL DEFAULT '0',
+ number_failed SMALLINT(6) NOT NULL DEFAULT '0',
+ created_at DOUBLE NOT NULL,
+ modified_at DOUBLE NULL DEFAULT NULL,
+ PRIMARY KEY (uuid),
+ INDEX FK_actions_tenants (tenant_id),
+ CONSTRAINT FK_actions_tenant FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+ COMMENT='Contains client actions over instances'
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+
+ # NOTE(review): the enum value 'datacenter_images' below does not match
+ # the actual table name 'datacenters_images' — verify against the code
+ # that writes vim_actions.item before changing it.
+ sql "CREATE TABLE IF NOT EXISTS vim_actions (
+ instance_action_id VARCHAR(36) NOT NULL,
+ task_index INT(6) NOT NULL,
+ datacenter_vim_id VARCHAR(36) NOT NULL,
+ vim_id VARCHAR(64) NULL DEFAULT NULL,
+ action VARCHAR(36) NOT NULL COMMENT 'CREATE,DELETE,START,STOP...',
+ item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored',
+ item_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'uuid of the entry in the table',
+ status ENUM('SCHEDULED', 'BUILD', 'DONE', 'FAILED', 'SUPERSEDED') NOT NULL DEFAULT 'SCHEDULED',
+ extra TEXT NULL DEFAULT NULL COMMENT 'json with params:, depends_on: for the task',
+ error_msg VARCHAR(1024) NULL DEFAULT NULL,
+ created_at DOUBLE NOT NULL,
+ modified_at DOUBLE NULL DEFAULT NULL,
+ PRIMARY KEY (task_index, instance_action_id),
+ INDEX FK_actions_instance_actions (instance_action_id),
+ CONSTRAINT FK_actions_instance_actions FOREIGN KEY (instance_action_id) REFERENCES instance_actions (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ INDEX FK_actions_vims (datacenter_vim_id),
+ CONSTRAINT FK_actions_vims FOREIGN KEY (datacenter_vim_id) REFERENCES datacenter_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+ COMMENT='Table with the individual VIM actions.'
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+ "VALUES (26, '0.26', '0.5.23', 'Several changes', '2017-09-09');"
+}
+function downgrade_from_26(){
+ # Schema 0.26 -> 0.25: undo upgrade_to_26 — drop datacenter_tenants.name,
+ # remove SCHEDULED_* states, re-point datacenters_flavors/images back at
+ # datacenters, restore the strict (NOT NULL, no SET NULL) FKs, and drop the
+ # vim_actions/instance_actions tables. Statement order matters throughout.
+ echo " Remove name from table datacenter_tenants"
+ sql "ALTER TABLE datacenter_tenants DROP COLUMN name;"
+ echo " Remove 'SCHEDULED' from the 'status' at tables 'instance_nets', 'instance_vms'"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD',"\
+ "'ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
+ sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR','VIM_ERROR',"\
+ "'INACTIVE','DELETED') NOT NULL DEFAULT 'BUILD';"
+ echo " Shorten back pci at instance_interfaces to allow extended pci for SDN por mapping"
+ sql "ALTER TABLE instance_interfaces CHANGE COLUMN pci pci VARCHAR(12) NULL DEFAULT NULL COMMENT 'PCI of the "\
+ "physical port in the host' AFTER compute_node;"
+
+ # Re-add datacenter_id, backfill it through datacenter_tenants, then swap
+ # the FK back to datacenters (FK__datacenters_f / FK__datacenters_i, named
+ # from the first letter of $t) and drop datacenter_vim_id.
+ for t in flavor image; do
+ echo " Restore back 'datacenters_${t}s'"
+ sql "ALTER TABLE datacenters_${t}s ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER "\
+ "${t}_id, DROP COLUMN status, DROP COLUMN vim_info ;"
+ sql "UPDATE datacenters_${t}s as df left join datacenter_tenants as dt on dt.uuid=df.datacenter_vim_id set "\
+ "df.datacenter_id=dt.datacenter_id;"
+ sql "ALTER TABLE datacenters_${t}s CHANGE COLUMN datacenter_id datacenter_id VARCHAR(36) NOT NULL;"
+ sql "ALTER TABLE datacenters_${t}s ADD CONSTRAINT FK__datacenters_${t:0:1} FOREIGN KEY "\
+ "(datacenter_id) REFERENCES datacenters (uuid), DROP FOREIGN KEY FK_datacenters_${t}s_datacenter_tenants, "\
+ "DROP COLUMN datacenter_vim_id;"
+ done
+
+ echo " Restore back 'instance_interfaces' coupling to scenarios/vnfs"
+ sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NULL DEFAULT NULL;"
+ # NOTE(review): no trailing ';' here, unlike most statements — presumably
+ # harmless if sql() runs it as a single statement; confirm against sql().
+ sql "ALTER TABLE instance_interfaces DROP FOREIGN KEY FK_instance_ids"
+ sql "ALTER TABLE instance_interfaces CHANGE COLUMN interface_id interface_id VARCHAR(36) NOT NULL;"
+ sql "ALTER TABLE instance_interfaces ADD CONSTRAINT FK_instance_ids FOREIGN KEY (interface_id) "\
+ "REFERENCES interfaces (uuid);"
+
+ echo " Restore back 'instance_vms' coupling to scenarios/vnfs"
+ echo " Decoupling 'instance vms' from scenarios/vnfs to allow scale actions"
+ # NULL ids cannot survive the NOT NULL restore below, so blank them first.
+ sql "UPDATE instance_vms SET vim_vm_id='' WHERE vim_vm_id is NULL;"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(36) NOT NULL;"
+ sql "ALTER TABLE instance_vms DROP FOREIGN KEY FK_instance_vms_vms;"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN vm_id vm_id VARCHAR(36) NOT NULL;"
+ sql "ALTER TABLE instance_vms ADD CONSTRAINT FK_instance_vms_vms FOREIGN KEY (vm_id) "\
+ "REFERENCES vms (uuid);"
+
+ echo " Restore back 'instance_nets' coupling to scenarios/vnfs"
+ sql "UPDATE instance_nets SET vim_net_id='' WHERE vim_net_id is NULL;"
+ sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(36) NOT NULL;"
+
+ echo " Restore back 'instance_scenarios' coupling to scenarios"
+ sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_scenarios;"
+ sql "ALTER TABLE instance_scenarios CHANGE COLUMN scenario_id scenario_id VARCHAR(36) NOT NULL;"
+ sql "ALTER TABLE instance_scenarios ADD CONSTRAINT FK_instance_scenarios_scenarios FOREIGN KEY (scenario_id) "\
+ "REFERENCES scenarios (uuid);"
+
+ echo " Delete table instance_actions"
+ # vim_actions must go first: it holds the FK into instance_actions.
+ sql "DROP TABLE IF EXISTS vim_actions"
+ sql "DROP TABLE IF EXISTS instance_actions"
+ sql "DELETE FROM schema_version WHERE version_int='26';"
+}
+
+function upgrade_to_27(){
+ # Schema 0.26 -> 0.27: per-tenant RO key pair columns on 'nfvo_tenants'.
+ echo " Added 'encrypted_RO_priv_key','RO_pub_key' to table 'nfvo_tenants'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE nfvo_tenants ADD COLUMN encrypted_RO_priv_key VARCHAR(2000) NULL AFTER description;" \
+ "ALTER TABLE nfvo_tenants ADD COLUMN RO_pub_key VARCHAR(510) NULL AFTER encrypted_RO_priv_key;" \
+ "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (27, '0.27', '0.5.25', 'Added encrypted_RO_priv_key,RO_pub_key to table nfvo_tenants', '2017-09-29');"
+ do
+ sql "$stmt"
+ done
+}
+function downgrade_from_27(){
+ # Schema 0.27 -> 0.26: drop the RO key pair columns added by upgrade_to_27.
+ echo " Remove 'encrypted_RO_priv_key','RO_pub_key' from table 'nfvo_tenants'"
+ local stmt
+ for stmt in \
+ "ALTER TABLE nfvo_tenants DROP COLUMN encrypted_RO_priv_key;" \
+ "ALTER TABLE nfvo_tenants DROP COLUMN RO_pub_key;" \
+ "DELETE FROM schema_version WHERE version_int='27';"
+ do
+ sql "$stmt"
+ done
+}
+function upgrade_to_28(){
+ # Schema 0.27 -> 0.28: create the VNFFG template tables (sce_vnffgs,
+ # sce_rsps, sce_rsp_hops, sce_classifiers, sce_classifier_matches), their
+ # instance counterparts (instance_sfis/sfs/classifications/sfps), and
+ # extend vim_actions.item with the new instance tables.
+ echo " [Adding necessary tables for VNFFG]"
+ echo " Adding sce_vnffgs"
+ # Fix: the FK_scenarios_vnffg constraint used to be declared on tenant_id,
+ # which does not reference scenarios at all; the companion index
+ # FK_scenarios_sce_vnffg (scenario_id) shows scenario_id is the intended
+ # column, so the constraint now uses scenario_id.
+ sql "CREATE TABLE IF NOT EXISTS sce_vnffgs (
+ uuid VARCHAR(36) NOT NULL,
+ tenant_id VARCHAR(36) NULL DEFAULT NULL,
+ name VARCHAR(255) NOT NULL,
+ description VARCHAR(255) NULL DEFAULT NULL,
+ vendor VARCHAR(255) NULL DEFAULT NULL,
+ scenario_id VARCHAR(36) NOT NULL,
+ created_at DOUBLE NOT NULL,
+ modified_at DOUBLE NULL DEFAULT NULL,
+ PRIMARY KEY (uuid),
+ INDEX FK_scenarios_sce_vnffg (scenario_id),
+ CONSTRAINT FK_scenarios_vnffg FOREIGN KEY (scenario_id) REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ echo " Adding sce_rsps"
+ sql "CREATE TABLE IF NOT EXISTS sce_rsps (
+ uuid VARCHAR(36) NOT NULL,
+ tenant_id VARCHAR(36) NULL DEFAULT NULL,
+ name VARCHAR(255) NOT NULL,
+ sce_vnffg_id VARCHAR(36) NOT NULL,
+ created_at DOUBLE NOT NULL,
+ modified_at DOUBLE NULL DEFAULT NULL,
+ PRIMARY KEY (uuid),
+ INDEX FK_sce_vnffgs_rsp (sce_vnffg_id),
+ CONSTRAINT FK_sce_vnffgs_rsp FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ echo " Adding sce_rsp_hops"
+ sql "CREATE TABLE IF NOT EXISTS sce_rsp_hops (
+ uuid VARCHAR(36) NOT NULL,
+ if_order INT DEFAULT 0 NOT NULL,
+ interface_id VARCHAR(36) NOT NULL,
+ sce_vnf_id VARCHAR(36) NOT NULL,
+ sce_rsp_id VARCHAR(36) NOT NULL,
+ created_at DOUBLE NOT NULL,
+ modified_at DOUBLE NULL DEFAULT NULL,
+ PRIMARY KEY (uuid),
+ INDEX FK_interfaces_rsp_hop (interface_id),
+ INDEX FK_sce_vnfs_rsp_hop (sce_vnf_id),
+ INDEX FK_sce_rsps_rsp_hop (sce_rsp_id),
+ CONSTRAINT FK_interfaces_rsp_hop FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ CONSTRAINT FK_sce_vnfs_rsp_hop FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ CONSTRAINT FK_sce_rsps_rsp_hop FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ echo " Adding sce_classifiers"
+ sql "CREATE TABLE IF NOT EXISTS sce_classifiers (
+ uuid VARCHAR(36) NOT NULL,
+ tenant_id VARCHAR(36) NULL DEFAULT NULL,
+ name VARCHAR(255) NOT NULL,
+ sce_vnffg_id VARCHAR(36) NOT NULL,
+ sce_rsp_id VARCHAR(36) NOT NULL,
+ sce_vnf_id VARCHAR(36) NOT NULL,
+ interface_id VARCHAR(36) NOT NULL,
+ created_at DOUBLE NOT NULL,
+ modified_at DOUBLE NULL DEFAULT NULL,
+ PRIMARY KEY (uuid),
+ INDEX FK_sce_vnffgs_classifier (sce_vnffg_id),
+ INDEX FK_sce_rsps_classifier (sce_rsp_id),
+ INDEX FK_sce_vnfs_classifier (sce_vnf_id),
+ INDEX FK_interfaces_classifier (interface_id),
+ CONSTRAINT FK_sce_vnffgs_classifier FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ CONSTRAINT FK_sce_rsps_classifier FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ CONSTRAINT FK_sce_vnfs_classifier FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ CONSTRAINT FK_interfaces_classifier FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ echo " Adding sce_classifier_matches"
+ sql "CREATE TABLE IF NOT EXISTS sce_classifier_matches (
+ uuid VARCHAR(36) NOT NULL,
+ ip_proto VARCHAR(2) NOT NULL,
+ source_ip VARCHAR(16) NOT NULL,
+ destination_ip VARCHAR(16) NOT NULL,
+ source_port VARCHAR(5) NOT NULL,
+ destination_port VARCHAR(5) NOT NULL,
+ sce_classifier_id VARCHAR(36) NOT NULL,
+ created_at DOUBLE NOT NULL,
+ modified_at DOUBLE NULL DEFAULT NULL,
+ PRIMARY KEY (uuid),
+ INDEX FK_classifiers_classifier_match (sce_classifier_id),
+ CONSTRAINT FK_sce_classifiers_classifier_match FOREIGN KEY (sce_classifier_id) REFERENCES sce_classifiers (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+
+ echo " [Adding necessary tables for VNFFG-SFC instance mapping]"
+ echo " Adding instance_sfis"
+ sql "CREATE TABLE IF NOT EXISTS instance_sfis (
+ uuid varchar(36) NOT NULL,
+ instance_scenario_id varchar(36) NOT NULL,
+ vim_sfi_id varchar(36) DEFAULT NULL,
+ sce_rsp_hop_id varchar(36) DEFAULT NULL,
+ datacenter_id varchar(36) DEFAULT NULL,
+ datacenter_tenant_id varchar(36) DEFAULT NULL,
+ status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ error_msg varchar(1024) DEFAULT NULL,
+ vim_info text,
+ created_at double NOT NULL,
+ modified_at double DEFAULT NULL,
+ PRIMARY KEY (uuid),
+ KEY FK_instance_sfis_instance_scenarios (instance_scenario_id),
+ KEY FK_instance_sfis_sce_rsp_hops (sce_rsp_hop_id),
+ KEY FK_instance_sfis_datacenters (datacenter_id),
+ KEY FK_instance_sfis_datacenter_tenants (datacenter_tenant_id),
+ CONSTRAINT FK_instance_sfis_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+ CONSTRAINT FK_instance_sfis_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+ CONSTRAINT FK_instance_sfis_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT FK_instance_sfis_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ echo " Adding instance_sfs"
+ sql "CREATE TABLE IF NOT EXISTS instance_sfs (
+ uuid varchar(36) NOT NULL,
+ instance_scenario_id varchar(36) NOT NULL,
+ vim_sf_id varchar(36) DEFAULT NULL,
+ sce_rsp_hop_id varchar(36) DEFAULT NULL,
+ datacenter_id varchar(36) DEFAULT NULL,
+ datacenter_tenant_id varchar(36) DEFAULT NULL,
+ status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ error_msg varchar(1024) DEFAULT NULL,
+ vim_info text,
+ created_at double NOT NULL,
+ modified_at double DEFAULT NULL,
+ PRIMARY KEY (uuid),
+ KEY FK_instance_sfs_instance_scenarios (instance_scenario_id),
+ KEY FK_instance_sfs_sce_rsp_hops (sce_rsp_hop_id),
+ KEY FK_instance_sfs_datacenters (datacenter_id),
+ KEY FK_instance_sfs_datacenter_tenants (datacenter_tenant_id),
+ CONSTRAINT FK_instance_sfs_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+ CONSTRAINT FK_instance_sfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+ CONSTRAINT FK_instance_sfs_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT FK_instance_sfs_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ echo " Adding instance_classifications"
+ sql "CREATE TABLE IF NOT EXISTS instance_classifications (
+ uuid varchar(36) NOT NULL,
+ instance_scenario_id varchar(36) NOT NULL,
+ vim_classification_id varchar(36) DEFAULT NULL,
+ sce_classifier_match_id varchar(36) DEFAULT NULL,
+ datacenter_id varchar(36) DEFAULT NULL,
+ datacenter_tenant_id varchar(36) DEFAULT NULL,
+ status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ error_msg varchar(1024) DEFAULT NULL,
+ vim_info text,
+ created_at double NOT NULL,
+ modified_at double DEFAULT NULL,
+ PRIMARY KEY (uuid),
+ KEY FK_instance_classifications_instance_scenarios (instance_scenario_id),
+ KEY FK_instance_classifications_sce_classifier_matches (sce_classifier_match_id),
+ KEY FK_instance_classifications_datacenters (datacenter_id),
+ KEY FK_instance_classifications_datacenter_tenants (datacenter_tenant_id),
+ CONSTRAINT FK_instance_classifications_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+ CONSTRAINT FK_instance_classifications_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+ CONSTRAINT FK_instance_classifications_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT FK_instance_classifications_sce_classifier_matches FOREIGN KEY (sce_classifier_match_id) REFERENCES sce_classifier_matches (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+ echo " Adding instance_sfps"
+ sql "CREATE TABLE IF NOT EXISTS instance_sfps (
+ uuid varchar(36) NOT NULL,
+ instance_scenario_id varchar(36) NOT NULL,
+ vim_sfp_id varchar(36) DEFAULT NULL,
+ sce_rsp_id varchar(36) DEFAULT NULL,
+ datacenter_id varchar(36) DEFAULT NULL,
+ datacenter_tenant_id varchar(36) DEFAULT NULL,
+ status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ error_msg varchar(1024) DEFAULT NULL,
+ vim_info text,
+ created_at double NOT NULL,
+ modified_at double DEFAULT NULL,
+ PRIMARY KEY (uuid),
+ KEY FK_instance_sfps_instance_scenarios (instance_scenario_id),
+ KEY FK_instance_sfps_sce_rsps (sce_rsp_id),
+ KEY FK_instance_sfps_datacenters (datacenter_id),
+ KEY FK_instance_sfps_datacenter_tenants (datacenter_tenant_id),
+ CONSTRAINT FK_instance_sfps_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+ CONSTRAINT FK_instance_sfps_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+ CONSTRAINT FK_instance_sfps_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT FK_instance_sfps_sce_rsps FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+ COLLATE='utf8_general_ci'
+ ENGINE=InnoDB;"
+
+
+ echo " [Altering vim_actions table]"
+ # NOTE(review): 'datacenter_images' (vs table 'datacenters_images') is kept
+ # as-is for consistency with the enum created in upgrade_to_26.
+ sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_sfis','instance_sfs','instance_classifications','instance_sfps') NOT NULL COMMENT 'table where the item is stored'"
+
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+ "VALUES (28, '0.28', '0.5.28', 'Adding VNFFG-related tables', '2017-11-20');"
+}
+function downgrade_from_28(){
+ # Revert schema v28: drop the VNFFG/SFC instance tables and definition
+ # tables, restore the pre-SFC 'item' enum on vim_actions.
+ # Fixes vs. original: the echo for instance_classifications wrongly said
+ # "sce_classifications", and the second section header was printed between
+ # a "Dropping..." echo and its matching DROP statement.
+ echo "    [Undo adding the VNFFG tables]"
+ echo "      Dropping instance_sfps"
+ sql "DROP TABLE IF EXISTS instance_sfps;"
+ echo "      Dropping instance_classifications"
+ sql "DROP TABLE IF EXISTS instance_classifications;"
+ echo "      Dropping instance_sfs"
+ sql "DROP TABLE IF EXISTS instance_sfs;"
+ echo "      Dropping instance_sfis"
+ sql "DROP TABLE IF EXISTS instance_sfis;"
+ echo "    [Undo adding the VNFFG-SFC instance mapping tables]"
+ echo "      Dropping sce_classifier_matches"
+ sql "DROP TABLE IF EXISTS sce_classifier_matches;"
+ echo "      Dropping sce_classifiers"
+ sql "DROP TABLE IF EXISTS sce_classifiers;"
+ echo "      Dropping sce_rsp_hops"
+ sql "DROP TABLE IF EXISTS sce_rsp_hops;"
+ echo "      Dropping sce_rsps"
+ sql "DROP TABLE IF EXISTS sce_rsps;"
+ echo "      Dropping sce_vnffgs"
+ sql "DROP TABLE IF EXISTS sce_vnffgs;"
+ echo "    [Altering vim_actions table]"
+ sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored'"
+ sql "DELETE FROM schema_version WHERE version_int='28';"
+}
+function upgrade_to_29(){
+ # Schema v29: store member_vnf_index as a string (aligns with the OSM
+ # information model) and add an 'osm_id' column to 'nets' and 'sce_nets'.
+ echo "    Change 'member_vnf_index' from int to str at 'sce_vnfs'"
+ sql "ALTER TABLE sce_vnfs CHANGE COLUMN member_vnf_index member_vnf_index VARCHAR(255) NULL DEFAULT NULL AFTER uuid;"
+ echo "    Add osm_id to 'nets's and 'sce_nets'"
+ sql "ALTER TABLE nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+ sql "ALTER TABLE sce_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+ "VALUES (29, '0.29', '0.5.59', 'Change member_vnf_index to str accordingly to the model', '2018-04-11');"
+}
+function downgrade_from_29(){
+ # Revert schema v29: member_vnf_index back to SMALLINT, drop osm_id columns.
+ echo "    Change back 'member_vnf_index' from str to int at 'sce_vnfs'"
+ sql "ALTER TABLE sce_vnfs CHANGE COLUMN member_vnf_index member_vnf_index SMALLINT NULL DEFAULT NULL AFTER uuid;"
+ echo "    Remove osm_id from 'nets's and 'sce_nets'"
+ sql "ALTER TABLE nets DROP COLUMN osm_id;"
+ sql "ALTER TABLE sce_nets DROP COLUMN osm_id;"
+ sql "DELETE FROM schema_version WHERE version_int='29';"
+}
+function upgrade_to_30(){
+ # Schema v30: nullable TEXT column 'vms.image_list' for alternative images.
+ echo "    Add 'image_list' at 'vms' to allocate alternative images"
+ sql "ALTER TABLE vms ADD COLUMN image_list TEXT NULL COMMENT 'Alternative images' AFTER image_id;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+ "VALUES (30, '0.30', '0.5.60', 'Add image_list to vms', '2018-04-24');"
+}
+function downgrade_from_30(){
+ # Revert schema v30: drop 'vms.image_list'.
+ echo "    Remove back 'image_list' from 'vms' to allocate alternative images"
+ sql "ALTER TABLE vms DROP COLUMN image_list;"
+ sql "DELETE FROM schema_version WHERE version_int='30';"
+}
+function upgrade_to_31(){
+ # Schema v31: add 'sce_nets.vim_network_name'.
+ echo "    Add 'vim_network_name' at 'sce_nets'"
+ sql "ALTER TABLE sce_nets ADD COLUMN vim_network_name VARCHAR(255) NULL DEFAULT NULL AFTER description;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+ "VALUES (31, '0.31', '0.5.61', 'Add vim_network_name to sce_nets', '2018-05-03');"
+}
+function downgrade_from_31(){
+ # Revert schema v31: drop 'sce_nets.vim_network_name'.
+ echo "    Remove back 'vim_network_name' from 'sce_nets'"
+ sql "ALTER TABLE sce_nets DROP COLUMN vim_network_name;"
+ sql "DELETE FROM schema_version WHERE version_int='31';"
+}
+function upgrade_to_32(){
+ # Schema v32: add 'instance_vms.vim_name'.
+ echo "    Add 'vim_name' to 'instance_vms'"
+ sql "ALTER TABLE instance_vms ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_vm_id;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+ "VALUES (32, '0.32', '0.5.70', 'Add vim_name to instance vms', '2018-06-28');"
+}
+function downgrade_from_32(){
+ # Revert schema v32: drop 'instance_vms.vim_name'.
+ echo "    Remove back 'vim_name' from 'instance_vms'"
+ sql "ALTER TABLE instance_vms DROP COLUMN vim_name;"
+ sql "DELETE FROM schema_version WHERE version_int='32';"
+}
+
+function upgrade_to_33(){
+ # Schema v33: add 'vms.pdu_type' and also 'instance_nets.vim_name'
+ # (the echo only mentions the vms change, but both ALTERs are applied).
+ echo "    Add PDU information to 'vms'"
+ sql "ALTER TABLE vms ADD COLUMN pdu_type VARCHAR(255) NULL DEFAULT NULL AFTER osm_id;"
+ sql "ALTER TABLE instance_nets ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_net_id;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+ "VALUES (33, '0.33', '0.5.82', 'Add pdu information to vms', '2018-11-13');"
+}
+function downgrade_from_33(){
+ # Revert schema v33: drop 'vms.pdu_type' and 'instance_nets.vim_name'.
+ echo "    Remove back PDU information from 'vms'"
+ sql "ALTER TABLE vms DROP COLUMN pdu_type;"
+ sql "ALTER TABLE instance_nets DROP COLUMN vim_name;"
+ sql "DELETE FROM schema_version WHERE version_int='33';"
+}
+function upgrade_to_X(){
+ # NOTE(review): never invoked by migrate(), which dispatches only on
+ # numeric versions (upgrade_to_${N}); it also records nothing in
+ # schema_version. Looks like a pending/template migration -- confirm
+ # before assigning it a version number.
+ echo "      change 'datacenter_nets'"
+ sql "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);"
+}
+function downgrade_from_X(){
+ # NOTE(review): counterpart of upgrade_to_X; equally unreachable from
+ # migrate() and does not touch schema_version -- see upgrade_to_X.
+ echo "      Change back 'datacenter_nets'"
+ sql "ALTER TABLE datacenter_nets DROP COLUMN vim_tenant_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id);"
+}
+function upgrade_to_34() {
+ # Schema v34: run the newest "34*.sql" up-migration file (WIM tables).
+ # The .sql file itself records the schema_version entry.
+ echo "      Create databases required for WIM features"
+ migration_file=$(find "${DBUTILS}/migrations/up" -iname "34*.sql" | tail -1)
+ sql "source ${migration_file}"
+}
+function downgrade_from_34() {
+ # Revert schema v34: run the newest "34*.sql" down-migration file.
+ # The .sql file itself removes the schema_version entry.
+ echo "      Drop databases required for WIM features"
+ migration_file=$(find "${DBUTILS}/migrations/down" -iname "34*.sql" | tail -1)
+ sql "source ${migration_file}"
+}
+function upgrade_to_35(){
+ # Schema v35: run the newest "35*.sql" up-migration file (separate SFC
+ # ingress/egress ports on sce_rsp_hops). The .sql file records the
+ # schema_version entry itself.
+ # Fix vs. original: the progress message was copy-pasted from v34
+ # ("Create databases required for WIM features"); v35 is about SFC ports,
+ # per the header comment of the 35*.sql migration files.
+ echo "      Adding different ingress and egress ports for SFC at 'sce_rsp_hops'"
+ script="$(find "${DBUTILS}/migrations/up" -iname "35*.sql" | tail -1)"
+ sql "source ${script}"
+}
+function downgrade_from_35(){
+ # Revert schema v35: run the newest "35*.sql" down-migration file.
+ # Fix vs. original: the progress message was copy-pasted from v34
+ # ("Drop databases required for WIM features"); v35 is about SFC ports,
+ # per the header comment of the 35*.sql migration files.
+ echo "      Removing different ingress and egress ports for SFC at 'sce_rsp_hops'"
+ script="$(find "${DBUTILS}/migrations/down" -iname "35*.sql" | tail -1)"
+ sql "source ${script}"
+}
+function upgrade_to_36(){
+ # Schema v36: PDUs have no image, so vms.image_id becomes nullable; also
+ # widen the 'config' columns of wims/wim_accounts to TEXT.
+ echo "    Allow null for image_id at 'vms'"
+ # DROP DEFAULT first so the CHANGE COLUMN does not carry a stale default.
+ sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
+ sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NULL COMMENT 'Link to image table' AFTER " \
+ "flavor_id;"
+ echo "    Enlarge config at 'wims' and 'wim_accounts'"
+ sql "ALTER TABLE wims CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER wim_url;"
+ sql "ALTER TABLE wim_accounts CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER password;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+ "VALUES (36, '0.36', '0.6.03', 'Allow vm without image_id for PDUs', '2018-12-19');"
+}
+function downgrade_from_36(){
+ # Revert schema v36: make vms.image_id NOT NULL again. The config-column
+ # widening is deliberately NOT reverted (see comment below).
+ echo "    Force back not null for image_id at 'vms'"
+ sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
+ sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER " \
+ "flavor_id;"
+ # For downgrade do not restore wims/wim_accounts config to varchar 4000
+ sql "DELETE FROM schema_version WHERE version_int='36';"
+}
+function upgrade_to_37(){
+ # Schema v37: extend the vim_wim_actions 'item' enum with the SFC tables
+ # and instance_wim_nets.
+ echo "    Adding the enum tags for SFC"
+ sql "ALTER TABLE vim_wim_actions " \
+ "MODIFY COLUMN item " \
+ "ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces'," \
+ "'instance_sfis','instance_sfs','instance_classifications','instance_sfps','instance_wim_nets') " \
+ "NOT NULL COMMENT 'table where the item is stored';"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+ "VALUES (37, '0.37', '0.6.09', 'Adding the enum tags for SFC', '2019-02-07');"
+}
+function downgrade_from_37(){
+ # Intentionally a no-op on the schema: v37 fixed a missing-enum bug, so
+ # only the schema_version row is removed.
+ echo "    Adding the enum tags for SFC isn't going to be reversed"
+ # It doesn't make sense to reverse to a bug state.
+ sql "DELETE FROM schema_version WHERE version_int='37';"
+}
+function upgrade_to_38(){
+ # Schema v38: add 'worker' and 'related' to vim_wim_actions, introduce the
+ # FINISHED status, add instance_nets.osm_id, and add a 'related' column to
+ # every instance_* table (initialized to the row's own uuid).
+ echo "    Change vim_wim_actions, add worker, related"
+ sql "ALTER TABLE vim_wim_actions ADD COLUMN worker VARCHAR(64) NULL AFTER task_index, " \
+ "ADD COLUMN related VARCHAR(36) NULL AFTER worker, " \
+ "CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED','FINISHED') " \
+ "NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
+ sql "UPDATE vim_wim_actions set related=item_id;"
+ echo "    Change DONE to FINISHED when DELETE has been completed"
+ # Pairs each CREATE/FIND action with a completed DELETE on the same item
+ # and marks both as FINISHED.
+ sql "UPDATE vim_wim_actions as v1 join vim_wim_actions as v2 on (v1.action='CREATE' or v1.action='FIND') and " \
+ "v2.action='DELETE' and (v2.status='SUPERSEDED' or v2.status='DONE') and v1.item_id=v2.item_id " \
+ "SET v1.status='FINISHED', v2.status='FINISHED';"
+ echo "    Add osm_id to instance_nets"
+ sql "ALTER TABLE instance_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+ echo "    Add related to instance_xxxx"
+ for table in instance_classifications instance_nets instance_sfis instance_sfps instance_sfs \
+ instance_vms
+ do
+ sql "ALTER TABLE $table ADD COLUMN related VARCHAR(36) NULL AFTER vim_info;"
+ sql "UPDATE $table set related=uuid;"
+ done
+ # instance_wim_nets is handled apart: its info column is wim_info, not vim_info.
+ sql "ALTER TABLE instance_wim_nets ADD COLUMN related VARCHAR(36) NULL AFTER wim_info;"
+ sql "UPDATE instance_wim_nets set related=uuid;"
+
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+ "VALUES (38, '0.38', '0.6.11', 'Adding related to vim_wim_actions', '2019-03-07');"
+
+}
+function downgrade_from_38(){
+ # Revert schema v38: fold FINISHED back into DONE, drop worker/related
+ # from vim_wim_actions and all instance_* tables, drop instance_nets.osm_id.
+ echo "    Change vim_wim_actions, delete worker, related"
+ # Map FINISHED rows to DONE before shrinking the enum.
+ sql "UPDATE vim_wim_actions SET status='DONE' WHERE status='FINISHED';"
+ sql "ALTER TABLE vim_wim_actions DROP COLUMN worker, DROP COLUMN related, " \
+ "CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED') " \
+ "NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
+ echo "    Remove related from instance_xxxx"
+ for table in instance_classifications instance_nets instance_wim_nets instance_sfis instance_sfps instance_sfs \
+ instance_vms
+ do
+ sql "ALTER TABLE $table DROP COLUMN related;"
+ done
+ echo "    Remove osm_id from instance_nets"
+ sql "ALTER TABLE instance_nets DROP COLUMN osm_id;"
+ sql "DELETE FROM schema_version WHERE version_int='38';"
+}
+
+function upgrade_to_39(){
+ # Schema v39: widen every vim_*_id column to VARCHAR(300); also drops the
+ # unique index on instance_vms.vim_vm_id (restored by downgrade_from_39).
+ echo "    Enlarge vim_id to 300 at all places"
+ sql "ALTER TABLE datacenters_flavors CHANGE COLUMN vim_id vim_id VARCHAR(300) NOT NULL AFTER datacenter_vim_id;"
+ sql "ALTER TABLE datacenters_images CHANGE COLUMN vim_id vim_id VARCHAR(300) NOT NULL AFTER datacenter_vim_id;"
+ sql "ALTER TABLE datacenter_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(300) NOT NULL AFTER name;"
+ sql "ALTER TABLE instance_classifications CHANGE COLUMN vim_classification_id vim_classification_id VARCHAR(300)" \
+ " NULL DEFAULT NULL AFTER instance_scenario_id;"
+ sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(300) NULL DEFAULT " \
+ " NULL AFTER interface_id;"
+ sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(300) NULL DEFAULT NULL" \
+ " AFTER osm_id;"
+ sql "ALTER TABLE instance_sfis CHANGE COLUMN vim_sfi_id vim_sfi_id VARCHAR(300) NULL DEFAULT NULL" \
+ " AFTER instance_scenario_id;"
+ sql "ALTER TABLE instance_sfps CHANGE COLUMN vim_sfp_id vim_sfp_id VARCHAR(300) NULL DEFAULT NULL" \
+ " AFTER instance_scenario_id;"
+ sql "ALTER TABLE instance_sfs CHANGE COLUMN vim_sf_id vim_sf_id VARCHAR(300) NULL DEFAULT NULL" \
+ " AFTER instance_scenario_id;"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(300) NULL DEFAULT NULL" \
+ " AFTER instance_vnf_id, DROP INDEX vim_vm_id;"
+ sql "ALTER TABLE instance_wim_nets CHANGE COLUMN wim_internal_id wim_internal_id VARCHAR(300) NULL DEFAULT NULL" \
+ " COMMENT 'Internal ID used by the WIM to refer to the network' AFTER uuid;"
+ sql "ALTER TABLE vim_wim_actions CHANGE COLUMN vim_id vim_id VARCHAR(300) NULL DEFAULT NULL" \
+ " AFTER datacenter_vim_id;"
+
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+ "VALUES (39, '0.39', '0.6.20', 'Enlarge vim_id to 300 at all places', '2019-05-23');"
+}
+function downgrade_from_39(){
+ # Revert schema v39: restore each vim_*_id column to its pre-v39 width
+ # (widths differ per table) and re-create the unique index on
+ # instance_vms.vim_vm_id. Fix vs. original: "lenght" typo in the message.
+ echo "    Set vim_id to original length at all places"
+ sql "ALTER TABLE datacenters_flavors CHANGE COLUMN vim_id vim_id VARCHAR(36) NOT NULL AFTER datacenter_vim_id;"
+ sql "ALTER TABLE datacenters_images CHANGE COLUMN vim_id vim_id VARCHAR(36) NOT NULL AFTER datacenter_vim_id;"
+ sql "ALTER TABLE datacenter_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(36) NOT NULL AFTER name;"
+ sql "ALTER TABLE instance_classifications CHANGE COLUMN vim_classification_id vim_classification_id VARCHAR(36)" \
+ " NULL DEFAULT NULL AFTER instance_scenario_id;"
+ sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(128) NULL DEFAULT " \
+ " NULL AFTER interface_id;"
+ sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(128) NULL DEFAULT NULL" \
+ " AFTER osm_id;"
+ sql "ALTER TABLE instance_sfis CHANGE COLUMN vim_sfi_id vim_sfi_id VARCHAR(36) NULL DEFAULT NULL" \
+ " AFTER instance_scenario_id;"
+ sql "ALTER TABLE instance_sfps CHANGE COLUMN vim_sfp_id vim_sfp_id VARCHAR(36) NULL DEFAULT NULL" \
+ " AFTER instance_scenario_id;"
+ sql "ALTER TABLE instance_sfs CHANGE COLUMN vim_sf_id vim_sf_id VARCHAR(36) NULL DEFAULT NULL" \
+ " AFTER instance_scenario_id;"
+ sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(36) NULL DEFAULT NULL" \
+ " AFTER instance_vnf_id, ADD UNIQUE INDEX vim_vm_id (vim_vm_id);"
+ sql "ALTER TABLE instance_wim_nets CHANGE COLUMN wim_internal_id wim_internal_id VARCHAR(128) NULL DEFAULT NULL" \
+ " COMMENT 'Internal ID used by the WIM to refer to the network' AFTER uuid;"
+ sql "ALTER TABLE vim_wim_actions CHANGE COLUMN vim_id vim_id VARCHAR(64) NULL DEFAULT NULL" \
+ " AFTER datacenter_vim_id;"
+
+ sql "DELETE FROM schema_version WHERE version_int='39';"
+}
+function upgrade_to_40(){
+ # Schema v40 (SDN rework): extra columns on instance_interfaces and
+ # instance_wim_nets, wim_accounts.created renamed to sdn, wim_port_mappings
+ # generalized from pop_* to device_* columns, and the unique constraints on
+ # wim_port_mappings and datacenters.name removed.
+ # Fix vs. original: schema_version comment said "Chagnes to SDN " (typo and
+ # trailing space).
+ echo "    Adding instance_wim_net_id, created_at, modified_at at 'instance_interfaces'"
+ sql "ALTER TABLE instance_interfaces ADD COLUMN instance_wim_net_id VARCHAR(36) NULL AFTER instance_net_id, " \
+ "ADD COLUMN model VARCHAR(12) NULL DEFAULT NULL AFTER type, " \
+ "ADD COLUMN created_at DOUBLE NULL DEFAULT NULL AFTER vlan, " \
+ "ADD COLUMN modified_at DOUBLE NULL DEFAULT NULL AFTER created_at;"
+ echo "    Adding sdn to 'instance_wim_nets'"
+ sql "ALTER TABLE instance_wim_nets ADD COLUMN sdn ENUM('true','false') NOT NULL DEFAULT 'false' AFTER created;"
+ echo "    Change from created to sdn at 'wim_accounts'"
+ sql "ALTER TABLE wim_accounts CHANGE COLUMN created sdn ENUM('true','false') NOT NULL DEFAULT 'false' AFTER wim_id;"
+ echo "    Remove unique_datacenter_port_mapping at 'wim_port_mappings'"
+ sql "ALTER TABLE wim_port_mappings DROP INDEX unique_datacenter_port_mapping;"
+ echo "    change 'wim_port_mappings' pop_x to device_x, adding switch_dpid, switch_port"
+ # DROP DEFAULT first so the CHANGE COLUMN does not carry stale defaults.
+ sql "ALTER TABLE wim_port_mappings ALTER pop_switch_dpid DROP DEFAULT, ALTER pop_switch_port DROP DEFAULT;"
+ sql "ALTER TABLE wim_port_mappings CHANGE COLUMN pop_switch_dpid device_id VARCHAR(64) NULL AFTER datacenter_id," \
+ " CHANGE COLUMN pop_switch_port device_interface_id VARCHAR(64) NULL AFTER device_id, " \
+ " CHANGE COLUMN wan_service_endpoint_id service_endpoint_id VARCHAR(256) NOT NULL AFTER device_interface_id, " \
+ " CHANGE COLUMN wan_service_mapping_info service_mapping_info TEXT NULL AFTER service_endpoint_id, " \
+ " ADD COLUMN switch_dpid VARCHAR(64) NULL AFTER service_endpoint_id," \
+ " ADD COLUMN switch_port VARCHAR(64) NULL AFTER switch_dpid;"
+ echo "    remove unique name to 'datacenters'"
+ sql "ALTER TABLE datacenters DROP INDEX name;"
+
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+ "VALUES (40, '0.40', '6.0.4', 'Changes to SDN', '2019-10-23');"
+}
+function downgrade_from_40(){
+ # Revert schema v40: drop the added columns, rename sdn back to created and
+ # device_* back to pop_*, and restore the unique indexes removed by v40.
+ echo "    Removing instance_wim_net_id, created_at, modified_at from 'instance_interfaces'"
+ sql "ALTER TABLE instance_interfaces DROP COLUMN instance_wim_net_id, DROP COLUMN created_at, " \
+ "DROP COLUMN modified_at, DROP COLUMN model;"
+ echo "    Removing sdn from 'instance_wim_nets'"
+ sql "ALTER TABLE instance_wim_nets DROP COLUMN sdn;"
+ echo "    Change back from sdn to created at 'wim_accounts'"
+ sql "ALTER TABLE wim_accounts CHANGE COLUMN sdn created ENUM('true','false') NOT NULL DEFAULT 'false' AFTER wim_id;"
+ # The unique index itself is re-added after the columns regain their old names.
+ echo "    Restore back unique_datacenter_port_mapping at 'wim_port_mappings'"
+ echo "    change 'wim_port_mappings' device_x to pop_x, remove switch_dpid, switch_port"
+ sql "ALTER TABLE wim_port_mappings ALTER device_id DROP DEFAULT, ALTER device_interface_id DROP DEFAULT;"
+ sql "ALTER TABLE wim_port_mappings CHANGE COLUMN device_id pop_switch_dpid VARCHAR(64) NOT NULL AFTER " \
+ "datacenter_id, CHANGE COLUMN device_interface_id pop_switch_port VARCHAR(64) NOT NULL AFTER pop_switch_dpid," \
+ " CHANGE COLUMN service_endpoint_id wan_service_endpoint_id VARCHAR(256) NOT NULL AFTER pop_switch_port, " \
+ " CHANGE COLUMN service_mapping_info wan_service_mapping_info TEXT NULL AFTER wan_service_endpoint_id, " \
+ " DROP COLUMN switch_dpid, DROP COLUMN switch_port;"
+ sql "ALTER TABLE wim_port_mappings ADD UNIQUE INDEX unique_datacenter_port_mapping(datacenter_id, " \
+ "pop_switch_dpid, pop_switch_port);"
+ echo "    add unique name to 'datacenters'"
+ sql "ALTER TABLE datacenters ADD UNIQUE INDEX name (name);"
+ sql "DELETE FROM schema_version WHERE version_int='40';"
+}
+
+function upgrade_to_41(){
+ # Schema v41: allow duplicate names for WIMs and WIM accounts.
+ echo "    Removing unique name at 'wims' 'wim_accounts'"
+ sql "ALTER TABLE wims DROP INDEX name;"
+ sql "ALTER TABLE wim_accounts DROP INDEX wim_name;"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+ "VALUES (41, '0.41', '8.0.0', 'Removing unique name for wims/wim_accounts', '2020-07-16');"
+}
+function downgrade_from_41(){
+ # Revert schema v41: restore the unique name indexes (fails if duplicate
+ # names were created while on v41).
+ echo "    Adding back unique name at 'wims' 'wim_accounts'"
+ sql "ALTER TABLE wims ADD UNIQUE INDEX name (name);"
+ sql "ALTER TABLE wim_accounts ADD UNIQUE INDEX wim_name (name);"
+ sql "DELETE FROM schema_version WHERE version_int='41';"
+}
+
+function upgrade_to_42(){
+ # Schema v42: add 'interfaces.port_security_disable_strategy'.
+ echo "    Adding 'port_security_disable_strategy' to 'interfaces'"
+ sql "ALTER TABLE interfaces ADD COLUMN port_security_disable_strategy CHAR(25);"
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+ "VALUES (42, '0.42', '8.0.2', 'Adding port_security_disable_strategy to interfaces', '2020-10-19');"
+}
+
+function downgrade_from_42(){
+ # Revert schema v42: drop 'interfaces.port_security_disable_strategy'.
+ # Fix vs. original: function was misnamed 'downgrade_to_42'; migrate()
+ # dispatches to downgrade_from_${N}, so downgrading from v42 failed with
+ # "command not found" under the old name.
+ echo "    Removing 'port_security_disable_strategy' from 'interfaces'"
+ sql "ALTER TABLE interfaces DROP port_security_disable_strategy;"
+ sql "DELETE FROM schema_version WHERE version_int='42';"
+}
+
+#TODO ... put functions here
+
+
+function del_schema_version_process()
+{
+ # Remove the version_int=0 marker row that flags a migration in progress.
+ # The '|| ! echo ... || exit' idiom: on DB failure, print the error and exit 1.
+ echo "DELETE FROM schema_version WHERE version_int='0';" | $DBCMD ||
+ ! echo "    ERROR writing on schema_version" >&2 || exit 1
+}
+
+function set_schema_version_process()
+{
+ # Insert the version_int=0 marker row recording the source/target versions
+ # and the backup file path. The recovery path later extracts the path from
+ # the comments column via ${DATABASE_PROCESS##*backup: }.
+ echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES "\
+ "(0, '0.0', '0.0.0', 'migration from $DATABASE_VER_NUM to $DB_VERSION backup: $BACKUP_FILE',"\
+ "'$(date +%Y-%m-%d)');" | $DBCMD ||
+ ! echo "    Cannot set database at migration process writing into schema_version" >&2 || exit 1
+
+}
+
+function rollback_db()
+{
+ # Restore the database from $BACKUP_FILE after a failed migration step.
+ # Always terminates the script with exit 1; an initially-empty database
+ # (DATABASE_PROCESS contains 'init') has nothing to restore.
+ if echo $DATABASE_PROCESS | grep -q init ; then   # Empty database. No backup needed
+ echo "    Aborted! Rollback database not needed" && exit 1
+ else   # migration a non empty database or Recovering a migration process
+ cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM && echo "    Aborted! Rollback database OK" &&
+ del_schema_version_process && rm -f "$BACKUP_FILE" && exit 1
+ # Reached only if the restore itself failed.
+ echo "    Aborted! Rollback database FAIL" && exit 1
+ fi
+}
+
+function sql() # send a sql command
+{
+ # Pipe the statement(s) to the DB client; on failure print the failing
+ # command and roll the database back (rollback_db exits the script).
+ # Fix vs. original: error message now goes to stderr, consistent with
+ # every other error path in this script.
+ echo "$*" | $DBCMD || ! echo "    ERROR with command '$*'" >&2 || rollback_db
+ return 0
+}
+
+function migrate()
+{
+ # Step the schema one version at a time from DATABASE_VER_NUM (current)
+ # to DB_VERSION (target), dispatching to upgrade_to_N / downgrade_from_N
+ # by name. Exactly one of the two loops runs.
+ #UPGRADE DATABASE step by step
+ while [ $DB_VERSION -gt $DATABASE_VER_NUM ]
+ do
+ echo "    upgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM+1))'"
+ DATABASE_VER_NUM=$((DATABASE_VER_NUM+1))
+ upgrade_to_${DATABASE_VER_NUM}
+ #FILE_="${DIRNAME}/upgrade_to_${DATABASE_VER_NUM}.sh"
+ #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to upgrade" >&2 && exit -1
+ #$FILE_ || exit -1  # if fail return
+ done
+
+ #DOWNGRADE DATABASE step by step
+ while [ $DB_VERSION -lt $DATABASE_VER_NUM ]
+ do
+ echo "    downgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM-1))'"
+ #FILE_="${DIRNAME}/downgrade_from_${DATABASE_VER_NUM}.sh"
+ #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to downgrade" >&2 && exit -1
+ #$FILE_ || exit -1  # if fail return
+ downgrade_from_${DATABASE_VER_NUM}
+ DATABASE_VER_NUM=$((DATABASE_VER_NUM-1))
+ done
+}
+
+
+# check if current database is ok
+function check_migration_needed()
+{
+ DATABASE_VER_NUM=`echo "select max(version_int) from schema_version;" | $DBCMD | tail -n+2` ||
+ ! echo " ERROR cannot read from schema_version" || exit 1
+
+ if [[ -z "$DATABASE_VER_NUM" ]] || [[ "$DATABASE_VER_NUM" -lt 0 ]] || [[ "$DATABASE_VER_NUM" -gt 100 ]] ; then
+ echo " Error can not get database version ($DATABASE_VER_NUM?)" >&2
+ exit 1
+ fi
+
+ [[ $DB_VERSION -eq $DATABASE_VER_NUM ]] && echo " current database version '$DATABASE_VER_NUM' is ok" && return 1
+ [[ "$DATABASE_VER_NUM" -gt "$LAST_DB_VERSION" ]] &&
+ echo "Database has been upgraded with a newer version of this script. Use this version to downgrade" >&2 &&
+ exit 1
+ return 0
+}
+
+# Driver: inspect the version_int=0 marker row to decide between three modes:
+#  1) no marker   -> normal migration of a populated DB (backup first),
+#  2) 'init'      -> fresh/empty DB, migrate without backup,
+#  3) other text  -> a previous migration was interrupted; restore its backup
+#                    (path embedded after 'backup: ' in the comments) and retry.
+DATABASE_PROCESS=`echo "select comments from schema_version where version_int=0;" | $DBCMD | tail -n+2` ||
+ ! echo "    ERROR cannot read from schema_version" || exit 1
+if [[ -z "$DATABASE_PROCESS" ]] ; then  # migration a non empty database
+ check_migration_needed || exit 0
+ # Create a backup database content
+ [[ -n "$BACKUP_DIR" ]] && BACKUP_FILE=$(mktemp -q "${BACKUP_DIR}/backupdb.XXXXXX.sql")
+ [[ -z "$BACKUP_DIR" ]] && BACKUP_FILE=$(mktemp -q --tmpdir "backupdb.XXXXXX.sql")
+ mysqldump $DEF_EXTRA_FILE_PARAM --add-drop-table --add-drop-database --routines --databases $DBNAME > $BACKUP_FILE ||
+ ! echo "Cannot create Backup file '$BACKUP_FILE'" >&2 || exit 1
+ echo "    Backup file '$BACKUP_FILE' created"
+ # Set schema version
+ set_schema_version_process
+ migrate
+ del_schema_version_process
+ rm -f "$BACKUP_FILE"
+elif echo $DATABASE_PROCESS | grep -q init ; then  # Empty database. No backup needed
+ echo "    Migrating an empty database"
+ if check_migration_needed ; then
+ migrate
+ fi
+ del_schema_version_process
+
+else  # Recover Migration process
+ BACKUP_FILE=${DATABASE_PROCESS##*backup: }
+ [[ -f "$BACKUP_FILE" ]] || ! echo "Previous migration process fail and cannot recover backup file '$BACKUP_FILE'" >&2 ||
+ exit 1
+ echo "    Previous migration was killed. Restoring database from rollback file'$BACKUP_FILE'"
+ cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM || ! echo "    Cannot load backup file '$BACKUP_FILE'" >&2 || exit 1
+ if check_migration_needed ; then
+ set_schema_version_process
+ migrate
+ fi
+ del_schema_version_process
+ rm -f "$BACKUP_FILE"
+fi
+exit 0
+
+#echo done
+
--- /dev/null
+/**
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+**/
+--
+-- Tear down database structure required for integrating OSM with
+-- Wide Area Network Infrastructure Managers
+--
+
+-- Dependent tables are dropped before wim_accounts/wims so the foreign keys
+-- referencing them never dangle.
+DROP TABLE IF EXISTS wim_port_mappings;
+DROP TABLE IF EXISTS wim_nfvo_tenants;
+DROP TABLE IF EXISTS instance_wim_nets;
+
+-- Strip the WIM additions from vim_wim_actions and rename it back to the
+-- pre-v34 name vim_actions (FK/index first, then enum, then columns).
+ALTER TABLE `vim_wim_actions` DROP FOREIGN KEY `FK_actions_wims`;
+ALTER TABLE `vim_wim_actions` DROP INDEX `FK_actions_wims`;
+ALTER TABLE `vim_wim_actions` DROP INDEX `item_type_id`;
+ALTER TABLE `vim_wim_actions` MODIFY `item` enum(
+ 'datacenters_flavors',
+ 'datacenter_images',
+ 'instance_nets',
+ 'instance_vms',
+ 'instance_interfaces',
+ 'instance_sfis',
+ 'instance_sfs',
+ 'instance_classifications',
+ 'instance_sfps') NOT NULL
+ COMMENT 'table where the item is stored';
+ALTER TABLE `vim_wim_actions` MODIFY `datacenter_vim_id` varchar(36) NOT NULL;
+ALTER TABLE `vim_wim_actions` DROP `wim_internal_id`, DROP `wim_account_id`;
+ALTER TABLE `vim_wim_actions` RENAME TO `vim_actions`;
+
+DROP TABLE IF EXISTS wim_accounts;
+DROP TABLE IF EXISTS wims;
+
+-- Unregister the v34 migration.
+DELETE FROM schema_version WHERE version_int='34';
--- /dev/null
+/**
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+**/
+--
+-- Removing ingress and egress ports for SFC purposes.
+-- Inserting only one port for ingress and egress.
+--
+
+-- Collapse the separate ingress/egress interface columns of sce_rsp_hops back
+-- into a single interface_id: the ingress column is renamed and re-keyed, the
+-- egress column (and both old FKs) removed.
+ALTER TABLE sce_rsp_hops
+ DROP FOREIGN KEY FK_interfaces_rsp_hop_ingress,
+ CHANGE COLUMN ingress_interface_id interface_id VARCHAR(36) NOT NULL
+ AFTER if_order,
+ ADD CONSTRAINT FK_interfaces_rsp_hop
+ FOREIGN KEY (interface_id)
+ REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ DROP FOREIGN KEY FK_interfaces_rsp_hop_egress,
+ DROP COLUMN egress_interface_id;
+
+-- Unregister the v35 migration.
+DELETE FROM schema_version WHERE version_int='35';
--- /dev/null
+/**
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+**/
+--
+-- Setup database structure required for integrating OSM with
+-- Wide Area Network Infrastructure Managers
+--
+
+-- Registry of WIMs known to the NFVO; 'config' holds plugin-specific data.
+DROP TABLE IF EXISTS wims;
+CREATE TABLE wims (
+ `uuid` varchar(36) NOT NULL,
+ `name` varchar(255) NOT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `type` varchar(36) NOT NULL DEFAULT 'odl',
+ `wim_url` varchar(150) NOT NULL,
+ `config` varchar(4000) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `name` (`name`)
+)
+ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
+COMMENT='WIMs managed by the NFVO.';
+
+-- Per-user credentials for a WIM; cascade-deleted with the owning wims row.
+DROP TABLE IF EXISTS wim_accounts;
+CREATE TABLE wim_accounts (
+ `uuid` varchar(36) NOT NULL,
+ `name` varchar(255) DEFAULT NULL,
+ `wim_id` varchar(36) NOT NULL,
+ `created` enum('true','false') NOT NULL DEFAULT 'false',
+ `user` varchar(64) DEFAULT NULL,
+ `password` varchar(64) DEFAULT NULL,
+ `config` varchar(4000) DEFAULT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `wim_name` (`wim_id`,`name`),
+ KEY `FK_wim_accounts_wims` (`wim_id`),
+ CONSTRAINT `FK_wim_accounts_wims` FOREIGN KEY (`wim_id`)
+ REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+)
+ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
+COMMENT='WIM accounts by the user';
+
+-- Association of NFVO tenants to WIM accounts (one account per wim/tenant pair).
+-- NOTE(review): AUTO_INCREMENT=86 looks like a dump artifact carried into the
+-- migration -- harmless, but confirm it is intentional.
+DROP TABLE IF EXISTS `wim_nfvo_tenants`;
+CREATE TABLE `wim_nfvo_tenants` (
+ `id` integer NOT NULL AUTO_INCREMENT,
+ `nfvo_tenant_id` varchar(36) NOT NULL,
+ `wim_id` varchar(36) NOT NULL,
+ `wim_account_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `wim_nfvo_tenant` (`wim_id`,`nfvo_tenant_id`),
+ KEY `FK_wims_nfvo_tenants` (`wim_id`),
+ KEY `FK_wim_accounts_nfvo_tenants` (`wim_account_id`),
+ KEY `FK_nfvo_tenants_wim_accounts` (`nfvo_tenant_id`),
+ CONSTRAINT `FK_wims_nfvo_tenants` FOREIGN KEY (`wim_id`)
+ REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_wim_accounts_nfvo_tenants` FOREIGN KEY (`wim_account_id`)
+ REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_nfvo_tenants_wim_accounts` FOREIGN KEY (`nfvo_tenant_id`)
+ REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+)
+ENGINE=InnoDB AUTO_INCREMENT=86 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
+COMMENT='WIM accounts mapping to NFVO tenants';
+
+-- Instantiated WIM networks: mirrors instance_nets but for WAN connectivity.
+-- Deleting the instance scenario cascades; deleting the sce_net nulls the link.
+DROP TABLE IF EXISTS `instance_wim_nets`;
+CREATE TABLE `instance_wim_nets` (
+ `uuid` varchar(36) NOT NULL,
+ `wim_internal_id` varchar(128) DEFAULT NULL
+ COMMENT 'Internal ID used by the WIM to refer to the network',
+ `instance_scenario_id` varchar(36) DEFAULT NULL,
+ `sce_net_id` varchar(36) DEFAULT NULL,
+ `wim_id` varchar(36) DEFAULT NULL,
+ `wim_account_id` varchar(36) NOT NULL,
+ `status` enum(
+ 'ACTIVE',
+ 'INACTIVE',
+ 'DOWN',
+ 'BUILD',
+ 'ERROR',
+ 'WIM_ERROR',
+ 'DELETED',
+ 'SCHEDULED_CREATION',
+ 'SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `wim_info` text,
+ `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
+ `created` enum('true','false') NOT NULL DEFAULT 'false'
+ COMMENT 'Created or already exists at WIM',
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_wim_nets_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_wim_nets_sce_nets` (`sce_net_id`),
+ KEY `FK_instance_wim_nets_wims` (`wim_id`),
+ KEY `FK_instance_wim_nets_wim_accounts` (`wim_account_id`),
+ CONSTRAINT `FK_instance_wim_nets_wim_accounts`
+ FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`),
+ CONSTRAINT `FK_instance_wim_nets_wims`
+ FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`),
+ CONSTRAINT `FK_instance_wim_nets_instance_scenarios`
+ FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`)
+ ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_wim_nets_sce_nets`
+ FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`)
+ ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
+ COMMENT='Instances of wim networks';
+
+-- Rename vim_actions to vim_wim_actions and extend it for WIM work items:
+-- new wim columns, datacenter_vim_id made nullable (WIM rows have none),
+-- 'instance_wim_nets' added to the item enum, plus supporting index and FK.
+ALTER TABLE `vim_actions`
+ RENAME TO `vim_wim_actions`;
+ALTER TABLE `vim_wim_actions`
+ ADD `wim_account_id` varchar(36) DEFAULT NULL AFTER `vim_id`,
+ ADD `wim_internal_id` varchar(64) DEFAULT NULL AFTER `wim_account_id`,
+ MODIFY `datacenter_vim_id` varchar(36) DEFAULT NULL,
+ MODIFY `item` enum(
+ 'datacenters_flavors',
+ 'datacenter_images',
+ 'instance_nets',
+ 'instance_vms',
+ 'instance_interfaces',
+ 'instance_sfis',
+ 'instance_sfs',
+ 'instance_classifications',
+ 'instance_sfps',
+ 'instance_wim_nets') NOT NULL
+ COMMENT 'table where the item is stored';
+ALTER TABLE `vim_wim_actions`
+ ADD INDEX `item_type_id` (`item`, `item_id`);
+ALTER TABLE `vim_wim_actions`
+ ADD INDEX `FK_actions_wims` (`wim_account_id`);
+ALTER TABLE `vim_wim_actions`
+ ADD CONSTRAINT `FK_actions_wims` FOREIGN KEY (`wim_account_id`)
+ REFERENCES `wim_accounts` (`uuid`)
+ ON UPDATE CASCADE ON DELETE CASCADE;
+
+-- Physical attachment points between datacenters and WIM service endpoints.
+DROP TABLE IF EXISTS `wim_port_mappings`;
+CREATE TABLE `wim_port_mappings` (
+ `id` integer NOT NULL AUTO_INCREMENT,
+ `wim_id` varchar(36) NOT NULL,
+ `datacenter_id` varchar(36) NOT NULL,
+ `pop_switch_dpid` varchar(64) NOT NULL,
+ `pop_switch_port` varchar(64) NOT NULL,
+ -- Fix vs. original: this column carried two chained COMMENT attributes;
+ -- MySQL keeps only the LAST duplicate column attribute, silently dropping
+ -- the first sentence. Merged into a single COMMENT.
+ `wan_service_endpoint_id` varchar(256) NOT NULL
+ COMMENT 'In case the WIM plugin relies on the wan_service_mapping_info, this field contains a unique identifier used to check the mapping_info consistency',
+ /* In other words: wan_service_endpoint_id = f(wan_service_mapping_info)
+ * where f is an injective function
+ */
+ `wan_service_mapping_info` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `unique_datacenter_port_mapping`
+ (`datacenter_id`, `pop_switch_dpid`, `pop_switch_port`),
+ UNIQUE KEY `unique_wim_port_mapping`
+ (`wim_id`, `wan_service_endpoint_id`),
+ KEY `FK_wims_wim_physical_connections` (`wim_id`),
+ KEY `FK_datacenters_wim_port_mappings` (`datacenter_id`),
+ CONSTRAINT `FK_wims_wim_port_mappings` FOREIGN KEY (`wim_id`)
+ REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_datacenters_wim_port_mappings` FOREIGN KEY (`datacenter_id`)
+ REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+)
+ENGINE=InnoDB DEFAULT CHARSET=utf8
+COMMENT='WIM port mappings managed by the WIM.';
+
+-- Update Schema with DB version
+INSERT INTO schema_version
+VALUES (34, '0.34', '0.6.00', 'Added WIM tables', '2018-09-10');
--- /dev/null
+/**
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+**/
+--
+-- Adding different ingress and egress ports for SFC.
+--
+
-- Split the single hop interface into separate ingress/egress interfaces.
ALTER TABLE sce_rsp_hops
  DROP FOREIGN KEY FK_interfaces_rsp_hop,
  CHANGE COLUMN interface_id ingress_interface_id VARCHAR(36) NOT NULL
    AFTER if_order,
  ADD CONSTRAINT FK_interfaces_rsp_hop_ingress
    FOREIGN KEY (ingress_interface_id)
    REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
  -- egress starts as nullable so existing rows can be backfilled first
  ADD COLUMN egress_interface_id VARCHAR(36) NULL DEFAULT NULL
    AFTER ingress_interface_id;

-- Backfill: existing hops use the same interface for ingress and egress.
UPDATE sce_rsp_hops
  SET egress_interface_id = ingress_interface_id;

ALTER TABLE sce_rsp_hops
  ALTER COLUMN egress_interface_id DROP DEFAULT;

-- Now that every row is populated, enforce NOT NULL and add the FK.
ALTER TABLE sce_rsp_hops
  MODIFY COLUMN egress_interface_id VARCHAR(36) NOT NULL
    AFTER ingress_interface_id,
  ADD CONSTRAINT FK_interfaces_rsp_hop_egress
    FOREIGN KEY (egress_interface_id)
    REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE;

INSERT INTO schema_version (version_int, version, openmano_ver, comments, date)
  VALUES (35, '0.35', '0.6.02', 'Adding ingress and egress ports for RSPs', '2018-12-11');
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
'''
Base class for openmano database manipulation
'''
# Original authorship metadata kept from the openmano project.
__author__="Alfonso Tierno"
__date__ ="$4-Apr-2016 10:05:01$"
+
import datetime
import json
import logging
import re
import time
import uuid as myUuid
from contextlib import contextmanager
from functools import wraps, partial
from threading import Lock

import MySQLdb as mdb
#import yaml
from jsonschema import validate as js_v, exceptions as js_e

from osm_ro import utils as af

from .http_tools import errors as httperrors
from .utils import Attempt, get_arg, inject_args
+
+
# Seconds to wait before attempting a full re-connect when the DB server
# looks unavailable (see db_base.reconnect).
RECOVERY_TIME = 3

# Shared default Attempt used by @retry-decorated methods when the caller
# does not supply its own attempt tracker.
_ATTEMPT = Attempt()
+
+
def with_transaction(fn=None, cursor=None):
    """Decorator that can be used together with instances of the ``db_base``
    class, to perform database actions wrapped in a commit/rollback fashion.

    The decorated function is simply executed inside the context manager
    returned by the instance's ``transaction`` method.

    Arguments:
        cursor: [Optional] pass ``'dict'`` to get a DictCursor
    """
    # Allow using the decorator bare or parameterized: @with_transaction
    # as well as @with_transaction(cursor='dict')
    if fn is None:
        return partial(with_transaction, cursor=cursor)

    @wraps(fn)
    def _wrapper(self, *args, **kwargs):
        # MySQLdb defines the "cursors" module attribute lazily, so the
        # reference to mdb.cursors.DictCursor is deferred until call time.
        cursor_type = mdb.cursors.DictCursor if cursor == 'dict' else None
        with self.transaction(cursor_type):
            return fn(self, *args, **kwargs)

    return _wrapper
+
+
def retry(fn=None, max_attempts=Attempt.MAX, **info):
    """Decorator that can be used together with instances of the ``db_base``
    class, to replay a method again after a unexpected error.

    The function being decorated needs to either be a method of ``db_base``
    subclasses or accept an ``db_base`` instance as the first parameter.

    All the extra keyword arguments will be passed to the ``_format_error``
    method
    """
    if fn is None:  # Allows calling the decorator directly or with parameters
        return partial(retry, max_attempts=max_attempts, **info)

    @wraps(fn)
    def _wrapper(*args, **kwargs):
        self = args[0]  # the db_base instance ('self' of the wrapped method)
        # Record which table the call refers to, for better error messages.
        info.setdefault('table', get_arg('table', fn, args, kwargs))
        attempt = Attempt(max_attempts=max_attempts, info=info)
        while attempt.countdown >= 0:
            try:
                # inject_args only passes 'attempt' if fn declares it.
                return inject_args(fn, attempt=attempt)(*args, **kwargs)
            except (mdb.Error, AttributeError) as ex:
                self.logger.debug("Attempt #%d", attempt.number)
                try:
                    # The format error will throw exceptions, however it can
                    # tolerate a certain amount of retries if it judges that
                    # the error can be solved with retrying
                    self._format_error(ex, attempt.countdown, **attempt.info)
                    # Anyway, unexpected/unknown errors can still be retried
                except db_base_Exception as db_ex:
                    # Re-raise unless retries remain AND the error was judged
                    # internal (i.e. possibly transient).
                    if (attempt.countdown < 0 or db_ex.http_code !=
                            httperrors.Internal_Server_Error):
                        raise

            # consume one attempt; Attempt derives countdown from count
            attempt.count += 1

    return _wrapper
+
+
+def _check_valid_uuid(uuid):
+ id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+ id_schema2 = {"type" : "string", "pattern": "^[a-fA-F0-9]{32}$"}
+ try:
+ js_v(uuid, id_schema)
+ return True
+ except js_e.ValidationError:
+ try:
+ js_v(uuid, id_schema2)
+ return True
+ except js_e.ValidationError:
+ return False
+ return False
+
+def _convert_datetime2str(var):
+ '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%i:%s'
+ It enters recursively in the dict var finding this kind of variables
+ '''
+ if type(var) is dict:
+ for k,v in var.items():
+ if type(v) is datetime.datetime:
+ var[k]= v.strftime('%Y-%m-%dT%H:%M:%S')
+ elif type(v) is dict or type(v) is list or type(v) is tuple:
+ _convert_datetime2str(v)
+ if len(var) == 0: return True
+ elif type(var) is list or type(var) is tuple:
+ for v in var:
+ _convert_datetime2str(v)
+
+def _convert_bandwidth(data, reverse=False, logger=None):
+ '''Check the field bandwidth recursivelly and when found, it removes units and convert to number
+ It assumes that bandwidth is well formed
+ Attributes:
+ 'data': dictionary bottle.FormsDict variable to be checked. None or empty is consideted valid
+ 'reverse': by default convert form str to int (Mbps), if True it convert from number to units
+ Return:
+ None
+ '''
+ if type(data) is dict:
+ for k in data.keys():
+ if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
+ _convert_bandwidth(data[k], reverse, logger)
+ if "bandwidth" in data:
+ try:
+ value=str(data["bandwidth"])
+ if not reverse:
+ pos = value.find("bps")
+ if pos>0:
+ if value[pos-1]=="G": data["bandwidth"] = int(data["bandwidth"][:pos-1]) * 1000
+ elif value[pos-1]=="k": data["bandwidth"]= int(data["bandwidth"][:pos-1]) // 1000
+ else: data["bandwidth"]= int(data["bandwidth"][:pos-1])
+ else:
+ value = int(data["bandwidth"])
+ if value % 1000 == 0:
+ data["bandwidth"] = str(value // 1000) + " Gbps"
+ else:
+ data["bandwidth"] = str(value) + " Mbps"
+ except:
+ if logger:
+ logger.error("convert_bandwidth exception for type '%s' data '%s'", type(data["bandwidth"]), data["bandwidth"])
+ return
+ if type(data) is tuple or type(data) is list:
+ for k in data:
+ if type(k) is dict or type(k) is tuple or type(k) is list:
+ _convert_bandwidth(k, reverse, logger)
+
+def _convert_str2boolean(data, items):
+ '''Check recursively the content of data, and if there is an key contained in items, convert value from string to boolean
+ Done recursively
+ Attributes:
+ 'data': dictionary variable to be checked. None or empty is considered valid
+ 'items': tuple of keys to convert
+ Return:
+ None
+ '''
+ if type(data) is dict:
+ for k in data.keys():
+ if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
+ _convert_str2boolean(data[k], items)
+ if k in items:
+ if type(data[k]) is str:
+ if data[k]=="false" or data[k]=="False" or data[k]=="0": data[k]=False
+ elif data[k]=="true" or data[k]=="True" or data[k]=="1": data[k]=True
+ elif type(data[k]) is int:
+ if data[k]==0: data[k]=False
+ elif data[k]==1: data[k]=True
+ if type(data) is tuple or type(data) is list:
+ for k in data:
+ if type(k) is dict or type(k) is tuple or type(k) is list:
+ _convert_str2boolean(k, items)
+
class db_base_Exception(httperrors.HttpMappedError):
    '''Common Exception for all database exceptions'''

    def __init__(self, message, http_code=httperrors.Bad_Request):
        # Delegate to HttpMappedError so the exception carries an HTTP
        # status code that the REST layer can map directly to a response.
        super(db_base_Exception, self).__init__(message, http_code)
+
class db_base():
    # Tables listed here have created_at/modified_at columns; new_row and
    # update_rows fill those timestamps automatically for them.
    tables_with_created_field=()
+
+ def __init__(self, host=None, user=None, passwd=None, database=None,
+ log_name='db', log_level=None, lock=None):
+ self.host = host
+ self.user = user
+ self.passwd = passwd
+ self.database = database
+ self.con = None
+ self.log_level=log_level
+ self.logger = logging.getLogger(log_name)
+ if self.log_level:
+ self.logger.setLevel( getattr(logging, log_level) )
+ self.lock = lock or Lock()
+
+ def connect(self, host=None, user=None, passwd=None, database=None):
+ '''Connect to specific data base.
+ The first time a valid host, user, passwd and database must be provided,
+ Following calls can skip this parameters
+ '''
+ try:
+ if host: self.host = host
+ if user: self.user = user
+ if passwd: self.passwd = passwd
+ if database: self.database = database
+
+ self.con = mdb.connect(self.host, self.user, self.passwd, self.database)
+ self.logger.debug("DB: connected to '%s' at '%s@%s'", self.database, self.user, self.host)
+ except mdb.Error as e:
+ raise db_base_Exception("Cannot connect to DataBase '{}' at '{}@{}' Error {}: {}".format(
+ self.database, self.user, self.host, e.args[0], e.args[1]),
+ http_code = httperrors.Unauthorized )
+
    def escape(self, value):
        """Escape ``value`` (adding quotes as needed) using the rules of the
        active MySQL connection, for safe embedding in a SQL statement."""
        return self.con.escape(value)
+
    def escape_string(self, value):
        """Escape special characters in string ``value`` using the active
        connection's character set (no surrounding quotes are added)."""
        return self.con.escape_string(value)
+
+ @retry
+ @with_transaction
+ def get_db_version(self):
+ ''' Obtain the database schema version.
+ Return: (negative, text) if error or version 0.0 where schema_version table is missing
+ (version_int, version_text) if ok
+ '''
+ cmd = "SELECT version_int,version FROM schema_version"
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ highest_version_int=0
+ highest_version=""
+ for row in rows: #look for the latest version
+ if row[0]>highest_version_int:
+ highest_version_int, highest_version = row[0:2]
+ return highest_version_int, highest_version
+
+ def disconnect(self):
+ '''disconnect from specific data base'''
+ try:
+ self.con.close()
+ self.con = None
+ except mdb.Error as e:
+ self.logger.error("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
+ return
+ except AttributeError as e: #self.con not defined
+ if e[0][-5:] == "'con'":
+ self.logger.warning("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
+ return
+ else:
+ raise
+
+ def reconnect(self):
+ """Try to gracefully to the database in case of error"""
+ try:
+ self.con.ping(True) # auto-reconnect if the server is available
+ except:
+ # The server is probably not available...
+ # Let's wait a bit
+ time.sleep(RECOVERY_TIME)
+ self.con = None
+ self.connect()
+
+ def fork_connection(self):
+ """Return a new database object, with a separated connection to the
+ database (and lock), so it can act independently
+ """
+ obj = self.__class__(
+ host=self.host,
+ user=self.user,
+ passwd=self.passwd,
+ database=self.database,
+ log_name=self.logger.name,
+ log_level=self.log_level,
+ lock=Lock()
+ )
+
+ obj.connect()
+
+ return obj
+
    @contextmanager
    def transaction(self, cursor_type=None):
        """DB changes that are executed inside this context will be
        automatically rolled back in case of error.

        This implementation also adds a lock, so threads sharing the same
        connection object are synchronized.

        Arguments:
            cursor_type: default: MySQLdb.cursors.DictCursor

        Yields:
            Cursor object

        References:
            https://www.oreilly.com/library/view/mysql-cookbook-2nd/059652708X/ch15s08.html
            https://github.com/PyMySQL/mysqlclient-python/commit/c64915b1e5c705f4fb10e86db5dcfed0b58552cc
        """
        # Previously MySQLdb had built-in support for that using the context
        # API for the connection object.
        # This support was removed in version 1.40
        # https://github.com/PyMySQL/mysqlclient-python/blob/master/HISTORY.rst#whats-new-in-140
        with self.lock:
            try:
                # Open an explicit transaction only when autocommit is on;
                # otherwise the server has already begun one implicitly.
                if self.con.get_autocommit():
                    self.con.query("BEGIN")

                # The cursor is stored on self (not only yielded) because the
                # rest of the class accesses it through self.cur.
                self.cur = self.con.cursor(cursor_type)
                yield self.cur
            except:  # noqa
                # Bare except is deliberate: roll back on ANY exception
                # (including BaseException) before re-raising it.
                self.con.rollback()
                raise
            else:
                self.con.commit()
+
+
+ def _format_error(self, e, tries=1, command=None,
+ extra=None, table=None, cmd=None, **_):
+ '''Creates a text error base on the produced exception
+ Params:
+ e: mdb exception
+ retry: in case of timeout, if reconnecting to database and retry, or raise and exception
+ cmd: database command that produce the exception
+ command: if the intention is update or delete
+ extra: extra information to add to some commands
+ Return
+ HTTP error in negative, formatted error text
+ ''' # the **_ ignores extra kwargs
+ table_info = ' (table `{}`)'.format(table) if table else ''
+ if cmd:
+ self.logger.debug("Exception '%s' with command '%s'%s", e, cmd, table_info)
+
+ if isinstance(e,AttributeError ):
+ self.logger.debug(str(e), exc_info=True)
+ raise db_base_Exception("DB Exception " + str(e), httperrors.Internal_Server_Error)
+ if e.args[0]==2006 or e.args[0]==2013 : #MySQL server has gone away (((or))) Exception 2013: Lost connection to MySQL server during query
+ # Let's aways reconnect if the connection is lost
+ # so future calls are not affected.
+ self.reconnect()
+
+ if tries > 1:
+ self.logger.warning("DB Exception '%s'. Retry", str(e))
+ return
+ else:
+ raise db_base_Exception("Database connection timeout Try Again", httperrors.Request_Timeout)
+
+ fk=e.args[1].find("foreign key constraint fails")
+ if fk>=0:
+ if command=="update":
+ raise db_base_Exception("tenant_id '{}' not found.".format(extra), httperrors.Not_Found)
+ elif command=="delete":
+ raise db_base_Exception("Resource is not free. There are {} that prevent deleting it.".format(extra), httperrors.Conflict)
+ de = e.args[1].find("Duplicate entry")
+ fk = e.args[1].find("for key")
+ uk = e.args[1].find("Unknown column")
+ wc = e.args[1].find("in 'where clause'")
+ fl = e.args[1].find("in 'field list'")
+ #print de, fk, uk, wc,fl
+ if de>=0:
+ if fk>=0: #error 1062
+ raise db_base_Exception(
+ "Value {} already in use for {}{}".format(
+ e.args[1][de+15:fk], e.args[1][fk+7:], table_info),
+ httperrors.Conflict)
+ if uk>=0:
+ if wc>=0:
+ raise db_base_Exception(
+ "Field {} can not be used for filtering{}".format(
+ e.args[1][uk+14:wc], table_info),
+ httperrors.Bad_Request)
+ if fl>=0:
+ raise db_base_Exception(
+ "Field {} does not exist{}".format(
+ e.args[1][uk+14:wc], table_info),
+ httperrors.Bad_Request)
+ raise db_base_Exception(
+ "Database internal Error{} {}: {}".format(
+ table_info, e.args[0], e.args[1]),
+ httperrors.Internal_Server_Error)
+
+ def __str2db_format(self, data):
+ """Convert string data to database format.
+ If data is None it returns the 'Null' text,
+ otherwise it returns the text surrounded by quotes ensuring internal quotes are escaped.
+ """
+ if data is None:
+ return 'Null'
+ elif isinstance(data[1], str):
+ return json.dumps(data)
+ else:
+ return json.dumps(str(data))
+
+ def __tuple2db_format_set(self, data):
+ """Compose the needed text for a SQL SET, parameter 'data' is a pair tuple (A,B),
+ and it returns the text 'A="B"', where A is a field of a table and B is the value
+ If B is None it returns the 'A=Null' text, without surrounding Null by quotes
+ If B is not None it returns the text "A='B'" or 'A="B"' where B is surrounded by quotes,
+ and it ensures internal quotes of B are escaped.
+ B can be also a dict with special keys:
+ {"INCREMENT": NUMBER}, then it produce "A=A+NUMBER"
+ """
+ if data[1] is None:
+ return str(data[0]) + "=Null"
+ elif isinstance(data[1], str):
+ return str(data[0]) + '=' + json.dumps(data[1])
+ elif isinstance(data[1], dict):
+ if "INCREMENT" in data[1]:
+ return "{A}={A}{N:+d}".format(A=data[0], N=data[1]["INCREMENT"])
+ raise db_base_Exception("Format error for UPDATE field: {!r}".format(data[0]))
+ else:
+ return str(data[0]) + '=' + json.dumps(str(data[1]))
+
    def __create_where(self, data, use_or=None):
        """
        Compose the needed text for a SQL WHERE, parameter 'data' can be a dict or a list of dict. By default lists are
        concatenated with OR and dict with AND, unless parameter 'use_or' indicates other thing.
        If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
        If value is None, it will produce 'key is null'
        If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
        keys can be suffixed by >,<,<>,>=,<=,' LIKE ' so that this is used to compare key and value instead of "="
        The special keys "OR", "AND" with a dict value is used to create a nested WHERE
        If a list, each item will be a dictionary that will be concatenated with OR by default
        :param data: dict or list of dicts
        :param use_or: Can be None (use default behaviour), True (use OR) or False (use AND)
        :return: a string with the content to send to mysql
        """
        cmd = []
        if isinstance(data, dict):
            for k, v in data.items():
                # the special "OR"/"AND" keys recurse with an explicit connector
                if k == "OR":
                    cmd.append("(" + self.__create_where(v, use_or=True) + ")")
                    continue
                elif k == "AND":
                    cmd.append("(" + self.__create_where(v, use_or=False) + ")")
                    continue

                # append "=" unless the key already ends with its own operator
                if k.endswith(">") or k.endswith("<") or k.endswith("=") or k.endswith(" LIKE "):
                    pass
                else:
                    k += "="

                if v is None:
                    # turn "=" into "is" and "<>" into "is not" for NULL tests
                    cmd.append(k.replace("=", " is").replace("<>", " is not") + " Null")
                elif isinstance(v, (tuple, list)):
                    # several candidate values for the same key -> OR group
                    cmd2 = []
                    for v2 in v:
                        if v2 is None:
                            cmd2.append(k.replace("=", " is").replace("<>", " is not") + " Null")
                        elif isinstance(v2, str):
                            # json.dumps doubles as a quote/escape helper here
                            cmd2.append(k + json.dumps(v2))
                        else:
                            cmd2.append(k + json.dumps(str(v2)))
                    cmd.append("(" + " OR ".join(cmd2) + ")")
                elif isinstance(v, str):
                    cmd.append(k + json.dumps(v))
                else:
                    cmd.append(k + json.dumps(str(v)))
        elif isinstance(data, (tuple, list)):
            # a list of filter dicts is OR-ed together by default
            if use_or is None:
                use_or = True
            for k in data:
                cmd.append("(" + self.__create_where(k) + ")")
        else:
            raise db_base_Exception("invalid WHERE clause at '{}'".format(data))
        if use_or:
            return " OR ".join(cmd)
        return " AND ".join(cmd)
+
+ def __remove_quotes(self, data):
+ '''remove single quotes ' of any string content of data dictionary'''
+ for k,v in data.items():
+ if type(v) == str:
+ if "'" in v:
+ data[k] = data[k].replace("'","_")
+
+ def _update_rows(self, table, UPDATE, WHERE, modified_time=0):
+ """ Update one or several rows of a table.
+ :param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
+ :param table: database table to update
+ :param WHERE: dict or list of dicts to compose the SQL WHERE clause.
+ If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
+ If value is None, it will produce 'key is null'
+ If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
+ keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
+ The special keys "OR", "AND" with a dict value is used to create a nested WHERE
+ If a list, each item will be a dictionary that will be concatenated with OR
+ :return: the number of updated rows, raises exception upon error
+ """
+ # gettting uuid
+ values = ",".join(map(self.__tuple2db_format_set, UPDATE.items() ))
+ if modified_time:
+ values += "{}modified_at={:f}".format("," if values else "", modified_time)
+ cmd = "UPDATE " + table + " SET " + values + " WHERE " + self.__create_where(WHERE)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ return self.cur.rowcount
+
+ def _new_uuid(self, root_uuid=None, used_table=None, created_time=0):
+ """
+ Generate a new uuid. It DOES NOT begin or end the transaction, so self.con.cursor must be created
+ :param root_uuid: master uuid of the transaction
+ :param used_table: the table this uuid is intended for
+ :param created_time: time of creation
+ :return: the created uuid
+ """
+
+ uuid = str(myUuid.uuid1())
+ # defining root_uuid if not provided
+ if root_uuid is None:
+ root_uuid = uuid
+ if created_time:
+ created_at = created_time
+ else:
+ created_at = time.time()
+ # inserting new uuid
+ cmd = "INSERT INTO uuids (uuid, root_uuid, used_at, created_at) VALUES ('{:s}','{:s}','{:s}', {:f})".format(
+ uuid, root_uuid, used_table, created_at)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ return uuid
+
+ def _new_row_internal(self, table, INSERT, add_uuid=False, root_uuid=None, created_time=0, confidential_data=False):
+ ''' Add one row into a table. It DOES NOT begin or end the transaction, so self.con.cursor must be created
+ Attribute
+ INSERT: dictionary with the key:value to insert
+ table: table where to insert
+ add_uuid: if True, it will create an uuid key entry at INSERT if not provided
+ created_time: time to add to the created_at column
+ It checks presence of uuid and add one automatically otherwise
+ Return: uuid
+ '''
+
+ if add_uuid:
+ #create uuid if not provided
+ if 'uuid' not in INSERT:
+ uuid = INSERT['uuid'] = str(myUuid.uuid1()) # create_uuid
+ else:
+ uuid = str(INSERT['uuid'])
+ else:
+ uuid=None
+ if add_uuid:
+ #defining root_uuid if not provided
+ if root_uuid is None:
+ root_uuid = uuid
+ if created_time:
+ created_at = created_time
+ else:
+ created_at=time.time()
+ #inserting new uuid
+ cmd = "INSERT INTO uuids (uuid, root_uuid, used_at, created_at) VALUES ('{:s}','{:s}','{:s}', {:f})".format(uuid, root_uuid, table, created_at)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ #insertion
+ cmd= "INSERT INTO " + table +" SET " + \
+ ",".join(map(self.__tuple2db_format_set, INSERT.items() ))
+ if created_time:
+ cmd += ",created_at={time:.9f},modified_at={time:.9f}".format(time=created_time)
+ if confidential_data:
+ index = cmd.find("SET")
+ subcmd = cmd[:index] + 'SET...'
+ self.logger.debug(subcmd)
+ else:
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ self.cur.rowcount
+ return uuid
+
+ def _get_rows(self,table,uuid):
+ cmd = "SELECT * FROM {} WHERE uuid='{}'".format(str(table), str(uuid))
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ return rows
+
+ @retry
+ @with_transaction
+ def new_row(self, table, INSERT, add_uuid=False, created_time=0, confidential_data=False):
+ ''' Add one row into a table.
+ Attribute
+ INSERT: dictionary with the key: value to insert
+ table: table where to insert
+ tenant_id: only useful for logs. If provided, logs will use this tenant_id
+ add_uuid: if True, it will create an uuid key entry at INSERT if not provided
+ It checks presence of uuid and add one automatically otherwise
+ Return: uuid
+ '''
+ if table in self.tables_with_created_field and created_time==0:
+ created_time=time.time()
+ return self._new_row_internal(table, INSERT, add_uuid, None, created_time, confidential_data)
+
+ @retry
+ @with_transaction
+ def update_rows(self, table, UPDATE, WHERE, modified_time=None, attempt=_ATTEMPT):
+ """ Update one or several rows of a table.
+ :param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
+ :param table: database table to update
+ :param WHERE: dict or list of dicts to compose the SQL WHERE clause.
+ If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
+ If value is None, it will produce 'key is null'
+ If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
+ keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
+ The special keys "OR", "AND" with a dict value is used to create a nested WHERE
+ If a list, each item will be a dictionary that will be concatenated with OR
+ :param modified_time: Can contain the time to be set to the table row.
+ None to set automatically, 0 to do not modify it
+ :return: the number of updated rows, raises exception upon error
+ """
+ if table in self.tables_with_created_field and modified_time is None:
+ modified_time = time.time()
+
+ return self._update_rows(table, UPDATE, WHERE, modified_time)
+
    def _delete_row_by_id_internal(self, table, uuid):
        # Delete the row itself...
        cmd = "DELETE FROM {} WHERE uuid = '{}'".format(table, uuid)
        self.logger.debug(cmd)
        self.cur.execute(cmd)
        deleted = self.cur.rowcount
        # ...then delete its uuid bookkeeping. A fresh cursor is created on
        # purpose so the second rowcount cannot clobber the result above.
        self.cur = self.con.cursor()
        cmd = "DELETE FROM uuids WHERE root_uuid = '{}'".format(uuid)
        self.logger.debug(cmd)
        self.cur.execute(cmd)
        return deleted
+
    @retry(command='delete', extra='dependencies')
    @with_transaction
    def delete_row_by_id(self, table, uuid):
        # Transactional wrapper: deletes the row with this uuid from `table`
        # plus its uuids bookkeeping, retrying on transient DB errors.
        return self._delete_row_by_id_internal(table, uuid)
+
+ @retry
+ def delete_row(self, attempt=_ATTEMPT, **sql_dict):
+ """ Deletes rows from a table.
+ :param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
+ :param FROM: string with table name (Mandatory)
+ :param WHERE: dict or list of dicts to compose the SQL WHERE clause. (Optional)
+ If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
+ If value is None, it will produce 'key is null'
+ If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
+ keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
+ The special keys "OR", "AND" with a dict value is used to create a nested WHERE
+ If a list, each item will be a dictionary that will be concatenated with OR
+ :return: the number of deleted rows, raises exception upon error
+ """
+ # print sql_dict
+ cmd = "DELETE FROM " + str(sql_dict['FROM'])
+ if sql_dict.get('WHERE'):
+ cmd += " WHERE " + self.__create_where(sql_dict['WHERE'])
+ if sql_dict.get('LIMIT'):
+ cmd += " LIMIT " + str(sql_dict['LIMIT'])
+
+ attempt.info['cmd'] = cmd
+
+ with self.transaction():
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ deleted = self.cur.rowcount
+ return deleted
+
+ @retry
+ @with_transaction(cursor='dict')
+ def get_rows_by_id(self, table, uuid, attempt=_ATTEMPT):
+ '''get row from a table based on uuid'''
+ cmd="SELECT * FROM {} where uuid='{}'".format(str(table), str(uuid))
+ attempt.info['cmd'] = cmd
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ return rows
+
+ @retry
+ def get_rows(self, attempt=_ATTEMPT, **sql_dict):
+ """ Obtain rows from a table.
+ :param SELECT: list or tuple of fields to retrieve) (by default all)
+ :param FROM: string with table name (Mandatory)
+ :param WHERE: dict or list of dicts to compose the SQL WHERE clause. (Optional)
+ If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
+ If value is None, it will produce 'key is null'
+ If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
+ keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
+ The special keys "OR", "AND" with a dict value is used to create a nested WHERE
+ If a list, each item will be a dictionary that will be concatenated with OR
+ :param LIMIT: limit the number of obtained entries (Optional)
+ :param ORDER_BY: list or tuple of fields to order, add ' DESC' to each item if inverse order is required
+ :return: a list with dictionaries at each row, raises exception upon error
+ """
+ # print sql_dict
+ cmd = "SELECT "
+ if 'SELECT' in sql_dict:
+ if isinstance(sql_dict['SELECT'], (tuple, list)):
+ cmd += ",".join(map(str, sql_dict['SELECT']))
+ else:
+ cmd += sql_dict['SELECT']
+ else:
+ cmd += "*"
+
+ cmd += " FROM " + str(sql_dict['FROM'])
+ if sql_dict.get('WHERE'):
+ cmd += " WHERE " + self.__create_where(sql_dict['WHERE'])
+
+ if 'ORDER_BY' in sql_dict:
+ cmd += " ORDER BY "
+ if isinstance(sql_dict['ORDER_BY'], (tuple, list)):
+ cmd += ",".join(map(str, sql_dict['ORDER_BY']))
+ else:
+ cmd += str(sql_dict['ORDER_BY'])
+
+ if 'LIMIT' in sql_dict:
+ cmd += " LIMIT " + str(sql_dict['LIMIT'])
+
+ attempt.info['cmd'] = cmd
+
+ with self.transaction(mdb.cursors.DictCursor):
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ return rows
+
+ @retry
+ def get_table_by_uuid_name(self, table, uuid_name, error_item_text=None, allow_several=False, WHERE_OR={}, WHERE_AND_OR="OR", attempt=_ATTEMPT):
+ ''' Obtain One row from a table based on name or uuid.
+ Attribute:
+ table: string of table name
+ uuid_name: name or uuid. If not uuid format is found, it is considered a name
+ allow_several: if False return ERROR if more than one row are found
+ error_item_text: in case of error it identifies the 'item' name for a proper output text
+ 'WHERE_OR': dict of key:values, translated to key=value OR ... (Optional)
+ 'WHERE_AND_OR: str 'AND' or 'OR'(by default) mark the priority to 'WHERE AND (WHERE_OR)' or (WHERE) OR WHERE_OR' (Optional
+ Return: if allow_several==False, a dictionary with this row, or error if no item is found or more than one is found
+ if allow_several==True, a list of dictionaries with the row or rows, error if no item is found
+ '''
+
+ if error_item_text==None:
+ error_item_text = table
+ what = 'uuid' if af.check_valid_uuid(uuid_name) else 'name'
+ cmd = " SELECT * FROM {} WHERE {}='{}'".format(table, what, uuid_name)
+ if WHERE_OR:
+ where_or = self.__create_where(WHERE_OR, use_or=True)
+ if WHERE_AND_OR == "AND":
+ cmd += " AND (" + where_or + ")"
+ else:
+ cmd += " OR " + where_or
+
+ attempt.info['cmd'] = cmd
+
+ with self.transaction(mdb.cursors.DictCursor):
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ number = self.cur.rowcount
+ if number == 0:
+ raise db_base_Exception("No {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Not_Found)
+ elif number > 1 and not allow_several:
+ raise db_base_Exception("More than one {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Conflict)
+ if allow_several:
+ rows = self.cur.fetchall()
+ else:
+ rows = self.cur.fetchone()
+ return rows
+
+ @retry(table='uuids')
+ @with_transaction(cursor='dict')
+ def get_uuid(self, uuid):
+ '''check in the database if this uuid is already present'''
+ self.cur.execute("SELECT * FROM uuids where uuid='" + str(uuid) + "'")
+ rows = self.cur.fetchall()
+ return self.cur.rowcount, rows
+
+ @retry
+ @with_transaction(cursor='dict')
+ def get_uuid_from_name(self, table, name):
+ '''Searchs in table the name and returns the uuid
+ '''
+ where_text = "name='" + name +"'"
+ self.cur.execute("SELECT * FROM " + table + " WHERE "+ where_text)
+ rows = self.cur.fetchall()
+ if self.cur.rowcount==0:
+ return 0, "Name {} not found in table {}".format(name, table)
+ elif self.cur.rowcount>1:
+ return self.cur.rowcount, "More than one VNF with name {} found in table {}".format(name, table)
+ return self.cur.rowcount, rows[0]["uuid"]
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+import logging
+from functools import wraps
+
+import bottle
+import yaml
+
# HTTP status codes used across the openmano API, exposed as module-level
# constants so the rest of the code can reference them by name
# (e.g. httperrors.Not_Found).
Bad_Request = 400
Unauthorized = 401
Not_Found = 404
Forbidden = 403
Method_Not_Allowed = 405
Not_Acceptable = 406
Request_Timeout = 408
Conflict = 409
Service_Unavailable = 503
Internal_Server_Error = 500
+
+
class HttpMappedError(Exception):
    """Root of an exception hierarchy whose members translate directly into
    HTTP error responses.

    Besides the usual message, the constructor accepts an optional
    ``http_code`` keyword (an integer HTTP status code) that request
    handlers can use when building the response.
    """

    def __init__(self, message, http_code=Internal_Server_Error):
        super(HttpMappedError, self).__init__(message)
        self.http_code = http_code
+
+
class ErrorHandler(object):
    """Defines a default strategy for handling HttpMappedError.

    This class implements a wrapper (can also be used as decorator), that
    watches out for different exceptions and logs them accordingly before
    translating them into a bottle error response.

    NOTE: the order of the except clauses below is significant, and the
    exact log messages are asserted by the unit tests -- do not reword them.

    Arguments:
        logger(logging.Logger): logger object to be used to report errors
    """
    def __init__(self, logger=None):
        # Fall back to the shared 'openmano.http' logger when none is given
        self.logger = logger or logging.getLogger('openmano.http')

    def __call__(self, function):
        @wraps(function)
        def _wraped(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            except bottle.HTTPError:
                # Already a proper bottle response: propagate it untouched
                raise
            except HttpMappedError as ex:
                # Application-level error: log and reply with its own code
                self.logger.error(
                    "%s error %s",
                    function.__name__, ex.http_code, exc_info=True)
                bottle.abort(ex.http_code, str(ex))
            except yaml.YAMLError as ex:
                # Serialization problems are reported as a client error
                self.logger.error(
                    "YAML error while trying to serialize/unserialize fields",
                    exc_info=True)
                bottle.abort(Bad_Request, type(ex).__name__ + ": " + str(ex))
            except Exception as ex:
                # Anything else is an internal server error
                self.logger.error("Unexpected exception: ", exc_info=True)
                bottle.abort(Internal_Server_Error,
                             type(ex).__name__ + ": " + str(ex))

        return _wraped
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+from types import MethodType
+
+from bottle import Bottle
+
+
class route(object):
    """Decorator that records routing information on the decorated function,
    so the actual bottle route registration can be performed later.

    This makes it possible to use plain methods (OOP) with bottle.

    Arguments:
        method: HTTP verb (e.g. ``'get'``, ``'post'``, ``'put'``, ...)
        path: URL path that will be handled by the callback
    """
    def __init__(self, method, path, **kwargs):
        # Merge the upper-cased verb into the keyword options and keep the
        # pair (path, options) for later consumption by the handler.
        options = dict(kwargs, method=method.upper())
        self.route_info = (path, options)

    def __call__(self, function):
        # Attach the stored info to the function itself and return it
        # unchanged, so normal decorator stacking still works.
        function.route_info = self.route_info
        return function
+
+
class BaseHandler(object):
    """Base class that allows isolated webapp implementation using Bottle,
    when used in conjunction with the ``route`` decorator.

    A ``Handler`` groups the Bottle routes/callbacks related to a specific
    topic, and can produce a WSGI app suitable for mounting into (or merging
    with) a more general bottle application.

    Example:

        from http_tools.handler import Handler, route
        from http_tools.errors import ErrorHandler

        class MyHandler(Handler):
            plugins = [ErrorHandler()]
            url_base = '/my/url/base'

            @route('GET', '/some/path/<var>')
            def get_var(self, var):
                return var

        app = MyHandler.wsgi_app
        # ^ Webapp with a `GET /my/url/base/some/path/<var>` route
    """
    _wsgi_app = None  # cache for the generated Bottle application

    url_base = ''
    """String representing a path fragment to be prepended to the routes"""

    plugins = []
    """Bottle plugins to be installed when creating the WSGI app"""

    @property
    def wsgi_app(self):
        """Create (once) a WSGI app out of the ``route``-decorated methods"""
        if self._wsgi_app:
            # Already built: reuse the cached app
            return self._wsgi_app

        app = Bottle()
        for attr_name in dir(self):
            if attr_name == 'wsgi_app':
                continue  # skip this property to avoid infinite recursion
            member = getattr(self, attr_name)
            if not isinstance(member, MethodType):
                continue
            if not hasattr(member, 'route_info'):
                continue  # regular method, not decorated with ``route``
            path, kwargs = member.route_info
            kwargs.update(callback=member, apply=self.plugins)
            app.route(self.url_base + path, **kwargs)

        self._wsgi_app = app
        return app
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+#
+# Util functions previously in `httpserver`
+#
+
+__author__ = "Alfonso Tierno, Gerardo Garcia"
+
+import json
+import logging
+
+import bottle
+import yaml
+from jsonschema import exceptions as js_e
+from jsonschema import validate as js_v
+
+from . import errors as httperrors
+from io import TextIOWrapper
+
+logger = logging.getLogger('openmano.http')
+
+
def remove_clear_passwd(data):
    """
    Mask clear-text passwords in the given text before it is logged.

    Every occurrence of 'password: <value>' or 'passwd: <value>' has its
    value (up to the end of the line) replaced by '******'.

    :param data: text possibly containing clear passwords
    :return: the same text with password values masked
    """
    for pattern in ('password: ', 'passwd: '):
        init = data.find(pattern)
        while init != -1:
            end = data.find('\n', init)
            if end == -1:
                # Fix: when the match is on the last line (no trailing
                # newline) the original used end=-1, and data[end:] kept the
                # last character of the secret. Mask up to the end of data.
                end = len(data)
            data = data[:init] + '{}******'.format(pattern) + data[end:]
            # Continue searching after the occurrence just masked
            init = data.find(pattern, init + 1)
    return data
+
+
def change_keys_http2db(data, http_db, reverse=False):
    '''Rename the keys of 'data' according to the 'http_db' mapping.
    This allows translating HTTP interface names into database names.
    When reverse is True the translation goes the other way around.
    Attributes:
        data: a dictionary or a list/tuple of dictionaries
        http_db: dictionary with http names as keys and database names as values
        reverse: by default the change is done from http api to database;
            if True the change is done the other way around
    Return: None, 'data' is modified in place'''
    if type(data) is tuple or type(data) is list:
        # Recurse over every element of the sequence
        for element in data:
            change_keys_http2db(element, http_db, reverse)
    elif type(data) is dict or type(data) is bottle.FormsDict:
        for http_key, db_key in http_db.items():
            if reverse:
                if db_key in data:
                    data[http_key] = data.pop(db_key)
            elif http_key in data:
                data[db_key] = data.pop(http_key)
+
+
def format_out(data):
    '''Return 'data' (a dictionary) serialized in the format requested by the
    Accept header: yaml when asked for, json otherwise (the default).
    '''
    # Dump once; the same text serves both the debug log and the yaml answer
    yaml_text = yaml.safe_dump(data, explicit_start=True, indent=4,
                               default_flow_style=False, tags=False,
                               allow_unicode=True)
    logger.debug("OUT: " + yaml_text)
    accept = bottle.request.headers.get('Accept')
    if accept and 'application/yaml' in accept:
        bottle.response.content_type = 'application/yaml'
        return yaml_text  # , canonical=True, default_style='"' could be used
    # By default answer in json
    bottle.response.content_type = 'application/json'
    return json.dumps(data, indent=4) + "\n"
+
+
def format_in(default_schema, version_fields=None, version_dict_schema=None, confidential_data=False):
    """
    Parse the content of an HTTP request (json or yaml) against a json_schema.

    :param default_schema: The schema to be parsed by default
        if no version field is found in the client data.
        If None no validation is done
    :param version_fields: If provided it contains a tuple or list with the
        fields to iterate across the client data to obtain the version
    :param version_dict_schema: It contains a dictionary with the version as key,
        and json schema to apply as value.
        It can contain a None as key, and this is applied
        if the client data version does not match any key
    :param confidential_data: when True, password values are masked before
        being logged
    :return: user_data, used_schema: if the data is successfully decoded and
        matches the schema.

    Launch a bottle abort if fails
    """
    #print "HEADERS :" + str(bottle.request.headers.items())
    try:
        # error_text carries the context of the stage being executed, so the
        # generic except clause below can produce a meaningful message
        error_text = "Invalid header format "
        format_type = bottle.request.headers.get('Content-Type', 'application/json')
        if 'application/json' in format_type:
            error_text = "Invalid json format "
            # Use the json decoder instead of the bottle decoder because it
            # reports the location of format errors with a ValueError exception
            client_data = json.load(TextIOWrapper(bottle.request.body, encoding="utf-8"))  # TODO py3
            #client_data = bottle.request.json()
        elif 'application/yaml' in format_type:
            error_text = "Invalid yaml format "
            # NOTE(review): yaml.Loader can build arbitrary python objects from
            # untrusted input; yaml.SafeLoader would be safer -- confirm before
            # changing, callers may rely on full-loader behavior
            client_data = yaml.load(bottle.request.body, Loader=yaml.Loader)
        elif 'application/xml' in format_type:
            bottle.abort(501, "Content-Type: application/xml not supported yet.")
        else:
            logger.warning('Content-Type ' + str(format_type) + ' not supported.')
            bottle.abort(httperrors.Not_Acceptable, 'Content-Type ' + str(format_type) + ' not supported.')
            return
        # if client_data == None:
        #    bottle.abort(httperrors.Bad_Request, "Content error, empty")
        #    return

        # Log the received content, masking passwords when requested
        if confidential_data:
            logger.info('IN: %s', remove_clear_passwd (yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False,
                                                                      tags=False, allow_unicode=True)))
        else:
            logger.info('IN: %s', yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False,
                                                 tags=False, allow_unicode=True) )
        # look for the client provider version
        error_text = "Invalid content "
        if not default_schema and not version_fields:
            # nothing to validate against: return the raw decoded data
            return client_data, None
        client_version = None
        used_schema = None
        if version_fields != None:
            # walk the nested fields to extract the version declared by the client
            client_version = client_data
            for field in version_fields:
                if field in client_version:
                    client_version = client_version[field]
                else:
                    client_version = None
                    break
        if client_version == None:
            used_schema = default_schema
        elif version_dict_schema != None:
            if client_version in version_dict_schema:
                used_schema = version_dict_schema[client_version]
            elif None in version_dict_schema:
                # fallback schema for unknown client versions
                used_schema = version_dict_schema[None]
        if used_schema == None:
            bottle.abort(httperrors.Bad_Request, "Invalid schema version or missing version field")

        js_v(client_data, used_schema)
        return client_data, used_schema
    except (TypeError, ValueError, yaml.YAMLError) as exc:
        # decoding errors: report using the stage context gathered above
        error_text += str(exc)
        logger.error(error_text)
        bottle.abort(httperrors.Bad_Request, error_text)
    except js_e.ValidationError as exc:
        # schema validation errors: point at the offending field path
        logger.error(
            "validate_in error, jsonschema exception")
        error_pos = ""
        if len(exc.path)>0: error_pos=" at " + ":".join(map(json.dumps, exc.path))
        bottle.abort(httperrors.Bad_Request, error_text + exc.message + error_pos)
    #except:
    #    bottle.abort(httperrors.Bad_Request, "Content error: Failed to parse Content-Type", error_pos)
    #    raise
+
def filter_query_string(qs, http2db, allowed):
    '''Process the query string (qs) allowing only valid tokens, for
    avoiding SQL injection.
    Attributes:
        'qs': bottle.FormsDict variable to be processed. None or empty is considered valid
        'http2db': dictionary mapping http API naming (keys) to database naming (values)
        'allowed': list of allowed string tokens (API http naming). Every key of 'qs' must be one of 'allowed'
    Return: a tuple (select, where, limit) ready to be used in a database
        query, all translated to the database naming
        select: list of items to retrieve, filtered by query string 'field=token'.
            If no 'field' is present, the whole allowed list is returned
        where: dictionary with key, value taken from the query string token=value.
            Empty if nothing is provided
        limit: limit dictated by the user with the query string 'limit'. 100 by default
    Aborts (bottle.abort) when a token is not permitted
    '''
    select = []
    where = {}
    limit = 100
    for key in qs:
        if key == 'field':
            # accumulate requested columns and validate each of them
            select += qs.getall(key)
            for value in select:
                if value not in allowed:
                    bottle.abort(httperrors.Bad_Request, "Invalid query string at 'field="+value+"'")
        elif key == 'limit':
            try:
                limit = int(qs[key])
            except:
                bottle.abort(httperrors.Bad_Request, "Invalid query string at 'limit="+qs[key]+"'")
        else:
            if key not in allowed:
                bottle.abort(httperrors.Bad_Request, "Invalid query string at '"+key+"="+qs[key]+"'")
            # the literal string "null" means an SQL NULL filter
            where[key] = None if qs[key] == "null" else qs[key]
    if not select:
        select += allowed
    # translate from http api naming to database naming
    if http2db:
        select = [http2db.get(item, item) for item in select]
        change_keys_http2db(where, http2db)

    return select, where, limit
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+import unittest
+
+import bottle
+
+from .. import errors as httperrors
+from ...tests.helpers import TestCaseWithLogging
+
+
class TestHttpErrors(TestCaseWithLogging):
    """Unit tests for HttpMappedError and ErrorHandler."""

    def test_http_error_base(self):
        # An explicit http_code argument must be stored on the instance...
        err = httperrors.HttpMappedError(http_code=1226324)
        self.assertEqual(err.http_code, 1226324)
        # ...and omitting it must fall back to Internal_Server_Error
        err = httperrors.HttpMappedError()
        self.assertEqual(err.http_code, httperrors.Internal_Server_Error)

    def test_error_handler_should_log_unexpected_errors(self):
        # Given an error handler wrapping a function that raises an
        # unexpected error
        error_handler = httperrors.ErrorHandler(self.logger)

        @error_handler
        def _throw():
            raise AttributeError('some error')

        # when the function is called, the error must be contained by bottle
        with self.assertRaises(bottle.HTTPError):
            _throw()
        # and a proper message must end up in the logs
        self.assertIn("Unexpected exception:", self.caplog.getvalue())

    def test_error_handler_should_log_http_based_errors(self):
        # Given an error handler wrapping a function that raises an error
        # the application knows about
        error_handler = httperrors.ErrorHandler(self.logger)

        @error_handler
        def _throw():
            raise httperrors.HttpMappedError(http_code=404)

        # when the function is called, the error must be contained by bottle
        with self.assertRaises(bottle.HTTPError):
            _throw()
        # and the log line must carry the function name and the http code
        self.assertIn("_throw error 404", self.caplog.getvalue())

    def test_error_handler_should_ignore_bottle_errors(self):
        # Given an error handler wrapping a function that raises a bottle
        # error directly
        error_handler = httperrors.ErrorHandler(self.logger)
        original = bottle.HTTPError()

        @error_handler
        def _throw():
            raise original

        # when the function is called
        with self.assertRaises(bottle.HTTPError) as context:
            _throw()
        # then the exception must bypass the handler untouched
        self.assertEqual(context.exception, original)
+
+
+if __name__ == '__main__':
+ unittest.main()
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+import unittest
+
+from mock import MagicMock, patch
+from webtest import TestApp
+
+from .. import handler
+from ..handler import BaseHandler, route
+
+
class TestIntegration(unittest.TestCase):
    """Integration tests for BaseHandler + route: WSGI app generation,
    caching, url_base prefixing and parameter handling."""

    def test_wsgi_app(self):
        # Given a Handler class that implements a route
        some_plugin = MagicMock()

        class MyHandler(BaseHandler):
            url_base = '/42'
            plugins = [some_plugin]

            @route('get', '/some/path')
            def callback(self):
                return 'some content'

        # Bottle.route is patched so route registrations can be inspected
        # without starting a real app
        route_mock = MagicMock()
        with patch(handler.__name__+'.Bottle.route', route_mock):
            # When we try to access wsgi_app for the first time
            my_handler = MyHandler()
            assert my_handler.wsgi_app
            # then bottle.route should be called with the right arguments
            route_mock.assert_called_once_with('/42/some/path', method='GET',
                                               callback=my_handler.callback,
                                               apply=[some_plugin])

            # When we try to access wsgi_app for the second time
            assert my_handler.wsgi_app
            # then the result should be cached
            # and bottle.route should not be called again
            self.assertEqual(route_mock.call_count, 1)

    def test_route_created(self):
        # Given a Handler class, as in the example documentation
        class MyHandler(BaseHandler):
            def __init__(self):
                self.value = 42

            @route('GET', '/some/path/<param>')
            def callback(self, param):
                return '{} + {}'.format(self.value, param)

        # when this class is used to generate a webapp
        app = TestApp(MyHandler().wsgi_app)

        # then the defined URLs should be available
        response = app.get('/some/path/0')
        self.assertEqual(response.status_code, 200)
        # and the callbacks should have access to ``self``
        response.mustcontain('42 + 0')

    def test_url_base(self):
        # Given a Handler class that allows url_base customization
        class MyHandler(BaseHandler):
            def __init__(self, url_base):
                self.url_base = url_base

            @route('GET', '/some/path/<param>')
            def callback(self, param):
                return param

        # when this class is used to generate a webapp
        app = TestApp(MyHandler('/prefix').wsgi_app)

        # then the prefixed URLs should be available
        response = app.get('/prefix/some/path/content')
        self.assertEqual(response.status_code, 200)
        response.mustcontain('content')

    def test_starting_param(self):
        # Given a Handler class with a route beginning with a param
        class MyHandler(BaseHandler):
            @route('GET', '/<param>/some/path')
            def callback(self, param):
                return '**{}**'.format(param)

        # is used to generate a webapp
        app = TestApp(MyHandler().wsgi_app)

        # when the defined URL is accessed
        response = app.get('/42/some/path')
        # Then no error should happen
        self.assertEqual(response.status_code, 200)
        response.mustcontain('**42**')
+
+
+if __name__ == '__main__':
+ unittest.main()
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+# This tox file allows the devs to run unit tests only for this subpackage.
+# In order to do so, cd into the directory and run `tox`
+
+[tox]
+minversion = 1.8
+envlist = py27,py36,flake8,radon
+skipsdist = True
+
+[testenv]
+changedir = {toxinidir}
+commands =
+ nosetests -d --with-coverage --cover-package=. {posargs:tests}
+deps =
+ WebTest
+ bottle
+ coverage
+ mock
+ nose
+ six
+ PyYaml
+
+[testenv:flake8]
+changedir = {toxinidir}
+deps = flake8
+commands = flake8 {posargs:.}
+
+[testenv:radon]
+changedir = {toxinidir}
+deps = radon
+commands =
+ radon cc --show-complexity --total-average {posargs:.}
+ radon mi -s {posargs:.}
+
+[coverage:run]
+branch = True
+source = {toxinidir}
+omit =
+ tests
+ tests/*
+ */test_*
+ .tox/*
+
+[coverage:report]
+show_missing = True
+
+[flake8]
+exclude =
+ request_processing.py
+ .tox
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+HTTP server implementing the openmano API. It will answer to POST, PUT, GET methods in the appropriate URLs
+and will use the nfvo.py module to run the appropriate method.
+Every YAML/JSON file is checked against a schema in openmano_schemas.py module.
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$17-sep-2014 09:07:15$"
+
+import bottle
+import yaml
+import threading
+import logging
+
+from osm_ro.openmano_schemas import vnfd_schema_v01, vnfd_schema_v02, \
+ nsd_schema_v01, nsd_schema_v02, nsd_schema_v03, scenario_edit_schema, \
+ scenario_action_schema, instance_scenario_action_schema, instance_scenario_create_schema_v01, \
+ tenant_schema, tenant_edit_schema,\
+ datacenter_schema, datacenter_edit_schema, datacenter_action_schema, datacenter_associate_schema,\
+ object_schema, netmap_new_schema, netmap_edit_schema, sdn_controller_schema, sdn_controller_edit_schema, \
+ sdn_port_mapping_schema, sdn_external_port_schema
+
+from .http_tools import errors as httperrors
+from .http_tools.request_processing import (
+ format_out,
+ format_in,
+ filter_query_string
+)
+from .wim.http_handler import WimHandler
+
+from . import nfvo
+from . import utils
+from .db_base import db_base_Exception
+from functools import wraps
+
# Module-level state shared between the httpserver thread and the bottle
# callbacks defined below.
# NOTE(review): 'global' statements at module scope have no effect; they are
# kept here only as a declaration of intent.
global mydb
global url_base
global logger
url_base="/openmano"  # prefix of every route exposed by this module
logger = None  # initialized lazily by httpserver.__init__
+
+
def log_to_logger(fn):
    '''
    Bottle plugin that wraps a request callback so a log line is emitted
    after the request has been handled.
    (This decorator can be extended to take the desired logger as a param.)
    '''
    @wraps(fn)
    def _log_to_logger(*args, **kwargs):
        response = fn(*args, **kwargs)
        # adjust here to log exactly what is needed:
        request = bottle.request
        logger.info('FROM %s %s %s %s', request.remote_addr,
                    request.method,
                    request.url,
                    bottle.response.status)
        return response
    return _log_to_logger
+
class httpserver(threading.Thread):
    """HTTP server thread exposing the openmano API on top of bottle.

    The 'admin' flag only changes the thread name ("http_admin" vs "http"),
    so two instances can serve the regular and the admin API side by side.
    """

    def __init__(self, db, admin=False, host='localhost', port=9090,
                 wim_persistence=None, wim_engine=None):
        """
        :param db: database connection object; stored in the module-level
            'mydb' so the bottle callbacks can reach it
        :param admin: when True the thread is named "http_admin"
        :param host: listening address
        :param port: listening TCP port
        :param wim_persistence: persistence layer handed to the WIM handler
        :param wim_engine: engine handed to the WIM handler
        """
        global mydb
        global logger
        # initialization: create the module logger on first instantiation
        if not logger:
            logger = logging.getLogger('openmano.http')
        threading.Thread.__init__(self)
        self.host = host
        self.port = port  # port where the listening service must be started
        self.name = "http_admin" if admin else "http"
        mydb = db

        self.handlers = [
            WimHandler(db, wim_persistence, wim_engine, url_base)
        ]

        # Ensure that when the main program exits the thread will also exit.
        # was: 'self.daemon = True' immediately followed by the redundant
        # (and, since Python 3.10, deprecated) 'self.setDaemon(True)'
        self.daemon = True

    def run(self, debug=False, quiet=True):
        """Install the request-logging plugin, merge every handler webapp
        into the default bottle app and start serving (blocking call)."""
        bottle.install(log_to_logger)
        default_app = bottle.app()

        for handler in self.handlers:
            default_app.merge(handler.wsgi_app)

        bottle.run(host=self.host, port=self.port, debug=debug, quiet=quiet)
+
+
def run_bottle(db, host_='localhost', port_=9090):
    '''Launch the server in the main thread, so that it can be debugged'''
    httpserver(db, host=host_, port=port_).run(debug=True)  # quiet=True
+
+
@bottle.route(url_base + '/', method='GET')
def http_get():
    """Minimal liveness check for the API root."""
    return 'works'  # TODO: to be completed
+
@bottle.hook('after_request')
def enable_cors():
    '''Add a permissive CORS header to every response.

    Original note: "Don't know yet if really needed. Keep it just in case".
    '''
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
+
@bottle.route(url_base + '/version', method='GET')
def http_get_version():
    """Expose the openmano version string."""
    return nfvo.get_version()
+#
+# VNFs
+#
+
@bottle.route(url_base + '/tenants', method='GET')
def http_get_tenants():
    """List the nfvo tenants, honouring query-string filtering."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    select_, where_, limit_ = filter_query_string(bottle.request.query, None,
                                                  ('uuid', 'name', 'description', 'created_at'))
    try:
        tenants = mydb.get_rows(FROM='nfvo_tenants', SELECT=select_, WHERE=where_, LIMIT=limit_)
        #change_keys_http2db(content, http2db_tenant, reverse=True)
        utils.convert_float_timestamp2str(tenants)
        return format_out({'tenants': tenants})
    except bottle.HTTPError:
        raise
    except db_base_Exception as e:
        logger.error("http_get_tenants error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/tenants/<tenant_id>', method='GET')
def http_get_tenant_id(tenant_id):
    '''get tenant details, can use both uuid or name'''
    #obtain data
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        from_ = 'nfvo_tenants'
        select_, where_, limit_ = filter_query_string(bottle.request.query, None,
                                                      ('uuid', 'name', 'description', 'created_at'))
        # match on the 'uuid' column when the identifier looks like a uuid,
        # otherwise match on 'name'
        what = 'uuid' if utils.check_valid_uuid(tenant_id) else 'name'
        where_[what] = tenant_id
        tenants = mydb.get_rows(FROM=from_, SELECT=select_, WHERE=where_)
        #change_keys_http2db(content, http2db_tenant, reverse=True)
        if len(tenants) == 0:
            bottle.abort(httperrors.Not_Found, "No tenant found with {}='{}'".format(what, tenant_id))
        elif len(tenants) > 1:
            # a name may be ambiguous; uuids should never reach this branch
            bottle.abort(httperrors.Bad_Request, "More than one tenant found with {}='{}'".format(what, tenant_id))
        utils.convert_float_timestamp2str(tenants[0])
        data = {'tenant': tenants[0]}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except db_base_Exception as e:
        logger.error("http_get_tenant_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/tenants', method='POST')
def http_post_tenants():
    '''insert a tenant into the catalogue. '''
    #parse input data
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content,_ = format_in( tenant_schema )
    # drop any field not present in the schema (logged for traceability)
    r = utils.remove_extra_items(http_content, tenant_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        data = nfvo.new_tenant(mydb, http_content['tenant'])
        # answer with the freshly created tenant, reusing the GET handler
        return http_get_tenant_id(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_tenants error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/tenants/<tenant_id>', method='PUT')
def http_edit_tenant_id(tenant_id):
    '''edit tenant details, can use both uuid or name'''
    #parse input data
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content,_ = format_in( tenant_edit_schema )
    # drop any field not present in the schema (logged for traceability)
    r = utils.remove_extra_items(http_content, tenant_edit_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))

    #obtain data, check that only one exist
    try:
        tenant = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
        #edit data: always update by uuid, even when a name was received
        tenant_id = tenant['uuid']
        where={'uuid': tenant['uuid']}
        mydb.update_rows('nfvo_tenants', http_content['tenant'], where)
        # answer with the updated tenant, reusing the GET handler
        return http_get_tenant_id(tenant_id)
    except bottle.HTTPError:
        raise
    except db_base_Exception as e:
        logger.error("http_edit_tenant_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/tenants/<tenant_id>', method='DELETE')
def http_delete_tenant_id(tenant_id):
    """Delete a tenant from the database; accepts both uuid and name."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        deleted = nfvo.delete_tenant(mydb, tenant_id)
        return format_out({"result": "tenant " + deleted + " deleted"})
    except bottle.HTTPError:
        raise
    except db_base_Exception as e:
        logger.error("http_delete_tenant_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters', method='GET')
def http_get_datacenters(tenant_id):
    """List datacenters; tenant_id == 'any' lists all, otherwise only the
    datacenters associated to that tenant."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != 'any':
            #check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        # Build SELECT/WHERE/LIMIT from the URL query string, restricted to
        # this fixed set of columns.
        select_,where_,limit_ = filter_query_string(bottle.request.query, None,
                                                    ('uuid','name','vim_url','type','created_at') )
        if tenant_id != 'any':
            where_['nfvo_tenant_id'] = tenant_id
            # 'created_at' exists in both joined tables; qualify it with the
            # datacenters alias 'd' to avoid an ambiguous column.
            if 'created_at' in select_:
                select_[ select_.index('created_at') ] = 'd.created_at as created_at'
            if 'created_at' in where_:
                where_['d.created_at'] = where_.pop('created_at')
            datacenters = mydb.get_rows(FROM='datacenters as d join tenants_datacenters as td on d.uuid=td.datacenter_id',
                                        SELECT=select_,WHERE=where_,LIMIT=limit_)
        else:
            datacenters = mydb.get_rows(FROM='datacenters',
                                        SELECT=select_,WHERE=where_,LIMIT=limit_)
        #change_keys_http2db(content, http2db_tenant, reverse=True)
        utils.convert_float_timestamp2str(datacenters)
        data={'datacenters' : datacenters}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_datacenters error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vim_accounts', method='GET')
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='GET')
def http_get_vim_account(tenant_id, vim_account_id=None):
    """Get the vim_account list of a tenant, or the details of one account.

    Credentials (account password and any *_password keys inside config)
    are masked with "******" before the response is sent.
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        select_ = ('uuid', 'name', 'dt.datacenter_id as vim_id', 'vim_tenant_name', 'vim_tenant_id', 'user', 'config',
                   'dt.created_at as created_at', 'passwd')
        where_ = {'nfvo_tenant_id': tenant_id}
        if vim_account_id:
            where_['dt.uuid'] = vim_account_id
        from_ = 'tenants_datacenters as td join datacenter_tenants as dt on dt.uuid=td.datacenter_tenant_id'
        vim_accounts = mydb.get_rows(SELECT=select_, FROM=from_, WHERE=where_)

        if len(vim_accounts) == 0 and vim_account_id:
            # BUGFIX: was HTTP_Not_Found, an undefined legacy name; the rest of
            # this module uses the httperrors constants.
            bottle.abort(httperrors.Not_Found, "No vim_account found for tenant {} and id '{}'".format(tenant_id,
                                                                                                       vim_account_id))
        for vim_account in vim_accounts:
            # Never leak credentials back to the client.
            if vim_account["passwd"]:
                vim_account["passwd"] = "******"
            if vim_account['config'] is not None:
                try:
                    config_dict = yaml.load(vim_account['config'], Loader=yaml.Loader)
                    vim_account['config'] = config_dict
                    if vim_account['config'].get('admin_password'):
                        vim_account['config']['admin_password'] = "******"
                    if vim_account['config'].get('vcenter_password'):
                        vim_account['config']['vcenter_password'] = "******"
                    if vim_account['config'].get('nsx_password'):
                        vim_account['config']['nsx_password'] = "******"
                except Exception as e:
                    # Best effort: an unparsable config is returned as-is.
                    logger.error("Exception '%s' while trying to load config information", str(e))
        # change_keys_http2db(content, http2db_datacenter, reverse=True)
        if vim_account_id:
            return format_out({"datacenter": vim_accounts[0]})
        else:
            return format_out({"datacenters": vim_accounts})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # BUGFIX: log message named the wrong handler (http_get_datacenter_id).
        logger.error("http_get_vim_account error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        # BUGFIX: was HTTP_Internal_Server_Error, an undefined legacy name.
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='GET')
def http_get_datacenter_id(tenant_id, datacenter_id):
    '''get datacenter details, can use both uuid or name'''
    # tenant_id == 'any' skips the tenant check and the vim-tenant expansion.
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != 'any':
            #check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        what = 'uuid' if utils.check_valid_uuid(datacenter_id) else 'name'
        where_={}
        where_[what] = datacenter_id
        select_=['uuid', 'name','vim_url', 'vim_url_admin', 'type', 'd.config as config', 'description', 'd.created_at as created_at']
        if tenant_id != 'any':
            # Restrict to datacenters associated to this tenant through the
            # tenants_datacenters join table.
            select_.append("datacenter_tenant_id")
            where_['td.nfvo_tenant_id']= tenant_id
            from_='datacenters as d join tenants_datacenters as td on d.uuid=td.datacenter_id'
        else:
            from_='datacenters as d'
        datacenters = mydb.get_rows(
            SELECT=select_,
            FROM=from_,
            WHERE=where_)

        # Exactly one match is expected, whether looked up by uuid or by name.
        if len(datacenters)==0:
            bottle.abort( httperrors.Not_Found, "No datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
        elif len(datacenters)>1:
            bottle.abort( httperrors.Bad_Request, "More than one datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
        datacenter = datacenters[0]
        if tenant_id != 'any':
            #get vim tenant info
            vim_tenants = mydb.get_rows(
                SELECT=("vim_tenant_name", "vim_tenant_id", "user", "passwd", "config"),
                FROM="datacenter_tenants",
                WHERE={"uuid": datacenters[0]["datacenter_tenant_id"]},
                ORDER_BY=("created", ) )
            del datacenter["datacenter_tenant_id"]
            datacenter["vim_tenants"] = vim_tenants
            # Mask credentials in each vim tenant (password and the known
            # *_password keys inside its yaml config) before returning them.
            for vim_tenant in vim_tenants:
                if vim_tenant["passwd"]:
                    vim_tenant["passwd"] = "******"
                if vim_tenant['config'] != None:
                    try:
                        config_dict = yaml.load(vim_tenant['config'], Loader=yaml.Loader)
                        vim_tenant['config'] = config_dict
                        if vim_tenant['config'].get('admin_password'):
                            vim_tenant['config']['admin_password'] = "******"
                        if vim_tenant['config'].get('vcenter_password'):
                            vim_tenant['config']['vcenter_password'] = "******"
                        if vim_tenant['config'].get('nsx_password'):
                            vim_tenant['config']['nsx_password'] = "******"
                    except Exception as e:
                        # Best effort: unparsable config is returned untouched.
                        logger.error("Exception '%s' while trying to load config information", str(e))

        # Mask the same credential keys in the datacenter's own config.
        if datacenter['config'] != None:
            try:
                config_dict = yaml.load(datacenter['config'], Loader=yaml.Loader)
                datacenter['config'] = config_dict
                if datacenter['config'].get('admin_password'):
                    datacenter['config']['admin_password'] = "******"
                if datacenter['config'].get('vcenter_password'):
                    datacenter['config']['vcenter_password'] = "******"
                if datacenter['config'].get('nsx_password'):
                    datacenter['config']['nsx_password'] = "******"
            except Exception as e:
                logger.error("Exception '%s' while trying to load config information", str(e))
        #change_keys_http2db(content, http2db_datacenter, reverse=True)
        utils.convert_float_timestamp2str(datacenter)
        data={'datacenter' : datacenter}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/datacenters', method='POST')
def http_post_datacenters():
    """Register a datacenter in the catalogue and return its details."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # The body may carry credentials, so parse it in confidential mode.
    content, _ = format_in(datacenter_schema, confidential_data=True)
    extra = utils.remove_extra_items(content, datacenter_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        datacenter_uuid = nfvo.new_datacenter(mydb, content['datacenter'])
        # Return the created datacenter by reusing the GET handler.
        return http_get_datacenter_id('any', datacenter_uuid)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_datacenters error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/datacenters/<datacenter_id_name>', method='PUT')
def http_edit_datacenter_id(datacenter_id_name):
    """Edit a datacenter; accepts both uuid and name as identifier."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # Validate the body against the schema and drop any unknown keys.
    content, _ = format_in(datacenter_edit_schema)
    extra = utils.remove_extra_items(content, datacenter_edit_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))

    try:
        datacenter_id = nfvo.edit_datacenter(mydb, datacenter_id_name, content['datacenter'])
        # Return the updated datacenter by reusing the GET handler.
        return http_get_datacenter_id('any', datacenter_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_edit_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='POST')
def http_post_sdn_controller(tenant_id):
    """Register an SDN controller for the given tenant and return it."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    content, _ = format_in(sdn_controller_schema)
    try:
        logger.debug("tenant_id: "+tenant_id)
        controller_uuid = nfvo.sdn_controller_create(mydb, tenant_id, content['sdn_controller'])
        # Echo back the stored controller description.
        return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, controller_uuid)})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_sdn_controller error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='PUT')
def http_put_sdn_controller_update(tenant_id, controller_id):
    """Update an SDN controller and return its stored description."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(sdn_controller_edit_schema)
    try:
        logger.debug("content: {}".format(http_content['sdn_controller']))

        # The update result is not needed; the fresh listing below is returned.
        nfvo.sdn_controller_update(mydb, tenant_id, controller_id, http_content['sdn_controller'])
        return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, controller_id)})

    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # BUGFIX: log message previously named http_post_sdn_controller.
        logger.error("http_put_sdn_controller_update error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='GET')
def http_get_sdn_controller(tenant_id):
    """Return the list of SDN controllers registered for the tenant."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)

        controllers = nfvo.sdn_controller_list(mydb, tenant_id)
        return format_out({'sdn_controllers': controllers})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_sdn_controller error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='GET')
def http_get_sdn_controller_id(tenant_id, controller_id):
    """Return the details of one SDN controller of the tenant."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        controller = nfvo.sdn_controller_list(mydb, tenant_id, controller_id)
        return format_out({"sdn_controllers": controller})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_sdn_controller_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='DELETE')
def http_delete_sdn_controller_id(tenant_id, controller_id):
    """Remove an SDN controller from the tenant."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        result = nfvo.sdn_controller_delete(mydb, tenant_id, controller_id)
        return format_out(result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_sdn_controller_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='POST')
def http_post_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Store the SDN port mapping of a datacenter."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    content, _ = format_in(sdn_port_mapping_schema)
    try:
        mapping = nfvo.datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, content['sdn_port_mapping'])
        return format_out({"sdn_port_mapping": mapping})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_datacenter_sdn_port_mapping error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='GET')
def http_get_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Return the SDN port mapping of a datacenter; accepts uuid or name."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)

        mapping = nfvo.datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id)
        return format_out({"sdn_port_mapping": mapping})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_datacenter_sdn_port_mapping error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='DELETE')
def http_delete_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Clear the SDN port mapping of a datacenter; accepts uuid or name."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        result = nfvo.datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id)
        return format_out({"result": result})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_datacenter_sdn_port_mapping error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/networks', method='GET') #deprecated
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='GET')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='GET')
def http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
    """Get one netmap (by uuid or name) or all netmaps of a datacenter.

    Both uuid and name are accepted for datacenter_id and netmap_id.
    Responds 404 when a specific netmap is requested but not found.
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    #obtain data
    try:
        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
        where_ = {"datacenter_id": datacenter_dict['uuid']}
        if netmap_id:
            if utils.check_valid_uuid(netmap_id):
                where_["uuid"] = netmap_id
            else:
                where_["name"] = netmap_id
        netmaps = mydb.get_rows(FROM='datacenter_nets',
                                SELECT=('name','vim_net_id as vim_id', 'uuid', 'type','multipoint','shared','description', 'created_at'),
                                WHERE=where_)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        if netmap_id and len(netmaps) == 1:
            data = {'netmap': netmaps[0]}
        elif netmap_id and len(netmaps) == 0:
            # bottle.abort raises HTTPError, so control never continues past it.
            # BUGFIX: removed an unreachable bare 'return' that followed it.
            bottle.abort(httperrors.Not_Found, "No netmap found with " + " and ".join(map(lambda x: str(x[0])+": "+str(x[1]), where_.items())) )
        else:
            data = {'netmaps': netmaps}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_getnetwork_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='DELETE')
def http_delnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
    """Delete one netmap (by uuid or name) or every netmap of a datacenter."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # Resolve the datacenter and build the deletion filter.
        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
        where_ = {"datacenter_id": datacenter_dict['uuid']}
        if netmap_id:
            if utils.check_valid_uuid(netmap_id):
                where_["uuid"] = netmap_id
            else:
                where_["name"] = netmap_id
        #change_keys_http2db(content, http2db_tenant, reverse=True)
        deleted = mydb.delete_row(FROM='datacenter_nets', WHERE=where_)
        if deleted == 0 and netmap_id:
            bottle.abort(httperrors.Not_Found, "No netmap found with " + " and ".join(map(lambda x: str(x[0])+": "+str(x[1]), where_.items())) )
        if netmap_id:
            return format_out({"result": "netmap {} deleted".format(netmap_id)})
        return format_out({"result": "{} netmap deleted".format(deleted)})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delnetmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/upload', method='POST')
def http_uploadnetmap_datacenter_id(tenant_id, datacenter_id):
    """Create netmaps for a datacenter without an explicit netmap body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # Passing None lets nfvo derive the netmaps itself.
        netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, None)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        return format_out({'netmaps': netmaps})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_uploadnetmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='POST')
def http_postnetmap_datacenter_id(tenant_id, datacenter_id):
    """Create a new netmap for a datacenter from the request body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # Validate the body against the schema and drop any unknown keys.
    content, _ = format_in(netmap_new_schema)
    extra = utils.remove_extra_items(content, netmap_new_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, content)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        return format_out({'netmaps': netmaps})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_postnetmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='PUT')
def http_putnettmap_datacenter_id(tenant_id, datacenter_id, netmap_id):
    """Edit a netmap and return its updated description."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # Validate the body against the schema and drop any unknown keys.
    content, _ = format_in(netmap_edit_schema)
    extra = utils.remove_extra_items(content, netmap_edit_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))

    try:
        nfvo.datacenter_edit_netmap(mydb, tenant_id, datacenter_id, netmap_id, content)
        # Return the updated netmap by reusing the GET handler.
        return http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_putnettmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/action', method='POST')
def http_action_datacenter_id(tenant_id, datacenter_id):
    """Perform an action over a datacenter; accepts both uuid and name."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    #parse input data
    http_content, _ = format_in(datacenter_action_schema)
    r = utils.remove_extra_items(http_content, datacenter_action_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        #obtain data, check that only one exist
        result = nfvo.datacenter_action(mydb, tenant_id, datacenter_id, http_content)
        if 'net-update' in http_content:
            # BUGFIX: tenant_id was missing from this call;
            # http_getnetmap_datacenter_id requires (tenant_id, datacenter_id)
            # and the old single-argument call raised TypeError.
            return http_getnetmap_datacenter_id(tenant_id, datacenter_id)
        else:
            return format_out(result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_action_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/datacenters/<datacenter_id>', method='DELETE')
def http_delete_datacenter_id(datacenter_id):
    """Delete a datacenter from the database; accepts both uuid and name."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        deleted = nfvo.delete_datacenter(mydb, datacenter_id)
        return format_out({"result": "datacenter '" + deleted + "' deleted"})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='POST')
@bottle.route(url_base + '/<tenant_id>/vim_accounts', method='POST')
def http_associate_datacenters(tenant_id, datacenter_id=None):
    """Associate an existing datacenter to this tenant (create a vim_account)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # The body may carry credentials, so parse it in confidential mode.
    content, _ = format_in(datacenter_associate_schema, confidential_data=True)
    extra = utils.remove_extra_items(content, datacenter_associate_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        vim_account_id = nfvo.create_vim_account(mydb, tenant_id, datacenter_id,
                                                 **content['datacenter'])
        # Return the created association by reusing the GET handler.
        return http_get_vim_account(tenant_id, vim_account_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_associate_datacenters error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='PUT')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='PUT')
def http_vim_account_edit(tenant_id, vim_account_id=None, datacenter_id=None):
    """Edit the vim_account that associates a datacenter to this tenant."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # Validate the body against the schema and drop any unknown keys.
    content, _ = format_in(datacenter_associate_schema)
    extra = utils.remove_extra_items(content, datacenter_associate_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        vim_account_id = nfvo.edit_vim_account(mydb, tenant_id, vim_account_id, datacenter_id=datacenter_id,
                                               **content['datacenter'])
        # Return the updated association by reusing the GET handler.
        return http_get_vim_account(tenant_id, vim_account_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_vim_account_edit error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='DELETE')
def http_deassociate_datacenters(tenant_id, datacenter_id=None, vim_account_id=None):
    """Deassociate a datacenter from this tenant (delete the vim_account)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        result = nfvo.delete_vim_account(mydb, tenant_id, vim_account_id, datacenter_id)
        return format_out({"result": result})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_deassociate_datacenters error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/attach', method='POST')
def http_post_vim_net_sdn_attach(tenant_id, datacenter_id, network_id):
    """Attach an external SDN port to a VIM network."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    content, _ = format_in(sdn_external_port_schema)
    try:
        result = nfvo.vim_net_sdn_attach(mydb, tenant_id, datacenter_id, network_id, content)
        return format_out(result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_vim_net_sdn_attach error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/detach', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/detach/<port_id>', method='DELETE')
def http_delete_vim_net_sdn_detach(tenant_id, datacenter_id, network_id, port_id=None):
    """Detach one SDN port (or all, when port_id is None) from a VIM network."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        result = nfvo.vim_net_sdn_detach(mydb, tenant_id, datacenter_id, network_id, port_id)
        return format_out(result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_vim_net_sdn_detach error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='GET')
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='GET')
def http_get_vim_items(tenant_id, datacenter_id, item, name=None):
    """List items of a kind at the VIM, or one item when *name* is given."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        result = nfvo.vim_action_get(mydb, tenant_id, datacenter_id, item, name)
        return format_out(result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_vim_items error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='DELETE')
def http_del_vim_items(tenant_id, datacenter_id, item, name):
    """Delete one VIM object ('item' of a given 'name') directly at the VIM."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        data = nfvo.vim_action_delete(mydb, tenant_id, datacenter_id, item, name)
        # wrap the plain result message in the standard {"result": ...} envelope
        return format_out({"result":data})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_del_vim_items error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='POST')
def http_post_vim_items(tenant_id, datacenter_id, item):
    """Create one VIM object of kind 'item' directly at the VIM.

    The body is only checked to be a generic object (object_schema); detailed
    validation happens inside nfvo.vim_action_create.
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content,_ = format_in( object_schema )
    try:
        data = nfvo.vim_action_create(mydb, tenant_id, datacenter_id, item, http_content)
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_vim_items error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vnfs', method='GET')
def http_get_vnfs(tenant_id):
    """List VNFs visible to a tenant.

    With tenant_id == 'any' no tenant filter is applied; otherwise only the
    tenant's own VNFs plus public ones are returned. Query-string parameters
    become SELECT/WHERE/LIMIT via filter_query_string.
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != 'any':
            #check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        select_,where_,limit_ = filter_query_string(bottle.request.query, None,
                ('uuid', 'name', 'osm_id', 'description', 'public', "tenant_id", "created_at") )
        if tenant_id != "any":
            # restrict to VNFs owned by the tenant OR flagged public
            where_["OR"]={"tenant_id": tenant_id, "public": True}
        vnfs = mydb.get_rows(FROM='vnfs', SELECT=select_, WHERE=where_, LIMIT=limit_)
        # change_keys_http2db(content, http2db_vnf, reverse=True)
        utils.convert_str2boolean(vnfs, ('public',))
        utils.convert_float_timestamp2str(vnfs)
        data={'vnfs': vnfs}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_vnfs error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='GET')
def http_get_vnf_id(tenant_id,vnf_id):
    '''get vnf details; vnf_id can be either a uuid or a name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        vnf = nfvo.get_vnf_id(mydb,tenant_id,vnf_id)
        # normalize db string values for the HTTP representation
        utils.convert_str2boolean(vnf, ('public',))
        utils.convert_float_timestamp2str(vnf)
        return format_out(vnf)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_vnf_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vnfs', method='POST')
def http_post_vnfs(tenant_id):
    """ Insert a vnf into the catalogue. Creates the flavor and images, and fill the tables at database
    :param tenant_id: tenant that this vnf belongs to
    :return: the inserted VNF rendered by http_get_vnf_id
    """
    # print "Parsing the YAML file of the VNF"
    # parse input data
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # the body is validated against v0.1 by default; "schema_version": "0.2" selects v0.2
    http_content, used_schema = format_in( vnfd_schema_v01, ("schema_version",), {"0.2": vnfd_schema_v02})
    r = utils.remove_extra_items(http_content, used_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        if used_schema == vnfd_schema_v01:
            vnf_id = nfvo.new_vnf(mydb,tenant_id,http_content)
        elif used_schema == vnfd_schema_v02:
            vnf_id = nfvo.new_vnf_v02(mydb,tenant_id,http_content)
        else:
            # format_in should make this unreachable; kept as a defensive guard
            logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
            bottle.abort(httperrors.Bad_Request, "Invalid schema version")
        # reuse the GET handler to render the freshly created VNF
        return http_get_vnf_id(tenant_id, vnf_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_vnfs error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/v3/<tenant_id>/vnfd', method='POST')
def http_post_vnfs_v3(tenant_id):
    """
    Insert one or several VNFs in the catalog, following OSM IM
    :param tenant_id: tenant owner of the VNF
    :return: The detailed list of inserted VNFs, following the old format
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # schema=None: content is validated later by the OSM IM parser — TODO confirm
    http_content, _ = format_in(None)
    try:
        vnfd_uuid_list = nfvo.new_vnfd_v3(mydb, tenant_id, http_content)
        vnfd_list = []
        for vnfd_uuid in vnfd_uuid_list:
            # re-read each inserted VNF to return it in the legacy format
            vnf = nfvo.get_vnf_id(mydb, tenant_id, vnfd_uuid)
            utils.convert_str2boolean(vnf, ('public',))
            utils.convert_float_timestamp2str(vnf)
            vnfd_list.append(vnf["vnf"])
        return format_out({"vnfd": vnfd_list})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # fix: this log line previously reported the wrong handler name (http_post_vnfs)
        logger.error("http_post_vnfs_v3 error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='DELETE')
def http_delete_vnf_id(tenant_id, vnf_id):
    '''delete a vnf from database, and images and flavors in VIM when appropriate, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    #check valid tenant_id and deletes the vnf, including images,
    try:
        # nfvo.delete_vnf returns a human-readable identifier of the deleted VNF
        data = nfvo.delete_vnf(mydb,tenant_id,vnf_id)
        #print json.dumps(data, indent=4)
        return format_out({"result":"VNF " + data + " deleted"})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_vnf_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
#@bottle.route(url_base + '/<tenant_id>/hosts/topology', method='GET')
#@bottle.route(url_base + '/<tenant_id>/physicalview/Madrid-Alcantara', method='GET')
@bottle.route(url_base + '/<tenant_id>/physicalview/<datacenter>', method='GET')
def http_get_hosts(tenant_id, datacenter):
    '''get the VIM host topology. datacenter=="treeview" selects the tree format;
    any other value returns the flat host info (datacenter itself is currently ignored).'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    #print "http_get_hosts received by tenant " + tenant_id + ' datacenter ' + datacenter
    try:
        if datacenter == 'treeview':
            data = nfvo.get_hosts(mydb, tenant_id)
        else:
            #openmano-gui is using a hardcoded value for the datacenter
            result, data = nfvo.get_hosts_info(mydb, tenant_id) #, datacenter)

        if result < 0:
            # NOTE(review): 'result' is only assigned in the else branch; for
            # datacenter=='treeview' this relies on get_hosts raising on error — confirm
            #print("http_get_hosts error {} {}".format(-result, data))
            bottle.abort(-result, data)
        else:
            utils.convert_float_timestamp2str(data)
            #print json.dumps(data, indent=4)
            return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_hosts error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<path:path>', method='OPTIONS')
def http_options_deploy(path):
    '''For some reason GUI web ask for OPTIONS that must be responded'''
    #TODO: check correct path, and correct headers request
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # CORS preflight answer: advertise the supported methods/headers and allow any origin
    bottle.response.set_header('Access-Control-Allow-Methods','POST, GET, PUT, DELETE, OPTIONS')
    bottle.response.set_header('Accept','application/yaml,application/json')
    bottle.response.set_header('Content-Type','application/yaml,application/json')
    bottle.response.set_header('Access-Control-Allow-Headers','content-type')
    bottle.response.set_header('Access-Control-Allow-Origin','*')
    # empty body on purpose; only the headers matter for a preflight response
    return
+
@bottle.route(url_base + '/<tenant_id>/topology/deploy', method='POST')
def http_post_deploy(tenant_id):
    '''post topology deploy: create a scenario from the body and start it immediately'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)

    # NOTE: the version-selection map uses the integer 2 (not "0.2") as key — this
    # matches whatever "schema_version" value v0.2 bodies carry; confirm before changing
    http_content, used_schema = format_in( nsd_schema_v01, ("schema_version",), {2: nsd_schema_v02})
    #r = utils.remove_extra_items(http_content, used_schema)
    #if r is not None: print "http_post_deploy: Warning: remove extra items ", r
    #print "http_post_deploy input: ", http_content

    try:
        scenario_id = nfvo.new_scenario(mydb, tenant_id, http_content)
        # the scenario name is reused as both instance name and description
        instance = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['name'], http_content['name'])
        #print json.dumps(data, indent=4)
        return format_out(instance)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_deploy error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/topology/verify', method='POST')
def http_post_verify(tenant_id):
    '''post topology verify. Not implemented: currently answers with an empty body.'''
    #TODO:
#    print "http_post_verify by tenant " + tenant_id + ' datacenter ' + datacenter
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    return
+
+#
+# SCENARIOS
+#
+
@bottle.route(url_base + '/<tenant_id>/scenarios', method='POST')
def http_post_scenarios(tenant_id):
    '''add a scenario into the catalogue. Creates the scenario and its internal structure in the OPENMANO DB'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # "schema_version" selects the NSD schema: default v0.1, integer 2 -> v0.2, "0.3" -> v0.3
    http_content, used_schema = format_in( nsd_schema_v01, ("schema_version",), {2: nsd_schema_v02, "0.3": nsd_schema_v03})
    #r = utils.remove_extra_items(http_content, used_schema)
    #if r is not None: print "http_post_scenarios: Warning: remove extra items ", r
    #print "http_post_scenarios input: ", http_content
    try:
        if used_schema == nsd_schema_v01:
            scenario_id = nfvo.new_scenario(mydb, tenant_id, http_content)
        elif used_schema == nsd_schema_v02:
            scenario_id = nfvo.new_scenario_v02(mydb, tenant_id, http_content, "0.2")
        elif used_schema == nsd_schema_v03:
            scenario_id = nfvo.new_scenario_v02(mydb, tenant_id, http_content, "0.3")
        else:
            # defensive: format_in should have rejected any other version already
            logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
            bottle.abort(httperrors.Bad_Request, "Invalid schema version")
        #print json.dumps(data, indent=4)
        #return format_out(data)
        # reuse the GET handler to render the freshly created scenario
        return http_get_scenario_id(tenant_id, scenario_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_scenarios error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/v3/<tenant_id>/nsd', method='POST')
def http_post_nsds_v3(tenant_id):
    """
    Insert one or several NSDs in the catalog, following OSM IM
    :param tenant_id: tenant owner of the NSD
    :return: The detailed list of inserted NSDs, following the old format
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # schema=None: the OSM IM parser validates the content — TODO confirm
    http_content, _ = format_in(None)
    try:
        nsd_uuid_list = nfvo.new_nsd_v3(mydb, tenant_id, http_content)
        nsd_list = []
        for nsd_uuid in nsd_uuid_list:
            # re-read each inserted NSD to return it in the legacy scenario format
            scenario = mydb.get_scenario(nsd_uuid, tenant_id)
            utils.convert_float_timestamp2str(scenario)
            nsd_list.append(scenario)
        data = {'nsd': nsd_list}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_nsds_v3 error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>/action', method='POST')
def http_post_scenario_action(tenant_id, scenario_id):
    '''take an action over a scenario: start, deploy, reserve or verify'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data
    http_content, _ = format_in(scenario_action_schema)
    r = utils.remove_extra_items(http_content, scenario_action_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        # check valid tenant_id
        nfvo.check_tenant(mydb, tenant_id)
        if "start" in http_content:
            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['start']['instance_name'],
                                       http_content['start'].get('description',http_content['start']['instance_name']),
                                       http_content['start'].get('datacenter') )
            return format_out(data)
        elif "deploy" in http_content: #Equivalent to start
            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['deploy']['instance_name'],
                                       http_content['deploy'].get('description',http_content['deploy']['instance_name']),
                                       http_content['deploy'].get('datacenter') )
            return format_out(data)
        elif "reserve" in http_content: #Reserve resources
            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['reserve']['instance_name'],
                                       http_content['reserve'].get('description',http_content['reserve']['instance_name']),
                                       http_content['reserve'].get('datacenter'), startvms=False )
            return format_out(data)
        elif "verify" in http_content: #Equivalent to start and then delete
            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['verify']['instance_name'],
                                       http_content['verify'].get('description',http_content['verify']['instance_name']),
                                       http_content['verify'].get('datacenter'), startvms=False )
            instance_id = data['uuid']
            nfvo.delete_instance(mydb, tenant_id,instance_id)
            return format_out({"result":"Verify OK"})
        else:
            # fix: previously fell through and returned an empty 200 response when none
            # of the known action keys was present. The schema should prevent this,
            # but fail loudly if it does not.
            bottle.abort(httperrors.Bad_Request,
                         "missing action: expected one of 'start', 'deploy', 'reserve', 'verify'")
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_scenario_action error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios', method='GET')
def http_get_scenarios(tenant_id):
    '''get scenarios list. tenant_id == "any" disables the tenant/public filter'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        s,w,l=filter_query_string(bottle.request.query, None,
                ('uuid', 'name', 'osm_id', 'description', 'tenant_id', 'created_at', 'public'))
        if tenant_id != "any":
            # restrict to scenarios owned by the tenant OR flagged public
            w["OR"] = {"tenant_id": tenant_id, "public": True}
        scenarios = mydb.get_rows(SELECT=s, WHERE=w, LIMIT=l, FROM='scenarios')
        utils.convert_float_timestamp2str(scenarios)
        utils.convert_str2boolean(scenarios, ('public',) )
        data={'scenarios':scenarios}
        #print json.dumps(scenarios, indent=4)
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_scenarios error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='GET')
def http_get_scenario_id(tenant_id, scenario_id):
    '''get scenario details, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        scenario = mydb.get_scenario(scenario_id, tenant_id)
        utils.convert_float_timestamp2str(scenario)
        data={'scenario' : scenario}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # fix: this log line previously reported the wrong handler name (http_get_scenarios)
        logger.error("http_get_scenario_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='DELETE')
def http_delete_scenario_id(tenant_id, scenario_id):
    '''delete a scenario from database, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        # delete_scenario returns a human-readable identifier of the deleted scenario
        data = mydb.delete_scenario(scenario_id, tenant_id)
        #print json.dumps(data, indent=4)
        return format_out({"result":"scenario " + data + " deleted"})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_scenario_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='PUT')
def http_put_scenario_id(tenant_id, scenario_id):
    '''edit an existing scenario id'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content,_ = format_in( scenario_edit_schema )
    #r = utils.remove_extra_items(http_content, scenario_edit_schema)
    #if r is not None: print "http_put_scenario_id: Warning: remove extra items ", r
    #print "http_put_scenario_id input: ", http_content
    try:
        nfvo.edit_scenario(mydb, tenant_id, scenario_id, http_content)
        #print json.dumps(data, indent=4)
        #return format_out(data)
        # reuse the GET handler to render the updated scenario
        return http_get_scenario_id(tenant_id, scenario_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_put_scenario_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/instances', method='POST')
def http_post_instances(tenant_id):
    '''create an instance-scenario'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data
    http_content, used_schema = format_in(instance_scenario_create_schema_v01)
    r = utils.remove_extra_items(http_content, used_schema)
    # fix: was 'if r is not None', which logged a warning even when the removal
    # list was empty; truthiness matches the sibling handlers
    if r:
        logger.warning("http_post_instances: Warning: remove extra items %s", str(r))
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        data = nfvo.create_instance(mydb, tenant_id, http_content["instance"])
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_instances error {}: {}".format(e.http_code, str(e)), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+#
+# INSTANCES
+#
@bottle.route(url_base + '/<tenant_id>/instances', method='GET')
def http_get_instances(tenant_id):
    '''get instance list. tenant_id == "any" disables the tenant filter'''
    # fix: this was the only handler missing the request trace line
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        s,w,l=filter_query_string(bottle.request.query, None, ('uuid', 'name', 'scenario_id', 'tenant_id', 'description', 'created_at'))
        if tenant_id != "any":
            w['tenant_id'] = tenant_id
        instances = mydb.get_rows(SELECT=s, WHERE=w, LIMIT=l, FROM='instance_scenarios')
        utils.convert_float_timestamp2str(instances)
        utils.convert_str2boolean(instances, ('public',) )
        data={'instances':instances}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instances error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='GET')
def http_get_instance_id(tenant_id, instance_id):
    '''get instances details, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:

        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        if tenant_id == "any":
            # None disables the tenant filter inside get_instance_id
            tenant_id = None

        instance = nfvo.get_instance_id(mydb, tenant_id, instance_id)

        # Workaround to SO, convert vnfs:vms:interfaces:ip_address from ";" separated list to report the first value
        for vnf in instance.get("vnfs", ()):
            for vm in vnf.get("vms", ()):
                for iface in vm.get("interfaces", ()):
                    if iface.get("ip_address"):
                        index = iface["ip_address"].find(";")
                        if index >= 0:
                            # keep only the first address; SO cannot parse the list
                            iface["ip_address"] = iface["ip_address"][:index]
        utils.convert_float_timestamp2str(instance)
        # print json.dumps(instance, indent=4)
        return format_out(instance)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instance_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='DELETE')
def http_delete_instance_id(tenant_id, instance_id):
    '''delete instance from VIM and from database, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        if tenant_id == "any":
            # None disables the tenant filter inside delete_instance
            tenant_id = None
        #obtain data
        message = nfvo.delete_instance(mydb, tenant_id,instance_id)
        return format_out({"result":message})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_instance_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='POST')
def http_post_instance_scenario_action(tenant_id, instance_id):
    """
    take an action over a scenario instance
    :param tenant_id: tenant where user belongs to
    :param instance_id: instance identity (uuid or name)
    :return: the action result, formatted for the client
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data
    http_content, _ = format_in(instance_scenario_action_schema)
    r = utils.remove_extra_items(http_content, instance_scenario_action_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)

        #print "http_post_instance_scenario_action input: ", http_content
        #obtain data
        # resolve a possible name into the canonical uuid before acting
        instance = mydb.get_instance_scenario(instance_id, tenant_id)
        instance_id = instance["uuid"]

        data = nfvo.instance_action(mydb, tenant_id, instance_id, http_content)
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_instance_scenario_action error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='GET')
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action/<action_id>', method='GET')
def http_get_instance_scenario_action(tenant_id, instance_id, action_id=None):
    """
    List the actions done over an instance, or the action details
    :param tenant_id: tenant where user belongs to. Can be "any" to ignore
    :param instance_id: instance id, can be "any" to get actions of all instances
    :param action_id: optional action identifier; None lists all actions
    :return: the action list or detail, formatted for the client
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        data = nfvo.instance_action_get(mydb, tenant_id, instance_id, action_id)
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instance_scenario_action error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.error(400)
@bottle.error(401)
@bottle.error(404)
@bottle.error(403)
@bottle.error(405)
@bottle.error(406)
@bottle.error(409)
@bottle.error(503)
@bottle.error(500)
def error400(error):
    """Generic bottle error handler: wrap an HTTP error into the standard error envelope.

    Despite the name, it serves every status code registered above, not only 400.
    """
    e={"error":{"code":error.status_code, "type":error.status, "description":error.body}}
    # let browser GUIs served from other origins read the error body
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
    return format_out(e)
+
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+NFVO engine, implementing all the methods for the creation, deletion and management of vnfs, scenarios and instances
+'''
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+__date__ ="$16-sep-2014 22:05:01$"
+
+# import imp
+import json
+import string
+import yaml
+from random import choice as random_choice
+from osm_ro import utils
+from osm_ro.utils import deprecated
+from osm_ro.vim_thread import vim_thread
+import osm_ro.console_proxy_thread as cli
+from osm_ro_plugin.vim_dummy import VimDummyConnector
+from osm_ro_plugin.sdn_dummy import SdnDummyConnector
+from osm_ro_plugin.sdn_failing import SdnFailingConnector
+from osm_ro_plugin import vimconn, sdnconn
+import logging
+import collections
+import math
+from uuid import uuid4
+from osm_ro.db_base import db_base_Exception
+
+from osm_ro import nfvo_db
+from threading import Lock
+import time as t
+from osm_ro.sdn import Sdn, SdnException as ovimException
+
+from Crypto.PublicKey import RSA
+
+import osm_im.vnfd as vnfd_catalog
+import osm_im.nsd as nsd_catalog
+from pyangbind.lib.serialise import pybindJSONDecoder
+from copy import deepcopy
+from pkg_resources import iter_entry_points
+
+
+# WIM
+from .http_tools import errors as httperrors
+from .wim.engine import WimEngine
+from .wim.persistence import WimPersistence
+from copy import deepcopy
+from pprint import pformat
+#
+
# NOTE: 'global' statements at module level are no-ops; kept as documentation of
# which names are rebound from inside functions (e.g. start_service).
global global_config
# WIM
global wim_engine
wim_engine = None           # WimEngine instance, created in start_service()
global sdnconn_imported
#
global logger
global default_volume_size
default_volume_size = '5' #size in GB
global ovim
ovim = None                 # Sdn library instance, created in start_service()
global_config = None        # configuration dict, set externally before start_service() — TODO confirm

plugins = {}   # dictionary with VIM type as key, loaded module as value
vim_threads = {"running":{}, "deleting": {}, "names": []}   # threads running for attached-VIMs
vim_persistent_info = {}
# WIM
sdnconn_imported = {}   # dictionary with WIM type as key, loaded module as value
wim_threads = {"running":{}, "deleting": {}, "names": []}   # threads running for attached-WIMs
wim_persistent_info = {}
#

logger = logging.getLogger('openmano.nfvo')
task_lock = Lock()
last_task_id = 0.0          # monotonically increasing cursor used by get_task_id()
db = None                   # nfvo_db instance, created in start_service()
db_lock = Lock()

worker_id = None            # unique process id (docker id or random), set in start_service()
+
class NfvoException(httperrors.HttpMappedError):
    """Common class for NFVO errors; exposes an 'http_code' attribute that the
    HTTP handlers translate into the response status."""
+
def _load_plugin(name, type="vim"):
    """Load the entry-point plugin 'name' into the global 'plugins' dict.

    :param name: plugin/entry-point name (e.g. "rovim_dummy", "rosdn_dummy")
    :param type: "vim" or "sdn"; selects the entry-point group 'osm_ro{type}.plugins'

    On any failure the slot is filled with SdnFailingConnector so that later use
    reports a meaningful error instead of a KeyError.
    NOTE(review): the failing placeholder is an Sdn connector even for type="vim" —
    confirm that is intended.
    """
    # type can be vim or sdn
    global plugins
    try:
        for v in iter_entry_points('osm_ro{}.plugins'.format(type), name):
            plugins[name] = v.load()
    except Exception as e:
        logger.critical("Cannot load osm_{}: {}".format(name, e))
        if name:
            plugins[name] = SdnFailingConnector("Cannot load osm_{}: {}".format(name, e))
    if name and name not in plugins:
        # no entry point matched at all: register a placeholder that raises on use
        error_text = "Cannot load a module for {t} type '{n}'. The plugin 'osm_{n}' has not been" \
                     " registered".format(t=type, n=name)
        logger.critical(error_text)
        plugins[name] = SdnFailingConnector(error_text)
        # raise NfvoException("Cannot load a module for {t} type '{n}'. The plugin 'osm_{n}' has not been registered".
        #                     format(t=type, n=name), httperrors.Bad_Request)
+
def get_task_id():
    """Return a unique task id string "ACTION-<timestamp>".

    Based on the current time; bumped by one microsecond when two calls collide,
    so ids never repeat within this process. Not synchronized by itself —
    presumably callers serialize via task_lock; confirm at call sites.
    """
    global last_task_id
    task_id = t.time()
    if task_id <= last_task_id:
        task_id = last_task_id + 0.000001
    last_task_id = task_id
    return "ACTION-{:.6f}".format(task_id)
    # return (t.strftime("%Y%m%dT%H%M%S.{}%Z", t.localtime(task_id))).format(int((task_id % 1)*1e6))
+
+
def new_task(name, params, depends=None):
    """Deprecated!!! Build a legacy enqueued-task descriptor dict.

    :param name: task name
    :param params: task parameters (stored as-is)
    :param depends: optional dependency list; added only when truthy
    """
    task_id = get_task_id()
    task = {"status": "enqueued", "id": task_id, "name": name, "params": params}
    if depends:
        task["depends"] = depends
    return task
+
+
def is_task_id(id):
    """Return True when 'id' is a task identifier, i.e. it starts with "TASK-".

    :param id: identifier string to classify (parameter name kept for backward
        compatibility although it shadows the builtin)
    """
    # direct boolean expression instead of the redundant 'True if ... else False'
    return id[:5] == "TASK-"
+
def get_process_id():
    """
    Obtain a unique ID for this process. If running from inside docker, it will get docker ID. If not it
    will provide a random one
    :return: Obtained ID
    """
    # Prefer the docker container id found in /proc/self/cgroup
    try:
        with open("/proc/self/cgroup", "r") as cgroup_file:
            for line in cgroup_file.readlines():
                if "docker/" not in line:
                    continue
                container_id = line.rpartition("/")[2].replace("\n", "")[:12]
                if container_id:
                    return container_id
    except Exception:
        # /proc may be unavailable (e.g. non-linux); fall back silently
        pass
    # Fallback: a random 12-character hex string
    return "".join(random_choice("0123456789abcdef") for _ in range(12))
+
def get_non_used_vim_name(datacenter_name, datacenter_id):
    """Compose a vim_thread name unique across processes: worker id, datacenter
    uuid (dashes stripped) and a truncated datacenter name."""
    worker_part = worker_id[:12]
    uuid_part = datacenter_id.replace("-", "")[:32]
    return "{}:{}:{}".format(worker_part, uuid_part, datacenter_name[:16])
+
# -- Move
def get_non_used_wim_name(wim_name, wim_id, tenant_name, tenant_id):
    """Pick a WIM thread name not yet present in wim_threads["names"]: try the
    wim name, then wim.tenant, and finally fall back to the always-unique
    "<wim_id>-<tenant_id>". The chosen name is registered before returning."""
    for candidate in (wim_name[:16], wim_name[:16] + "." + tenant_name[:16]):
        if candidate not in wim_threads["names"]:
            wim_threads["names"].append(candidate)
            return candidate
    fallback = wim_id + "-" + tenant_id
    wim_threads["names"].append(fallback)
    return fallback
+
+
def start_service(mydb, persistence=None, wim=None):
    """
    Start the NFVO service: connect the global DB, load plugins, start the ovim and WIM
    engines, purge old DB entries and launch one vim_thread per attached VIM account and
    per SDN-enabled WIM account.
    :param mydb: database connector; its lock is replaced by the global db_lock
    :param persistence: optional WimPersistence; created over the global db when None
    :param wim: optional WimEngine; created when None
    :return: None
    :raises NfvoException: wrapping database or ovim errors
    """
    global db, global_config, plugins, ovim, worker_id
    db = nfvo_db.nfvo_db(lock=db_lock)
    mydb.lock = db_lock
    db.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])

    persistence = persistence or WimPersistence(db)

    try:
        worker_id = get_process_id()
        # dummy connectors are always registered, so tests/configs can use them without loading
        if "rosdn_dummy" not in plugins:
            plugins["rosdn_dummy"] = SdnDummyConnector
        if "rovim_dummy" not in plugins:
            plugins["rovim_dummy"] = VimDummyConnector
        # starts ovim library
        ovim = Sdn(db, plugins)

        global wim_engine
        wim_engine = wim or WimEngine(persistence, plugins)
        wim_engine.ovim = ovim

        ovim.start_service()

        #delete old unneeded vim_wim_actions
        clean_db(mydb)

        # starts vim_threads: one per (tenant, datacenter) association
        from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join '\
               'datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
        select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
                   'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
                   'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
                   'user', 'passwd', 'dt.config as dt_config', 'nfvo_tenant_id')
        vims = mydb.get_rows(FROM=from_, SELECT=select_)
        for vim in vims:
            # merge datacenter config and datacenter_tenant config into one connector config
            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
                   'datacenter_id': vim.get('datacenter_id')}
            if vim["config"]:
                extra.update(yaml.load(vim["config"], Loader=yaml.Loader))
            if vim.get('dt_config'):
                extra.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
            plugin_name = "rovim_" + vim["type"]
            if plugin_name not in plugins:
                _load_plugin(plugin_name, type="vim")

            thread_id = vim['datacenter_tenant_id']
            vim_persistent_info[thread_id] = {}
            try:
                #if not tenant:
                #    return -httperrors.Bad_Request, "You must provide a valid tenant name or uuid for VIM %s" % ( vim["type"])
                myvim = plugins[plugin_name](
                    uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                    tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
                    url=vim['vim_url'], url_admin=vim['vim_url_admin'],
                    user=vim['user'], passwd=vim['passwd'],
                    config=extra, persistent_info=vim_persistent_info[thread_id]
                )
            except vimconn.VimConnException as e:
                myvim = e
                logger.error("Cannot launch thread for VIM {} '{}': {}".format(vim['datacenter_name'],
                                                                               vim['datacenter_id'], e))
            except Exception as e:
                logger.critical("Cannot launch thread for VIM {} '{}': {}".format(vim['datacenter_name'],
                                                                                  vim['datacenter_id'], e))
                # raise NfvoException("Error at VIM {}; {}: {}".format(vim["type"], type(e).__name__, e),
                #                     httperrors.Internal_Server_Error)
            thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['datacenter_id'])
            # NOTE(review): myvim is created above but not passed to vim_thread (None is passed
            # instead) — presumably the thread builds its own connector; confirm intended
            new_thread = vim_thread(task_lock, plugins, thread_name, None,
                                    vim['datacenter_tenant_id'], db=db)
            new_thread.start()
            vim_threads["running"][thread_id] = new_thread
        # launch one thread per WIM account that has SDN assist enabled
        wims = mydb.get_rows(FROM="wim_accounts join wims on wim_accounts.wim_id=wims.uuid",
                             WHERE={"sdn": "true"},
                             SELECT=("wim_accounts.uuid as uuid", "type", "wim_accounts.name as name"))
        for wim in wims:
            plugin_name = "rosdn_" + wim["type"]
            if plugin_name not in plugins:
                _load_plugin(plugin_name, type="sdn")

            thread_id = wim['uuid']
            # NOTE(review): reuses get_non_used_vim_name (not get_non_used_wim_name) for WIM
            # thread names — looks intentional (same naming scheme) but confirm
            thread_name = get_non_used_vim_name(wim['name'], wim['uuid'])
            new_thread = vim_thread(task_lock, plugins, thread_name, wim['uuid'], None, db=db)
            new_thread.start()
            vim_threads["running"][thread_id] = new_thread
        wim_engine.start_threads()
    except db_base_Exception as e:
        raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
    except ovimException as e:
        message = str(e)
        # rewrite the lib_osm_openvim "wrong database version" error into an actionable message
        if message[:22] == "DATABASE wrong version":
            message = "DATABASE wrong version of lib_osm_openvim {msg} -d{dbname} -u{dbuser} -p{dbpass} {ver}' "\
                      "at host {dbhost}".format(
                          msg=message[22:-3], dbname=global_config["db_ovim_name"],
                          dbuser=global_config["db_ovim_user"], dbpass=global_config["db_ovim_passwd"],
                          ver=message[-3:-1], dbhost=global_config["db_ovim_host"])
        raise NfvoException(message, httperrors.Bad_Request)
+
+
def stop_service():
    """Stop the ovim service, ask every running vim_thread to exit (moving it to the
    "deleting" registry), stop WIM engine threads and flag console threads to terminate."""
    global ovim, global_config
    if ovim:
        ovim.stop_service()
    for thread_id, running_thread in vim_threads["running"].items():
        running_thread.insert_task("exit")
        vim_threads["deleting"][thread_id] = running_thread
    vim_threads["running"] = {}

    if wim_engine:
        wim_engine.stop_threads()

    console_threads = global_config.get("console_thread") if global_config else None
    if console_threads:
        for console_thread in console_threads:
            console_thread.terminate = True
+
def get_version():
    """Return the openmanod version banner, including version date and copyright."""
    banner = "openmanod version {} {}\n(c) Copyright Telefonica"
    return banner.format(global_config["version"], global_config["version_date"])
+
def clean_db(mydb):
    """
    Clean unused or old entries at database to avoid unlimited growing
    :param mydb: database connector
    :return: None
    """
    # delete vim_wim_actions that are DELETE+done/superseded, older than one week,
    # whose instance no longer exists; work in batches of 100
    one_week_ago = t.time() - 3600*24*7
    last_instance_action = None
    removed = 0
    while True:
        batch = mydb.get_rows(
            SELECT=("item", "item_id", "instance_action_id"),
            FROM="vim_wim_actions as va join instance_actions as ia on va.instance_action_id=ia.uuid "
                 "left join instance_scenarios as i on ia.instance_id=i.uuid",
            WHERE={"va.action": "DELETE", "va.modified_at<": one_week_ago, "i.uuid": None,
                   "va.status": ("DONE", "SUPERSEDED")},
            LIMIT=100
        )
        for row in batch:
            mydb.delete_row(FROM="vim_wim_actions", WHERE=row)
            # remove the parent instance_action once, when first seen
            if last_instance_action != row["instance_action_id"]:
                last_instance_action = row["instance_action_id"]
                mydb.delete_row(FROM="instance_actions", WHERE={"uuid": last_instance_action})
        removed += len(batch)
        if len(batch) < 100:
            break
    # clean locks left by dead workers
    mydb.update_rows("vim_wim_actions", UPDATE={"worker": None}, WHERE={"worker<>": None})

    if removed:
        logger.debug("Removed {} unused vim_wim_actions".format(removed))
+
+
def get_flavorlist(mydb, vnf_id, nfvo_tenant=None):
    '''Obtain the list of flavor ids used by the VMs of a VNF.
    :param mydb: database connector
    :param vnf_id: vnf uuid to filter by
    :param nfvo_tenant: optional tenant filter
    :return: list of flavor_id values
    '''
    where = {'vnf_id': vnf_id}
    if nfvo_tenant is not None:
        where['nfvo_tenant_id'] = nfvo_tenant

    rows = mydb.get_rows(FROM='vms join flavors on vms.flavor_id=flavors.uuid',
                         SELECT=('flavor_id',), WHERE=where)
    return [row['flavor_id'] for row in rows]
+
+
def get_imagelist(mydb, vnf_id, nfvo_tenant=None):
    """
    Get used images of all vms belonging to this VNFD
    :param mydb: database conector
    :param vnf_id: vnfd uuid
    :param nfvo_tenant: tenant, not used
    :return: The list of image uuid used, without duplicates
    """
    used_images = []
    rows = mydb.get_rows(SELECT=('image_id', 'image_list'), FROM='vms', WHERE={'vnf_id': vnf_id})
    for row in rows:
        if row["image_id"] and row["image_id"] not in used_images:
            used_images.append(row["image_id"])
        if row["image_list"]:
            # image_list column holds extra images serialized as yaml
            for extra_image in yaml.load(row["image_list"], Loader=yaml.Loader):
                if extra_image["image_id"] not in used_images:
                    used_images.append(extra_image["image_id"])
    return used_images
+
+
def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, datacenter_tenant_id=None,
            vim_tenant=None, vim_tenant_name=None, vim_user=None, vim_passwd=None, ignore_errors=False):
    '''Obtain a dictionary of VIM (datacenter) classes with some of the input parameters
    return dictionary with {datacenter_id: vim_class, ... }. vim_class contain:
            'nfvo_tenant_id','datacenter_id','vim_tenant_id','vim_url','vim_url_admin','datacenter_name','type','user','passwd'
    raise exception upon error
    '''
    global plugins
    # build the WHERE filter only from the parameters actually provided
    WHERE_dict={}
    if nfvo_tenant is not None: WHERE_dict['nfvo_tenant_id'] = nfvo_tenant
    if datacenter_id is not None: WHERE_dict['d.uuid'] = datacenter_id
    if datacenter_tenant_id is not None: WHERE_dict['datacenter_tenant_id'] = datacenter_tenant_id
    if datacenter_name is not None: WHERE_dict['d.name'] = datacenter_name
    if vim_tenant is not None: WHERE_dict['dt.vim_tenant_id'] = vim_tenant
    if vim_tenant_name is not None: WHERE_dict['vim_tenant_name'] = vim_tenant_name
    # when a tenant filter is involved, join with the association tables to get credentials
    if nfvo_tenant or vim_tenant or vim_tenant_name or datacenter_tenant_id:
        from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
        select_ = ('type','d.config as config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name',
                   'dt.uuid as datacenter_tenant_id','dt.vim_tenant_name as vim_tenant_name','dt.vim_tenant_id as vim_tenant_id',
                   'user','passwd', 'dt.config as dt_config')
    else:
        from_ = 'datacenters as d'
        select_ = ('type','config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name')
    try:
        vims = mydb.get_rows(FROM=from_, SELECT=select_, WHERE=WHERE_dict )
        vim_dict={}
        for vim in vims:
            # merge datacenter config and datacenter_tenant config into the connector config
            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
                   'datacenter_id': vim.get('datacenter_id'),
                   '_vim_type_internal': vim.get('type')}
            if vim["config"]:
                extra.update(yaml.load(vim["config"], Loader=yaml.Loader))
            if vim.get('dt_config'):
                extra.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
            plugin_name = "rovim_" + vim["type"]
            if plugin_name not in plugins:
                try:
                    _load_plugin(plugin_name, type="vim")
                except NfvoException as e:
                    if ignore_errors:
                        logger.error("{}".format(e))
                        continue
                    else:
                        raise
            try:
                # reuse per-datacenter_tenant persistent info so connectors keep state between calls
                if 'datacenter_tenant_id' in vim:
                    thread_id = vim["datacenter_tenant_id"]
                    if thread_id not in vim_persistent_info:
                        vim_persistent_info[thread_id] = {}
                    persistent_info = vim_persistent_info[thread_id]
                else:
                    persistent_info = {}
                #if not tenant:
                #    return -httperrors.Bad_Request, "You must provide a valid tenant name or uuid for VIM %s" % ( vim["type"])
                vim_dict[vim['datacenter_id']] = plugins[plugin_name](
                    uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                    tenant_id=vim.get('vim_tenant_id',vim_tenant),
                    tenant_name=vim.get('vim_tenant_name',vim_tenant_name),
                    url=vim['vim_url'], url_admin=vim['vim_url_admin'],
                    user=vim.get('user',vim_user), passwd=vim.get('passwd',vim_passwd),
                    config=extra, persistent_info=persistent_info
                )
            except Exception as e:
                if ignore_errors:
                    logger.error("Error at VIM {}; {}: {}".format(vim["type"], type(e).__name__, str(e)))
                    continue
                http_code = httperrors.Internal_Server_Error
                if isinstance(e, vimconn.VimConnException):
                    http_code = e.http_code
                raise NfvoException("Error at VIM {}; {}: {}".format(vim["type"], type(e).__name__, str(e)), http_code)
        return vim_dict
    except db_base_Exception as e:
        raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
+
+
def rollback(mydb, vims, rollback_list):
    """Undo the items recorded in rollback_list, newest first.
    Items with "where": "vim" are deleted at the VIM (and the mapping tables);
    items with "where": "mano" are deleted from the MANO DB only.
    :return: (True, message) when everything was deleted, (False, message) otherwise
    """
    undeleted_items = []
    # delete things by reverse order of creation
    for item in reversed(rollback_list):
        if item["where"] == "vim":
            if item["vim_id"] not in vims:
                continue
            if is_task_id(item["uuid"]):
                # pending task, nothing was created at the VIM yet
                continue
            vim = vims[item["vim_id"]]
            try:
                if item["what"] == "image":
                    vim.delete_image(item["uuid"])
                    mydb.delete_row(FROM="datacenters_images",
                                    WHERE={"datacenter_vim_id": vim["id"], "vim_id": item["uuid"]})
                elif item["what"] == "flavor":
                    vim.delete_flavor(item["uuid"])
                    mydb.delete_row(FROM="datacenters_flavors",
                                    WHERE={"datacenter_vim_id": vim["id"], "vim_id": item["uuid"]})
                elif item["what"] == "network":
                    vim.delete_network(item["uuid"])
                elif item["what"] == "vm":
                    vim.delete_vminstance(item["uuid"])
            except vimconn.VimConnException as e:
                logger.error("Error in rollback. Not possible to delete VIM %s '%s'. Message: %s",
                             item['what'], item["uuid"], str(e))
                undeleted_items.append("{} {} from VIM {}".format(item['what'], item["uuid"], vim["name"]))
            except db_base_Exception as e:
                logger.error("Error in rollback. Not possible to delete %s '%s' from DB.datacenters Message: %s",
                             item['what'], item["uuid"], str(e))
        else:  # where==mano
            try:
                if item["what"] == "image":
                    mydb.delete_row(FROM="images", WHERE={"uuid": item["uuid"]})
                elif item["what"] == "flavor":
                    mydb.delete_row(FROM="flavors", WHERE={"uuid": item["uuid"]})
            except db_base_Exception as e:
                logger.error("Error in rollback. Not possible to delete %s '%s' from DB. Message: %s",
                             item['what'], item["uuid"], str(e))
                undeleted_items.append("{} '{}'".format(item['what'], item["uuid"]))
    if not undeleted_items:
        return True, "Rollback successful."
    return False, "Rollback fails to delete: " + str(undeleted_items)
+
+
def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
    """
    Validate VNFC interface names and the internal/external connections of a vnf
    descriptor, and fill the default "type" (v1) or "implementation" (v2) of each
    internal connection when missing.
    :param vnf_descriptor: descriptor dictionary; modified in place (defaults added)
    :param vnf_descriptor_version: 1 or 2; selects default keys and error wording
    :return: None
    :raises NfvoException: with Bad_Request upon any validation error
    """
    global global_config
    #create a dictionary with vnfc-name: vnfc:interface-list key:values pairs
    vnfc_interfaces = {}
    for vnfc in vnf_descriptor["vnf"]["VNFC"]:
        name_dict = {}
        #dataplane interfaces
        for numa in vnfc.get("numas", ()):
            for interface in numa.get("interfaces", ()):
                if interface["name"] in name_dict:
                    raise NfvoException(
                        "Error at vnf:VNFC[name:'{}']:numas:interfaces:name, interface name '{}' already used in this VNFC".format(
                            vnfc["name"], interface["name"]),
                        httperrors.Bad_Request)
                name_dict[interface["name"]] = "underlay"
        #bridge interfaces
        for interface in vnfc.get("bridge-ifaces", ()):
            if interface["name"] in name_dict:
                raise NfvoException(
                    "Error at vnf:VNFC[name:'{}']:bridge-ifaces:name, interface name '{}' already used in this VNFC".format(
                        vnfc["name"], interface["name"]),
                    httperrors.Bad_Request)
            name_dict[interface["name"]] = "overlay"
        vnfc_interfaces[vnfc["name"]] = name_dict
        # check bood-data info
        # if "boot-data" in vnfc:
        #     # check that user-data is incompatible with users and config-files
        #     if (vnfc["boot-data"].get("users") or vnfc["boot-data"].get("config-files")) and vnfc["boot-data"].get("user-data"):
        #         raise NfvoException(
        #             "Error at vnf:VNFC:boot-data, fields 'users' and 'config-files' are not compatible with 'user-data'",
        #             httperrors.Bad_Request)

    #check if the info in external_connections matches with the one in the vnfcs
    name_list = []
    for external_connection in vnf_descriptor["vnf"].get("external-connections", ()):
        if external_connection["name"] in name_list:
            raise NfvoException(
                "Error at vnf:external-connections:name, value '{}' already used as an external-connection".format(
                    external_connection["name"]),
                httperrors.Bad_Request)
        name_list.append(external_connection["name"])
        if external_connection["VNFC"] not in vnfc_interfaces:
            raise NfvoException(
                "Error at vnf:external-connections[name:'{}']:VNFC, value '{}' does not match any VNFC".format(
                    external_connection["name"], external_connection["VNFC"]),
                httperrors.Bad_Request)

        if external_connection["local_iface_name"] not in vnfc_interfaces[external_connection["VNFC"]]:
            raise NfvoException(
                "Error at vnf:external-connections[name:'{}']:local_iface_name, value '{}' does not match any interface of this VNFC".format(
                    external_connection["name"],
                    external_connection["local_iface_name"]),
                httperrors.Bad_Request )

    #check if the info in internal_connections matches with the one in the vnfcs
    name_list = []
    for internal_connection in vnf_descriptor["vnf"].get("internal-connections", ()):
        if internal_connection["name"] in name_list:
            raise NfvoException(
                "Error at vnf:internal-connections:name, value '{}' already used as an internal-connection".format(
                    internal_connection["name"]),
                httperrors.Bad_Request)
        name_list.append(internal_connection["name"])
        #We should check that internal-connections of type "ptp" have only 2 elements

        if len(internal_connection["elements"])>2 and (internal_connection.get("type") == "ptp" or internal_connection.get("type") == "e-line"):
            raise NfvoException(
                "Error at 'vnf:internal-connections[name:'{}']:elements', size must be 2 for a '{}' type. Consider change it to '{}' type".format(
                    internal_connection["name"],
                    'ptp' if vnf_descriptor_version==1 else 'e-line',
                    'data' if vnf_descriptor_version==1 else "e-lan"),
                httperrors.Bad_Request)
        for port in internal_connection["elements"]:
            vnf = port["VNFC"]
            iface = port["local_iface_name"]
            if vnf not in vnfc_interfaces:
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:VNFC, value '{}' does not match any VNFC".format(
                        internal_connection["name"], vnf),
                    httperrors.Bad_Request)
            if iface not in vnfc_interfaces[vnf]:
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:local_iface_name, value '{}' does not match any interface of this VNFC".format(
                        internal_connection["name"], iface),
                    httperrors.Bad_Request)
                # FIX: removed unreachable legacy "return -httperrors.Bad_Request," that followed the raise
            # fill the connection type/implementation default from the interface kind
            if vnf_descriptor_version==1 and "type" not in internal_connection:
                if vnfc_interfaces[vnf][iface] == "overlay":
                    internal_connection["type"] = "bridge"
                else:
                    internal_connection["type"] = "data"
            if vnf_descriptor_version==2 and "implementation" not in internal_connection:
                if vnfc_interfaces[vnf][iface] == "overlay":
                    internal_connection["implementation"] = "overlay"
                else:
                    internal_connection["implementation"] = "underlay"
            # underlay networks cannot connect overlay interfaces, and vice versa
            if (internal_connection.get("type") == "data" or internal_connection.get("type") == "ptp" or \
                internal_connection.get("implementation") == "underlay") and vnfc_interfaces[vnf][iface] == "overlay":
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:{}, interface of type {} connected to an {} network".format(
                        internal_connection["name"],
                        iface, 'bridge' if vnf_descriptor_version==1 else 'overlay',
                        'data' if vnf_descriptor_version==1 else 'underlay'),
                    httperrors.Bad_Request)
            if (internal_connection.get("type") == "bridge" or internal_connection.get("implementation") == "overlay") and \
                vnfc_interfaces[vnf][iface] == "underlay":
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:{}, interface of type {} connected to an {} network".format(
                        internal_connection["name"], iface,
                        'data' if vnf_descriptor_version==1 else 'underlay',
                        'bridge' if vnf_descriptor_version==1 else 'overlay'),
                    httperrors.Bad_Request)
+
+
def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=None):
    """
    Look for an image at the MANO DB and at every VIM; create it wherever it is missing.
    :param mydb: database connector
    :param vims: dictionary of vim connectors, indexed by datacenter id
    :param image_dict: image data: name, universal_name, checksum, location, metadata, description
    :param rollback_list: list where created DB/VIM items are appended for rollback upon failure
    :param only_create_at_vim: when True, image_dict['uuid'] must already exist at DB and only
        VIM creation is performed
    :param return_on_error: when True, raise on VIM errors instead of logging and continuing;
        defaults to True when only_create_at_vim
    :return: the VIM image id when only_create_at_vim, else the MANO image uuid
    :raises vimconn.VimConnException: upon VIM errors when return_on_error
    """
    #look if image exist
    if only_create_at_vim:
        image_mano_id = image_dict['uuid']
        if return_on_error == None:
            return_on_error = True
    else:
        # location (a path/URL) identifies the image; otherwise use universal_name+checksum
        if image_dict['location']:
            images = mydb.get_rows(FROM="images", WHERE={'location':image_dict['location'], 'metadata':image_dict['metadata']})
        else:
            images = mydb.get_rows(FROM="images", WHERE={'universal_name':image_dict['universal_name'], 'checksum':image_dict['checksum']})
        if len(images)>=1:
            image_mano_id = images[0]['uuid']
        else:
            #create image in MANO DB
            temp_image_dict={'name':image_dict['name'], 'description':image_dict.get('description',None),
                             'location':image_dict['location'], 'metadata':image_dict.get('metadata',None),
                             'universal_name':image_dict['universal_name'] , 'checksum':image_dict['checksum']
                             }
            #temp_image_dict['location'] = image_dict.get('new_location') if image_dict['location'] is None
            image_mano_id = mydb.new_row('images', temp_image_dict, add_uuid=True)
            rollback_list.append({"where":"mano", "what":"image","uuid":image_mano_id})
    #create image at every vim
    for vim_id,vim in vims.items():
        datacenter_vim_id = vim["config"]["datacenter_tenant_id"]
        image_created="false"
        #look at database
        image_db = mydb.get_rows(FROM="datacenters_images",
                                 WHERE={'datacenter_vim_id': datacenter_vim_id, 'image_id': image_mano_id})
        #look at VIM if this image exist
        try:
            if image_dict['location'] is not None:
                image_vim_id = vim.get_image_id_from_path(image_dict['location'])
            else:
                filter_dict = {}
                filter_dict['name'] = image_dict['universal_name']
                if image_dict.get('checksum') != None:
                    filter_dict['checksum'] = image_dict['checksum']
                #logger.debug('>>>>>>>> Filter dict: %s', str(filter_dict))
                vim_images = vim.get_image_list(filter_dict)
                #logger.debug('>>>>>>>> VIM images: %s', str(vim_images))
                if len(vim_images) > 1:
                    raise vimconn.VimConnException("More than one candidate VIM image found for filter: {}".format(str(filter_dict)), httperrors.Conflict)
                elif len(vim_images) == 0:
                    raise vimconn.VimConnNotFoundException("Image not found at VIM with filter: '{}'".format(str(filter_dict)))
                else:
                    #logger.debug('>>>>>>>> VIM image 0: %s', str(vim_images[0]))
                    image_vim_id = vim_images[0]['id']

        except vimconn.VimConnNotFoundException as e:
            #Create the image in VIM only if image_dict['location'] is not None
            try:
                #image_dict['location']=image_dict.get('new_location') if image_dict['location'] is None
                if image_dict['location']:
                    image_vim_id = vim.new_image(image_dict)
                    rollback_list.append({"where":"vim", "vim_id": vim_id, "what":"image","uuid":image_vim_id})
                    image_created="true"
                else:
                    #If we reach this point, then the image has image name, and optionally checksum, and could not be found
                    raise vimconn.VimConnException(str(e))
            except vimconn.VimConnException as e:
                if return_on_error:
                    logger.error("Error creating image at VIM '%s': %s", vim["name"], str(e))
                    raise
                image_vim_id = None
                logger.warn("Error creating image at VIM '%s': %s", vim["name"], str(e))
                continue
        except vimconn.VimConnException as e:
            if return_on_error:
                logger.error("Error contacting VIM to know if the image exists at VIM: %s", str(e))
                raise
            logger.warn("Error contacting VIM to know if the image exists at VIM: %s", str(e))
            image_vim_id = None
            continue
        #if we reach here, the image has been created or existed
        if len(image_db)==0:
            #add new vim_id at datacenters_images
            mydb.new_row('datacenters_images', {'datacenter_vim_id': datacenter_vim_id,
                                                'image_id':image_mano_id,
                                                'vim_id': image_vim_id,
                                                'created':image_created})
        elif image_db[0]["vim_id"]!=image_vim_id:
            #modify existing vim_id at datacenters_images
            # BUGFIX: filter by datacenter_vim_id (datacenter_tenant uuid), the same key used at
            # the lookup/insert above, instead of the vims-dict key (datacenter uuid) which
            # pointed the UPDATE at the wrong row
            mydb.update_rows('datacenters_images', UPDATE={'vim_id':image_vim_id},
                             WHERE={'datacenter_vim_id': datacenter_vim_id, 'image_id':image_mano_id})

    return image_vim_id if only_create_at_vim else image_mano_id
+
+
def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_vim=False, return_on_error = None):
    """
    Look for a flavor at the MANO DB and at every VIM; create it wherever missing,
    creating also the images of its additional disks.
    :param mydb: database connector
    :param vims: dictionary of vim connectors, indexed by datacenter id
    :param flavor_dict: flavor data: name, description, disk, ram, vcpus and optional 'extended'
        (extra specs and extra devices); NOTE it is modified in place ('uuid'/'extended' removed,
        device 'imageRef' added)
    :param rollback_list: list where created DB/VIM items are appended for rollback upon failure
    :param only_create_at_vim: when True, flavor_dict['uuid'] must already exist at DB and only
        VIM creation is performed
    :param return_on_error: when True, raise on VIM errors instead of logging and continuing;
        defaults to True when only_create_at_vim
    :return: the VIM flavor id when only_create_at_vim, else the MANO flavor uuid
    :raises vimconn.VimConnException: upon VIM errors when return_on_error
    """
    temp_flavor_dict= {'disk':flavor_dict.get('disk',0),
            'ram':flavor_dict.get('ram'),
            'vcpus':flavor_dict.get('vcpus'),
        }
    if 'extended' in flavor_dict and flavor_dict['extended']==None:
        del flavor_dict['extended']
    if 'extended' in flavor_dict:
        temp_flavor_dict['extended']=yaml.safe_dump(flavor_dict['extended'],default_flow_style=True,width=256)

    #look if flavor exist
    if only_create_at_vim:
        flavor_mano_id = flavor_dict['uuid']
        if return_on_error == None:
            return_on_error = True
    else:
        flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
        if len(flavors)>=1:
            flavor_mano_id = flavors[0]['uuid']
        else:
            #create flavor
            #create one by one the images of aditional disks
            dev_image_list=[] #list of images
            if 'extended' in flavor_dict and flavor_dict['extended']!=None:
                dev_nb=0
                for device in flavor_dict['extended'].get('devices',[]):
                    if "image" not in device and "image name" not in device:
                        continue
                    image_dict={}
                    image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
                    image_dict['universal_name']=device.get('image name')
                    image_dict['description']=flavor_dict['name']+str(dev_nb)+"-img"
                    image_dict['location']=device.get('image')
                    #image_dict['new_location']=vnfc.get('image location')
                    image_dict['checksum']=device.get('image checksum')
                    image_metadata_dict = device.get('image metadata', None)
                    image_metadata_str = None
                    if image_metadata_dict != None:
                        image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
                    image_dict['metadata']=image_metadata_str
                    image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
                    #print "Additional disk image id for VNFC %s: %s" % (flavor_dict['name']+str(dev_nb)+"-img", image_id)
                    dev_image_list.append(image_id)
                    dev_nb += 1
            temp_flavor_dict['name'] = flavor_dict['name']
            temp_flavor_dict['description'] = flavor_dict.get('description',None)
            content = mydb.new_row('flavors', temp_flavor_dict, add_uuid=True)
            flavor_mano_id= content
            rollback_list.append({"where":"mano", "what":"flavor","uuid":flavor_mano_id})
    #create flavor at every vim
    if 'uuid' in flavor_dict:
        del flavor_dict['uuid']
    flavor_vim_id=None
    for vim_id,vim in vims.items():
        datacenter_vim_id = vim["config"]["datacenter_tenant_id"]
        flavor_created="false"
        #look at database
        flavor_db = mydb.get_rows(FROM="datacenters_flavors",
                                  WHERE={'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id})
        #look at VIM if this flavor exist SKIPPED
        #res_vim, flavor_vim_id = vim.get_flavor_id_from_path(flavor_dict['location'])
        #if res_vim < 0:
        #    print "Error contacting VIM to know if the flavor %s existed previously." %flavor_vim_id
        #    continue
        #elif res_vim==0:

        # Create the flavor in VIM
        # Translate images at devices from MANO id to VIM id
        disk_list = []
        if 'extended' in flavor_dict and flavor_dict['extended']!=None and "devices" in flavor_dict['extended']:
            # make a copy of original devices, then strip image fields from the ones sent to the VIM
            devices_original=[]

            for device in flavor_dict["extended"].get("devices",[]):
                dev={}
                dev.update(device)
                devices_original.append(dev)
                if 'image' in device:
                    del device['image']
                if 'image metadata' in device:
                    del device['image metadata']
                if 'image checksum' in device:
                    del device['image checksum']
            dev_nb = 0
            for index in range(0,len(devices_original)) :
                device=devices_original[index]
                if "image" not in device and "image name" not in device:
                    # plain volume (no image): only size and name are needed
                    # if 'size' in device:
                    disk_list.append({'size': device.get('size', default_volume_size), 'name': device.get('name')})
                    continue
                image_dict={}
                image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
                image_dict['universal_name']=device.get('image name')
                image_dict['description']=flavor_dict['name']+str(dev_nb)+"-img"
                image_dict['location']=device.get('image')
                # image_dict['new_location']=device.get('image location')
                image_dict['checksum']=device.get('image checksum')
                image_metadata_dict = device.get('image metadata', None)
                image_metadata_str = None
                if image_metadata_dict != None:
                    image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
                image_dict['metadata']=image_metadata_str
                # ensure the image exists at MANO DB and then at this VIM, keeping both ids
                image_mano_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=return_on_error )
                image_dict["uuid"]=image_mano_id
                image_vim_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=True, return_on_error=return_on_error)

                #save disk information (image must be based on and size
                disk_list.append({'image_id': image_vim_id, 'size': device.get('size', default_volume_size)})

                flavor_dict["extended"]["devices"][index]['imageRef']=image_vim_id
                dev_nb += 1
        if len(flavor_db)>0:
            #check that this vim_id exist in VIM, if not create
            flavor_vim_id=flavor_db[0]["vim_id"]
            try:
                vim.get_flavor(flavor_vim_id)
                continue #flavor exist
            except vimconn.VimConnException:
                pass
        #create flavor at vim
        logger.debug("nfvo.create_or_use_flavor() adding flavor to VIM %s", vim["name"])
        try:
            # first try to reuse a VIM flavor matching the requested data
            flavor_vim_id = None
            flavor_vim_id=vim.get_flavor_id_from_data(flavor_dict)
            flavor_created="false"
        except vimconn.VimConnException as e:
            pass
        try:
            if not flavor_vim_id:
                flavor_vim_id = vim.new_flavor(flavor_dict)
                rollback_list.append({"where":"vim", "vim_id": vim_id, "what":"flavor","uuid":flavor_vim_id})
                flavor_created="true"
        except vimconn.VimConnException as e:
            if return_on_error:
                logger.error("Error creating flavor at VIM %s: %s.", vim["name"], str(e))
                raise
            logger.warn("Error creating flavor at VIM %s: %s.", vim["name"], str(e))
            flavor_vim_id = None
            continue
        #if reach here the flavor has been create or exist
        if len(flavor_db)==0:
            #add new vim_id at datacenters_flavors
            extended_devices_yaml = None
            if len(disk_list) > 0:
                extended_devices = dict()
                extended_devices['disks'] = disk_list
                extended_devices_yaml = yaml.safe_dump(extended_devices,default_flow_style=True,width=256)
            mydb.new_row('datacenters_flavors',
                         {'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id, 'vim_id': flavor_vim_id,
                          'created': flavor_created, 'extended': extended_devices_yaml})
        elif flavor_db[0]["vim_id"]!=flavor_vim_id:
            #modify existing vim_id at datacenters_flavors
            mydb.update_rows('datacenters_flavors', UPDATE={'vim_id':flavor_vim_id},
                             WHERE={'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id})

    return flavor_vim_id if only_create_at_vim else flavor_mano_id
+
+
def get_str(obj, field, length):
    """
    Obtain obj[field] as a string truncated to at most *length* characters.
    :param obj: dictionary-like object
    :param field: key to read
    :param length: maximum number of characters kept
    :return: truncated string, or None when the field is missing or None
    """
    raw = obj.get(field)
    return None if raw is None else str(raw)[:length]
+
def _lookfor_or_create_image(db_image, mydb, descriptor):
    """
    fill image content at db_image dictionary. Check if the image with this image and checksum exist
    :param db_image: dictionary to insert data
    :param mydb: database connector
    :param descriptor: yang descriptor
    :return: uuid if the image exist at DB, or None if a new image must be created with the data filled at db_image
    """

    db_image["name"] = get_str(descriptor, "image", 255)
    # empty checksum string is stored as None
    db_image["checksum"] = get_str(descriptor, "image-checksum", 32) or None
    if db_image["name"].startswith("/"):
        # absolute path: match by location
        db_image["location"] = db_image["name"]
        matches = mydb.get_rows(FROM="images", WHERE={'location': db_image["location"]})
    else:
        # otherwise match by universal name plus checksum
        db_image["universal_name"] = db_image["name"]
        matches = mydb.get_rows(FROM="images", WHERE={'universal_name': db_image['universal_name'],
                                                      'checksum': db_image['checksum']})
    if matches:
        return matches[0]["uuid"]
    db_image["uuid"] = str(uuid4())
    return None
+
def get_resource_allocation_params(quota_descriptor):
    """
    read the quota_descriptor from vnfd and fetch the resource allocation properties from the descriptor object
    :param quota_descriptor: cpu/mem/vif/disk-io quota descriptor
    :return: quota params for limit, reserve, shares from the descriptor object
    """
    quota = {}
    # only truthy values are kept, converted to int
    for key in ("limit", "reserve", "shares"):
        if quota_descriptor.get(key):
            quota[key] = int(quota_descriptor[key])
    return quota
+
def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
    """
    Parses an OSM IM vnfd_catalog and insert at DB
    :param mydb: database connector
    :param tenant_id: tenant owner of the new VNFs
    :param vnf_descriptor: dictionary with a "vnfd:vnfd-catalog" (or "vnfd-catalog") containing a list of vnfd
    :return: The list of created vnf ids (uuids)
    :raises NfvoException: when the descriptor is invalid or references non-existing items
    """
    try:
        myvnfd = vnfd_catalog.vnfd()
        try:
            # validate/normalize the descriptor against the OSM IM (pyangbind generated classes)
            pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True,
                                             skip_unknown=True)
        except Exception as e:
            raise NfvoException("Error. Invalid VNF descriptor format " + str(e), httperrors.Bad_Request)
        # rows to be inserted at each database table, plus the list of all generated uuids
        db_vnfs = []
        db_nets = []
        db_vms = []
        db_vms_index = 0
        db_interfaces = []
        db_images = []
        db_flavors = []
        db_ip_profiles_index = 0
        db_ip_profiles = []
        uuid_list = []
        vnfd_uuid_list = []
        # keep also the raw (non pyangbind) descriptor list: it contains fields that the IM object
        # drops (e.g. port-security-disable-strategy)
        vnfd_catalog_descriptor = vnf_descriptor.get("vnfd:vnfd-catalog")
        if not vnfd_catalog_descriptor:
            vnfd_catalog_descriptor = vnf_descriptor.get("vnfd-catalog")
        vnfd_descriptor_list = vnfd_catalog_descriptor.get("vnfd")
        if not vnfd_descriptor_list:
            vnfd_descriptor_list = vnfd_catalog_descriptor.get("vnfd:vnfd")
        for vnfd_yang in myvnfd.vnfd_catalog.vnfd.values():
            vnfd = vnfd_yang.get()

            # table vnf
            vnf_uuid = str(uuid4())
            uuid_list.append(vnf_uuid)
            vnfd_uuid_list.append(vnf_uuid)
            vnfd_id = get_str(vnfd, "id", 255)
            db_vnf = {
                "uuid": vnf_uuid,
                "osm_id": vnfd_id,
                "name": get_str(vnfd, "name", 255),
                "description": get_str(vnfd, "description", 255),
                "tenant_id": tenant_id,
                "vendor": get_str(vnfd, "vendor", 255),
                "short_name": get_str(vnfd, "short-name", 255),
                "descriptor": str(vnf_descriptor)[:60000]
            }

            # find the raw descriptor entry that matches this IM vnfd
            for vnfd_descriptor in vnfd_descriptor_list:
                if vnfd_descriptor["id"] == str(vnfd["id"]):
                    break

            # table ip_profiles (ip-profiles)
            ip_profile_name2db_table_index = {}
            for ip_profile in vnfd.get("ip-profiles").values():
                db_ip_profile = {
                    "ip_version": str(ip_profile["ip-profile-params"].get("ip-version", "ipv4")),
                    "subnet_address": str(ip_profile["ip-profile-params"].get("subnet-address")),
                    "gateway_address": str(ip_profile["ip-profile-params"].get("gateway-address")),
                    "dhcp_enabled": str(ip_profile["ip-profile-params"]["dhcp-params"].get("enabled", True)),
                    "dhcp_start_address": str(ip_profile["ip-profile-params"]["dhcp-params"].get("start-address")),
                    "dhcp_count": str(ip_profile["ip-profile-params"]["dhcp-params"].get("count")),
                }
                dns_list = []
                for dns in ip_profile["ip-profile-params"]["dns-server"].values():
                    dns_list.append(str(dns.get("address")))
                db_ip_profile["dns_address"] = ";".join(dns_list)
                if ip_profile["ip-profile-params"].get('security-group'):
                    db_ip_profile["security_group"] = ip_profile["ip-profile-params"]['security-group']
                # db_ip_profiles_index is NOT reset per vnfd: it indexes the global db_ip_profiles list
                ip_profile_name2db_table_index[str(ip_profile["name"])] = db_ip_profiles_index
                db_ip_profiles_index += 1
                db_ip_profiles.append(db_ip_profile)

            # table nets (internal-vld)
            net_id2uuid = {}  # for mapping interface with network
            net_id2index = {}  # for mapping interface with network
            for vld in vnfd.get("internal-vld").values():
                net_uuid = str(uuid4())
                uuid_list.append(net_uuid)
                db_net = {
                    "name": get_str(vld, "name", 255),
                    "vnf_id": vnf_uuid,
                    "uuid": net_uuid,
                    "description": get_str(vld, "description", 255),
                    "osm_id": get_str(vld, "id", 255),
                    "type": "bridge",  # TODO adjust depending on connection point type
                }
                net_id2uuid[vld.get("id")] = net_uuid
                net_id2index[vld.get("id")] = len(db_nets)
                db_nets.append(db_net)
                # ip-profile, link db_ip_profile with db_sce_net
                if vld.get("ip-profile-ref"):
                    ip_profile_name = vld.get("ip-profile-ref")
                    if ip_profile_name not in ip_profile_name2db_table_index:
                        raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vld[{}]':'ip-profile-ref':"
                                            "'{}'. Reference to a non-existing 'ip_profiles'".format(
                                                str(vnfd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
                                            httperrors.Bad_Request)
                    db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["net_id"] = net_uuid
                else:  # check no ip-address has been defined
                    for icp in vld.get("internal-connection-point").values():
                        if icp.get("ip-address"):
                            raise NfvoException("Error at 'vnfd[{}]':'vld[{}]':'internal-connection-point[{}]' "
                                                "contains an ip-address but no ip-profile has been defined at VLD".format(
                                                    str(vnfd["id"]), str(vld["id"]), str(icp["id"])),
                                                httperrors.Bad_Request)

            # connection points variable declaration
            cp_name2iface_uuid = {}
            cp_name2vdu_id = {}
            cp_name2vm_uuid = {}
            cp_name2db_interface = {}
            vdu_id2cp_name = {}  # stored only when one external connection point is presented at this VDU

            # table vms (vdus)
            vdu_id2uuid = {}
            vdu_id2db_table_index = {}
            mgmt_access = {}
            for vdu in vnfd.get("vdu").values():

                # find the raw vdu descriptor that matches this IM vdu
                for vdu_descriptor in vnfd_descriptor["vdu"]:
                    if vdu_descriptor["id"] == str(vdu["id"]):
                        break
                vm_uuid = str(uuid4())
                uuid_list.append(vm_uuid)
                vdu_id = get_str(vdu, "id", 255)
                db_vm = {
                    "uuid": vm_uuid,
                    "osm_id": vdu_id,
                    "name": get_str(vdu, "name", 255),
                    "description": get_str(vdu, "description", 255),
                    "pdu_type": get_str(vdu, "pdu-type", 255),
                    "vnf_id": vnf_uuid,
                }
                vdu_id2uuid[db_vm["osm_id"]] = vm_uuid
                vdu_id2db_table_index[db_vm["osm_id"]] = db_vms_index
                if vdu.get("count"):
                    db_vm["count"] = int(vdu["count"])

                # table image
                image_present = False
                if vdu.get("image"):
                    image_present = True
                    db_image = {}
                    image_uuid = _lookfor_or_create_image(db_image, mydb, vdu)
                    if not image_uuid:
                        # image does not exist at DB yet: queue the filled row for insertion
                        image_uuid = db_image["uuid"]
                        db_images.append(db_image)
                    db_vm["image_id"] = image_uuid
                if vdu.get("alternative-images"):
                    vm_alternative_images = []
                    for alt_image in vdu.get("alternative-images").values():
                        db_image = {}
                        image_uuid = _lookfor_or_create_image(db_image, mydb, alt_image)
                        if not image_uuid:
                            image_uuid = db_image["uuid"]
                            db_images.append(db_image)
                        vm_alternative_images.append({
                            "image_id": image_uuid,
                            "vim_type": str(alt_image["vim-type"]),
                            # "universal_name": str(alt_image["image"]),
                            # "checksum": str(alt_image["image-checksum"]) if alt_image.get("image-checksum") else None
                        })

                    db_vm["image_list"] = yaml.safe_dump(vm_alternative_images, default_flow_style=True, width=256)

                # volumes
                devices = []
                if vdu.get("volumes"):
                    for volume_key in vdu["volumes"]:
                        volume = vdu["volumes"][volume_key]
                        if not image_present:
                            # Convert the first volume to vnfc.image
                            image_present = True
                            db_image = {}
                            image_uuid = _lookfor_or_create_image(db_image, mydb, volume)
                            if not image_uuid:
                                image_uuid = db_image["uuid"]
                                db_images.append(db_image)
                            db_vm["image_id"] = image_uuid
                        else:
                            # Add Openmano devices
                            device = {"name": str(volume.get("name"))}
                            device["type"] = str(volume.get("device-type"))
                            if volume.get("size"):
                                device["size"] = int(volume["size"])
                            if volume.get("image"):
                                device["image name"] = str(volume["image"])
                            if volume.get("image-checksum"):
                                device["image checksum"] = str(volume["image-checksum"])

                            devices.append(device)

                if not db_vm.get("image_id"):
                    if not db_vm["pdu_type"]:
                        # NOTE(review): raised without an HTTP code argument, unlike other NfvoException
                        # raises in this function — confirm intended
                        raise NfvoException("Not defined image for VDU")
                    # create a fake image

                # cloud-init
                boot_data = {}
                if vdu.get("cloud-init"):
                    boot_data["user-data"] = str(vdu["cloud-init"])
                elif vdu.get("cloud-init-file"):
                    # TODO Where this file content is present???
                    # boot_data["user-data"] = vnfd_yang.files[vdu["cloud-init-file"]]
                    boot_data["user-data"] = str(vdu["cloud-init-file"])

                if vdu.get("supplemental-boot-data"):
                    if vdu["supplemental-boot-data"].get('boot-data-drive'):
                        boot_data['boot-data-drive'] = True
                    if vdu["supplemental-boot-data"].get('config-file'):
                        om_cfgfile_list = list()
                        for custom_config_file in vdu["supplemental-boot-data"]['config-file'].values():
                            # TODO Where this file content is present???
                            cfg_source = str(custom_config_file["source"])
                            om_cfgfile_list.append({"dest": custom_config_file["dest"],
                                                    "content": cfg_source})
                        boot_data['config-files'] = om_cfgfile_list
                if boot_data:
                    db_vm["boot_data"] = yaml.safe_dump(boot_data, default_flow_style=True, width=256)

                db_vms.append(db_vm)
                db_vms_index += 1

                # table interfaces (internal/external interfaces)
                flavor_epa_interfaces = []
                # for iface in chain(vdu.get("internal-interface").values(), vdu.get("external-interface").values()):
                for iface in vdu.get("interface").values():
                    flavor_epa_interface = {}
                    iface_uuid = str(uuid4())
                    uuid_list.append(iface_uuid)
                    db_interface = {
                        "uuid": iface_uuid,
                        "internal_name": get_str(iface, "name", 255),
                        "vm_id": vm_uuid,
                    }
                    flavor_epa_interface["name"] = db_interface["internal_name"]
                    if iface.get("virtual-interface").get("vpci"):
                        db_interface["vpci"] = get_str(iface.get("virtual-interface"), "vpci", 12)
                        flavor_epa_interface["vpci"] = db_interface["vpci"]

                    if iface.get("virtual-interface").get("bandwidth"):
                        # descriptor bandwidth is in bps; DB stores Mbps (rounded up)
                        bps = int(iface.get("virtual-interface").get("bandwidth"))
                        db_interface["bw"] = int(math.ceil(bps / 1000000.0))
                        flavor_epa_interface["bandwidth"] = "{} Mbps".format(db_interface["bw"])

                    if iface.get("virtual-interface").get("type") == "OM-MGMT":
                        db_interface["type"] = "mgmt"
                    elif iface.get("virtual-interface").get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
                        db_interface["type"] = "bridge"
                        db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12)
                    elif iface.get("virtual-interface").get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
                        db_interface["type"] = "data"
                        db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12)
                        flavor_epa_interface["dedicated"] = "no" if iface["virtual-interface"]["type"] == "SR-IOV" \
                            else "yes"
                        flavor_epa_interfaces.append(flavor_epa_interface)
                    else:
                        raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vdu[{}]':'interface':'virtual"
                                            "-interface':'type':'{}'. Interface type is not supported".format(
                                                vnfd_id, vdu_id, iface.get("virtual-interface").get("type")),
                                            httperrors.Bad_Request)

                    if iface.get("mgmt-interface"):
                        db_interface["type"] = "mgmt"

                    if iface.get("external-connection-point-ref"):
                        try:
                            cp = vnfd.get("connection-point")[iface.get("external-connection-point-ref")]
                            db_interface["external_name"] = get_str(cp, "name", 255)
                            cp_name2iface_uuid[db_interface["external_name"]] = iface_uuid
                            cp_name2vdu_id[db_interface["external_name"]] = vdu_id
                            cp_name2vm_uuid[db_interface["external_name"]] = vm_uuid
                            cp_name2db_interface[db_interface["external_name"]] = db_interface
                            # the raw descriptor is needed for the port-security fields below
                            for cp_descriptor in vnfd_descriptor["connection-point"]:
                                if cp_descriptor["name"] == db_interface["external_name"]:
                                    break
                            else:
                                raise KeyError()

                            if vdu_id in vdu_id2cp_name:
                                vdu_id2cp_name[vdu_id] = None  # more than two connection point for this VDU
                            else:
                                vdu_id2cp_name[vdu_id] = db_interface["external_name"]

                            # port security
                            if "port-security-disable-strategy" in cp_descriptor or str(cp_descriptor.get("port-security-enabled")).lower() == "false":
                                db_interface["port_security"] = 0
                                db_interface["port_security_disable_strategy"] = cp_descriptor.get("port-security-disable-strategy", "full")
                            elif str(cp_descriptor.get("port-security-enabled")).lower() == "true":
                                db_interface["port_security"] = 1
                        except KeyError:
                            raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
                                                "'interface[{iface}]':'vnfd-connection-point-ref':'{cp}' is not present"
                                                " at connection-point".format(
                                                    vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
                                                    cp=iface.get("vnfd-connection-point-ref")),
                                                httperrors.Bad_Request)
                    elif iface.get("internal-connection-point-ref"):
                        try:
                            for icp_descriptor in vdu_descriptor["internal-connection-point"]:
                                if icp_descriptor["id"] == str(iface.get("internal-connection-point-ref")):
                                    break
                            else:
                                raise KeyError("does not exist at vdu:internal-connection-point")
                            icp = None
                            icp_vld = None
                            for vld in vnfd.get("internal-vld").values():
                                for cp in vld.get("internal-connection-point").values():
                                    if cp.get("id-ref") == iface.get("internal-connection-point-ref"):
                                        if icp:
                                            raise KeyError("is referenced by more than one 'internal-vld'")
                                        icp = cp
                                        icp_vld = vld
                            if not icp:
                                raise KeyError("is not referenced by any 'internal-vld'")

                            # set network type as data
                            if iface.get("virtual-interface") and iface["virtual-interface"].get("type") in \
                                    ("SR-IOV", "PCI-PASSTHROUGH"):
                                db_nets[net_id2index[icp_vld.get("id")]]["type"] = "data"
                            db_interface["net_id"] = net_id2uuid[icp_vld.get("id")]
                            if "port-security-disable-strategy" in icp_descriptor or str(icp_descriptor.get("port-security-enabled")).lower() == "false":
                                db_interface["port_security"] = 0
                                db_interface["port_security_disable_strategy"] = icp_descriptor.get("port-security-disable-strategy", "full")
                            elif str(icp_descriptor.get("port-security-enabled")).lower() == "true":
                                db_interface["port_security"] = 1
                            if icp.get("ip-address"):
                                if not icp_vld.get("ip-profile-ref"):
                                    # NOTE(review): bare raise of the exception class, with no message nor
                                    # HTTP code — confirm intended
                                    raise NfvoException
                                db_interface["ip_address"] = str(icp.get("ip-address"))
                        except KeyError as e:
                            raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
                                                "'interface[{iface}]':'internal-connection-point-ref':'{cp}'"
                                                " {msg}".format(
                                                    vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
                                                    cp=iface.get("internal-connection-point-ref"), msg=str(e)),
                                                httperrors.Bad_Request)
                    if iface.get("position"):
                        # 'created_at' is reused to impose interface ordering
                        db_interface["created_at"] = int(iface.get("position")) * 50
                    if iface.get("mac-address"):
                        db_interface["mac"] = str(iface.get("mac-address"))
                    db_interfaces.append(db_interface)

                # table flavors
                db_flavor = {
                    "name": get_str(vdu, "name", 250) + "-flv",
                    "vcpus": int(vdu["vm-flavor"].get("vcpu-count", 1)),
                    "ram": int(vdu["vm-flavor"].get("memory-mb", 1)),
                    "disk": int(vdu["vm-flavor"].get("storage-gb", 0)),
                }
                # TODO revise the case of several numa-node-policy node
                extended = {}
                numa = {}
                if devices:
                    extended["devices"] = devices
                if flavor_epa_interfaces:
                    numa["interfaces"] = flavor_epa_interfaces
                if vdu.get("guest-epa"):  # TODO or dedicated_int:
                    epa_vcpu_set = False
                    if vdu["guest-epa"].get("numa-node-policy"):  # TODO or dedicated_int:
                        numa_node_policy = vdu["guest-epa"].get("numa-node-policy")
                        if numa_node_policy.get("node"):
                            # only the first numa node is considered (see TODO above)
                            numa_node = next(iter(numa_node_policy["node"].values()))
                            if numa_node.get("num-cores"):
                                numa["cores"] = numa_node["num-cores"]
                                epa_vcpu_set = True
                            if numa_node.get("paired-threads"):
                                if numa_node["paired-threads"].get("num-paired-threads"):
                                    numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"])
                                    epa_vcpu_set = True
                                if len(numa_node["paired-threads"].get("paired-thread-ids")):
                                    numa["paired-threads-id"] = []
                                    for pair in numa_node["paired-threads"]["paired-thread-ids"].values():
                                        numa["paired-threads-id"].append(
                                            (str(pair["thread-a"]), str(pair["thread-b"]))
                                        )
                            if numa_node.get("num-threads"):
                                numa["threads"] = int(numa_node["num-threads"])
                                epa_vcpu_set = True
                            if numa_node.get("memory-mb"):
                                numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1)
                    if vdu["guest-epa"].get("mempage-size"):
                        if vdu["guest-epa"]["mempage-size"] != "SMALL":
                            numa["memory"] = max(int(db_flavor["ram"] / 1024), 1)
                    if vdu["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set:
                        if vdu["guest-epa"]["cpu-pinning-policy"] == "DEDICATED":
                            if vdu["guest-epa"].get("cpu-thread-pinning-policy") and \
                                    vdu["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER":
                                numa["cores"] = max(db_flavor["vcpus"], 1)
                            else:
                                numa["threads"] = max(db_flavor["vcpus"], 1)
                            epa_vcpu_set = True
                    if vdu["guest-epa"].get("cpu-quota") and not epa_vcpu_set:
                        cpuquota = get_resource_allocation_params(vdu["guest-epa"].get("cpu-quota"))
                        if cpuquota:
                            extended["cpu-quota"] = cpuquota
                    if vdu["guest-epa"].get("mem-quota"):
                        vduquota = get_resource_allocation_params(vdu["guest-epa"].get("mem-quota"))
                        if vduquota:
                            extended["mem-quota"] = vduquota
                    if vdu["guest-epa"].get("disk-io-quota"):
                        diskioquota = get_resource_allocation_params(vdu["guest-epa"].get("disk-io-quota"))
                        if diskioquota:
                            extended["disk-io-quota"] = diskioquota
                    if vdu["guest-epa"].get("vif-quota"):
                        vifquota = get_resource_allocation_params(vdu["guest-epa"].get("vif-quota"))
                        if vifquota:
                            extended["vif-quota"] = vifquota
                if numa:
                    extended["numas"] = [numa]
                if extended:
                    extended_text = yaml.safe_dump(extended, default_flow_style=True, width=256)
                    db_flavor["extended"] = extended_text
                # look if flavor exist
                temp_flavor_dict = {'disk': db_flavor.get('disk', 0),
                                    'ram': db_flavor.get('ram'),
                                    'vcpus': db_flavor.get('vcpus'),
                                    'extended': db_flavor.get('extended')
                                    }
                existing_flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
                if existing_flavors:
                    flavor_uuid = existing_flavors[0]["uuid"]
                else:
                    flavor_uuid = str(uuid4())
                    uuid_list.append(flavor_uuid)
                    db_flavor["uuid"] = flavor_uuid
                    db_flavors.append(db_flavor)
                db_vm["flavor_id"] = flavor_uuid

            # VNF affinity and antiaffinity
            for pg in vnfd.get("placement-groups").values():
                pg_name = get_str(pg, "name", 255)
                for vdu in pg.get("member-vdus").values():
                    vdu_id = get_str(vdu, "member-vdu-ref", 255)
                    if vdu_id not in vdu_id2db_table_index:
                        raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'placement-groups[{pg}]':"
                                            "'member-vdus':'{vdu}'. Reference to a non-existing vdu".format(
                                                vnf=vnfd_id, pg=pg_name, vdu=vdu_id),
                                            httperrors.Bad_Request)
                    db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
                    # TODO consider the case of isolation and not colocation
                    # if pg.get("strategy") == "ISOLATION":

            # VNF mgmt configuration
            if vnfd["mgmt-interface"].get("vdu-id"):
                mgmt_vdu_id = get_str(vnfd["mgmt-interface"], "vdu-id", 255)
                if mgmt_vdu_id not in vdu_id2uuid:
                    raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'vdu-id':"
                                        "'{vdu}'. Reference to a non-existing vdu".format(
                                            vnf=vnfd_id, vdu=mgmt_vdu_id),
                                        httperrors.Bad_Request)
                mgmt_access["vm_id"] = vdu_id2uuid[mgmt_vdu_id]
                mgmt_access["vdu-id"] = mgmt_vdu_id
                # if only one cp is defined by this VDU, mark this interface as of type "mgmt"
                if vdu_id2cp_name.get(mgmt_vdu_id):
                    if cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]:
                        cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]["type"] = "mgmt"

            if vnfd["mgmt-interface"].get("ip-address"):
                mgmt_access["ip-address"] = str(vnfd["mgmt-interface"].get("ip-address"))
            if vnfd["mgmt-interface"].get("cp") and vnfd.get("vdu"):
                if vnfd["mgmt-interface"]["cp"] not in cp_name2iface_uuid:
                    raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'cp'['{cp}']. "
                                        "Reference to a non-existing connection-point".format(
                                            vnf=vnfd_id, cp=vnfd["mgmt-interface"]["cp"]),
                                        httperrors.Bad_Request)
                mgmt_access["vm_id"] = cp_name2vm_uuid[vnfd["mgmt-interface"]["cp"]]
                mgmt_access["interface_id"] = cp_name2iface_uuid[vnfd["mgmt-interface"]["cp"]]
                mgmt_access["vdu-id"] = cp_name2vdu_id[vnfd["mgmt-interface"]["cp"]]
                # mark this interface as of type mgmt
                if cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]:
                    cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]["type"] = "mgmt"

            default_user = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}).get("ssh-access", {}),
                                   "default-user", 64)
            if default_user:
                mgmt_access["default_user"] = default_user

            required = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}).get("ssh-access", {}),
                               "required", 6)
            if required:
                mgmt_access["required"] = required

            password_ = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}),
                                "password", 64)
            if password_:
                mgmt_access["password"] = password_

            if mgmt_access:
                db_vnf["mgmt_access"] = yaml.safe_dump(mgmt_access, default_flow_style=True, width=256)

            db_vnfs.append(db_vnf)
        db_tables=[
            {"vnfs": db_vnfs},
            {"nets": db_nets},
            {"images": db_images},
            {"flavors": db_flavors},
            {"ip_profiles": db_ip_profiles},
            {"vms": db_vms},
            {"interfaces": db_interfaces},
        ]

        logger.debug("create_vnf Deployment done vnfDict: %s",
                     yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
        # single transactional insertion of every collected row
        mydb.new_rows(db_tables, uuid_list)
        return vnfd_uuid_list
    except NfvoException:
        raise
    except Exception as e:
        logger.error("Exception {}".format(e))
        raise  # NfvoException("Exception {}".format(e), httperrors.Bad_Request)
+
+
@deprecated("Use new_vnfd_v3")
def new_vnf(mydb, tenant_id, vnf_descriptor):
    """
    Creates a VNF from a version-1 descriptor and stores it at the NFVO database.
    Flavors and images are also created at the VIMs when 'auto_push_VNF_to_VIMs' is configured;
    on failure, items created at the VIMs are rolled back.
    :param mydb: database connector
    :param tenant_id: tenant owner of the VNF, or "any"
    :param vnf_descriptor: dictionary with the descriptor under the "vnf" key
    :return: the uuid of the created VNF
    :raises NfvoException: on invalid descriptor, database or VIM errors
    """
    global global_config

    # Step 1. Check the VNF descriptor
    check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1)
    # Step 2. Check tenant exist
    vims = {}
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        if "tenant_id" in vnf_descriptor["vnf"]:
            if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(vnf_descriptor["vnf"]["tenant_id"], tenant_id),
                                    httperrors.Unauthorized)
        else:
            vnf_descriptor['vnf']['tenant_id'] = tenant_id
        # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
        if global_config["auto_push_VNF_to_VIMs"]:
            vims = get_vim(mydb, tenant_id, ignore_errors=True)

    # Step 4. Review the descriptor and add missing fields
    #print vnf_descriptor
    #logger.debug("Refactoring VNF descriptor with fields: description, public (default: true)")
    vnf_name = vnf_descriptor['vnf']['name']
    vnf_descriptor['vnf']['description'] = vnf_descriptor['vnf'].get("description", vnf_name)
    if "physical" in vnf_descriptor['vnf']:
        del vnf_descriptor['vnf']['physical']
    #print vnf_descriptor

    # Step 6. For each VNFC in the descriptor, flavors and images are created in the VIM
    logger.debug('BEGIN creation of VNF "%s"' % vnf_name)
    logger.debug("VNF %s: consisting of %d VNFC(s)" % (vnf_name,len(vnf_descriptor['vnf']['VNFC'])))

    #For each VNFC, we add it to the VNFCDict and we create a flavor.
    VNFCDict = {}  # Dictionary, key: VNFC name, value: dict with the relevant information to create the VNF and VMs in the MANO database
    rollback_list = []  # It will contain the new images created in mano. It is used for rollback
    try:
        logger.debug("Creating additional disk images and new flavors in the VIM for each VNFC")
        for vnfc in vnf_descriptor['vnf']['VNFC']:
            VNFCitem={}
            VNFCitem["name"] = vnfc['name']
            VNFCitem["availability_zone"] = vnfc.get('availability_zone')
            VNFCitem["description"] = vnfc.get("description", 'VM {} of the VNF {}'.format(vnfc['name'],vnf_name))

            #print "Flavor name: %s. Description: %s" % (VNFCitem["name"]+"-flv", VNFCitem["description"])

            myflavorDict = {}
            myflavorDict["name"] = vnfc['name']+"-flv"  #Maybe we could rename the flavor by using the field "image name" if exists
            myflavorDict["description"] = VNFCitem["description"]
            myflavorDict["ram"] = vnfc.get("ram", 0)
            myflavorDict["vcpus"] = vnfc.get("vcpus", 0)
            myflavorDict["disk"] = vnfc.get("disk", 0)
            myflavorDict["extended"] = {}

            devices = vnfc.get("devices")
            if devices != None:
                myflavorDict["extended"]["devices"] = devices

            # TODO:
            # Mapping from processor models to rankings should be available somehow in the NFVO. They could be taken from VIM or directly from a new database table
            # Another option is that the processor in the VNF descriptor specifies directly the ranking of the host

            # Previous code has been commented
            #if vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" :
            #    myflavorDict["flavor"]['extended']['processor_ranking'] = 200
            #elif vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz" :
            #    myflavorDict["flavor"]['extended']['processor_ranking'] = 300
            #else:
            #    result2, message = rollback(myvim, myvimURL, myvim_tenant, flavorList, imageList)
            #    if result2:
            #        print "Error creating flavor: unknown processor model. Rollback successful."
            #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
            #    else:
            #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
            myflavorDict['extended']['processor_ranking'] = 100  #Hardcoded value, while we decide when the mapping is done

            if 'numas' in vnfc and len(vnfc['numas'])>0:
                myflavorDict['extended']['numas'] = vnfc['numas']

            #print myflavorDict

            # Step 6.2 New flavors are created in the VIM
            flavor_id = create_or_use_flavor(mydb, vims, myflavorDict, rollback_list)

            #print "Flavor id for VNFC %s: %s" % (vnfc['name'],flavor_id)
            VNFCitem["flavor_id"] = flavor_id
            VNFCDict[vnfc['name']] = VNFCitem

        logger.debug("Creating new images in the VIM for each VNFC")
        # Step 6.3 New images are created in the VIM
        #For each VNFC, we must create the appropriate image.
        #This "for" loop might be integrated with the previous one
        #In case this integration is made, the VNFCDict might become a VNFClist.
        for vnfc in vnf_descriptor['vnf']['VNFC']:
            #print "Image name: %s. Description: %s" % (vnfc['name']+"-img", VNFCDict[vnfc['name']]['description'])
            image_dict={}
            image_dict['name']=vnfc.get('image name',vnf_name+"-"+vnfc['name']+"-img")
            image_dict['universal_name']=vnfc.get('image name')
            image_dict['description']=vnfc.get('image name', VNFCDict[vnfc['name']]['description'])
            image_dict['location']=vnfc.get('VNFC image')
            #image_dict['new_location']=vnfc.get('image location')
            image_dict['checksum']=vnfc.get('image checksum')
            image_metadata_dict = vnfc.get('image metadata', None)
            image_metadata_str = None
            if image_metadata_dict is not None:
                image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
            image_dict['metadata']=image_metadata_str
            #print "create_or_use_image", mydb, vims, image_dict, rollback_list
            image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
            #print "Image id for VNFC %s: %s" % (vnfc['name'],image_id)
            VNFCDict[vnfc['name']]["image_id"] = image_id
            VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
            VNFCDict[vnfc['name']]["count"] = vnfc.get('count', 1)
            if vnfc.get("boot-data"):
                VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"], default_flow_style=True, width=256)


        # Step 7. Storing the VNF descriptor in the repository
        if "descriptor" not in vnf_descriptor["vnf"]:
            vnf_descriptor["vnf"]["descriptor"] = yaml.safe_dump(vnf_descriptor, indent=4, explicit_start=True, default_flow_style=False)

        # Step 8. Adding the VNF to the NFVO DB
        vnf_id = mydb.new_vnf_as_a_whole(tenant_id,vnf_name,vnf_descriptor,VNFCDict)
        return vnf_id
    except (db_base_Exception, vimconn.VimConnException, KeyError) as e:
        # undo whatever was created at the VIMs before re-raising as NfvoException
        _, message = rollback(mydb, vims, rollback_list)
        if isinstance(e, db_base_Exception):
            error_text = "Exception at database"
        elif isinstance(e, KeyError):
            error_text = "KeyError exception "
            # KeyError has no http_code attribute; attach one so the raise below works
            e.http_code = httperrors.Internal_Server_Error
        else:
            error_text = "Exception at VIM"
        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
        #logger.error("start_scenario %s", error_text)
        raise NfvoException(error_text, e.http_code)
+
+
+@deprecated("Use new_vnfd_v3")
+def new_vnf_v02(mydb, tenant_id, vnf_descriptor):
+ global global_config
+
+ # Step 1. Check the VNF descriptor
+ check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=2)
+ # Step 2. Check tenant exist
+ vims = {}
+ if tenant_id != "any":
+ check_tenant(mydb, tenant_id)
+ if "tenant_id" in vnf_descriptor["vnf"]:
+ if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
+ raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(vnf_descriptor["vnf"]["tenant_id"], tenant_id),
+ httperrors.Unauthorized)
+ else:
+ vnf_descriptor['vnf']['tenant_id'] = tenant_id
+ # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
+ if global_config["auto_push_VNF_to_VIMs"]:
+ vims = get_vim(mydb, tenant_id, ignore_errors=True)
+
+ # Step 4. Review the descriptor and add missing fields
+ #print vnf_descriptor
+ #logger.debug("Refactoring VNF descriptor with fields: description, public (default: true)")
+ vnf_name = vnf_descriptor['vnf']['name']
+ vnf_descriptor['vnf']['description'] = vnf_descriptor['vnf'].get("description", vnf_name)
+ if "physical" in vnf_descriptor['vnf']:
+ del vnf_descriptor['vnf']['physical']
+ #print vnf_descriptor
+
+ # Step 6. For each VNFC in the descriptor, flavors and images are created in the VIM
+ logger.debug('BEGIN creation of VNF "%s"' % vnf_name)
+ logger.debug("VNF %s: consisting of %d VNFC(s)" % (vnf_name,len(vnf_descriptor['vnf']['VNFC'])))
+
+ #For each VNFC, we add it to the VNFCDict and we create a flavor.
+ VNFCDict = {} # Dictionary, key: VNFC name, value: dict with the relevant information to create the VNF and VMs in the MANO database
+ rollback_list = [] # It will contain the new images created in mano. It is used for rollback
+ try:
+ logger.debug("Creating additional disk images and new flavors in the VIM for each VNFC")
+ for vnfc in vnf_descriptor['vnf']['VNFC']:
+ VNFCitem={}
+ VNFCitem["name"] = vnfc['name']
+ VNFCitem["description"] = vnfc.get("description", 'VM {} of the VNF {}'.format(vnfc['name'],vnf_name))
+
+ #print "Flavor name: %s. Description: %s" % (VNFCitem["name"]+"-flv", VNFCitem["description"])
+
+ myflavorDict = {}
+ myflavorDict["name"] = vnfc['name']+"-flv" #Maybe we could rename the flavor by using the field "image name" if exists
+ myflavorDict["description"] = VNFCitem["description"]
+ myflavorDict["ram"] = vnfc.get("ram", 0)
+ myflavorDict["vcpus"] = vnfc.get("vcpus", 0)
+ myflavorDict["disk"] = vnfc.get("disk", 0)
+ myflavorDict["extended"] = {}
+
+ devices = vnfc.get("devices")
+ if devices != None:
+ myflavorDict["extended"]["devices"] = devices
+
+ # TODO:
+ # Mapping from processor models to rankings should be available somehow in the NFVO. They could be taken from VIM or directly from a new database table
+ # Another option is that the processor in the VNF descriptor specifies directly the ranking of the host
+
+ # Previous code has been commented
+ #if vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" :
+ # myflavorDict["flavor"]['extended']['processor_ranking'] = 200
+ #elif vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz" :
+ # myflavorDict["flavor"]['extended']['processor_ranking'] = 300
+ #else:
+ # result2, message = rollback(myvim, myvimURL, myvim_tenant, flavorList, imageList)
+ # if result2:
+ # print "Error creating flavor: unknown processor model. Rollback successful."
+ # return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
+ # else:
+ # return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
+ myflavorDict['extended']['processor_ranking'] = 100 #Hardcoded value, while we decide when the mapping is done
+
+ if 'numas' in vnfc and len(vnfc['numas'])>0:
+ myflavorDict['extended']['numas'] = vnfc['numas']
+
+ #print myflavorDict
+
+ # Step 6.2 New flavors are created in the VIM
+ flavor_id = create_or_use_flavor(mydb, vims, myflavorDict, rollback_list)
+
+ #print "Flavor id for VNFC %s: %s" % (vnfc['name'],flavor_id)
+ VNFCitem["flavor_id"] = flavor_id
+ VNFCDict[vnfc['name']] = VNFCitem
+
+ logger.debug("Creating new images in the VIM for each VNFC")
+ # Step 6.3 New images are created in the VIM
+ #For each VNFC, we must create the appropriate image.
+ #This "for" loop might be integrated with the previous one
+ #In case this integration is made, the VNFCDict might become a VNFClist.
+ for vnfc in vnf_descriptor['vnf']['VNFC']:
+ #print "Image name: %s. Description: %s" % (vnfc['name']+"-img", VNFCDict[vnfc['name']]['description'])
+ image_dict={}
+ image_dict['name']=vnfc.get('image name',vnf_name+"-"+vnfc['name']+"-img")
+ image_dict['universal_name']=vnfc.get('image name')
+ image_dict['description']=vnfc.get('image name', VNFCDict[vnfc['name']]['description'])
+ image_dict['location']=vnfc.get('VNFC image')
+ #image_dict['new_location']=vnfc.get('image location')
+ image_dict['checksum']=vnfc.get('image checksum')
+ image_metadata_dict = vnfc.get('image metadata', None)
+ image_metadata_str = None
+ if image_metadata_dict is not None:
+ image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
+ image_dict['metadata']=image_metadata_str
+ #print "create_or_use_image", mydb, vims, image_dict, rollback_list
+ image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
+ #print "Image id for VNFC %s: %s" % (vnfc['name'],image_id)
+ VNFCDict[vnfc['name']]["image_id"] = image_id
+ VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
+ VNFCDict[vnfc['name']]["count"] = vnfc.get('count', 1)
+ if vnfc.get("boot-data"):
+ VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"], default_flow_style=True, width=256)
+
+ # Step 7. Storing the VNF descriptor in the repository
+ if "descriptor" not in vnf_descriptor["vnf"]:
+ vnf_descriptor["vnf"]["descriptor"] = yaml.safe_dump(vnf_descriptor, indent=4, explicit_start=True, default_flow_style=False)
+
+ # Step 8. Adding the VNF to the NFVO DB
+ vnf_id = mydb.new_vnf_as_a_whole2(tenant_id,vnf_name,vnf_descriptor,VNFCDict)
+ return vnf_id
+ except (db_base_Exception, vimconn.VimConnException, KeyError) as e:
+ _, message = rollback(mydb, vims, rollback_list)
+ if isinstance(e, db_base_Exception):
+ error_text = "Exception at database"
+ elif isinstance(e, KeyError):
+ error_text = "KeyError exception "
+ e.http_code = httperrors.Internal_Server_Error
+ else:
+ error_text = "Exception at VIM"
+ error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
+ #logger.error("start_scenario %s", error_text)
+ raise NfvoException(error_text, e.http_code)
+
+
def get_vnf_id(mydb, tenant_id, vnf_id):
    """Return the stored description of one VNF: VMs, nets, ip-profiles and external connections.

    :param mydb: nfvo database object
    :param tenant_id: tenant that must own the VNF, or "any" to also match public VNFs
    :param vnf_id: VNF uuid or name
    :return: dict with a single 'vnf' key containing the gathered information
    :raises NfvoException: on invalid tenant or ambiguous ip-profile data
    """
    # the tenant must exist; check_tenant raises otherwise
    check_tenant(mydb, tenant_id)

    # locate the VNF by uuid or name, restricted to the tenant's own or public entries
    ownership = {}
    if tenant_id != "any":
        ownership = {"tenant_id": tenant_id, "public": True}
    vnf = mydb.get_table_by_uuid_name('vnfs', vnf_id, "VNF", WHERE_OR=ownership, WHERE_AND_OR="AND")
    vnf_id = vnf["uuid"]

    # expose only the externally relevant VNF fields
    exposed = ('uuid', 'name', 'description', 'public', "tenant_id", "osm_id", "created_at")
    data = {'vnf': {key: value for key, value in vnf.items() if key in exposed}}

    # VMs (VNFCs) that belong to the VNF
    vms = mydb.get_rows(FROM='vnfs join vms on vnfs.uuid=vms.vnf_id',
                        SELECT=('vms.uuid as uuid', 'vms.osm_id as osm_id', 'vms.name as name', 'vms.description as description',
                                'boot_data'),
                        WHERE={'vnfs.uuid': vnf_id} )
    # rename the internal boot_data column to its external name "boot-data",
    # decoding the YAML text stored in the database
    for vm in vms:
        if vm.get("boot_data"):
            vm["boot-data"] = yaml.safe_load(vm["boot_data"])
            del vm["boot_data"]
    data['vnf']['VNFC'] = vms
    #TODO: GET all the information from a VNFC and include it in the output.

    # internal networks of the VNF
    data['vnf']['nets'] = mydb.get_rows(FROM='vnfs join nets on vnfs.uuid=nets.vnf_id',
                                        SELECT=('nets.uuid as uuid','nets.name as name','nets.description as description', 'nets.type as type', 'nets.multipoint as multipoint'),
                                        WHERE={'vnfs.uuid': vnf_id} )

    # attach the ip-profile, if any, of each internal network
    for net in data['vnf']['nets']:
        profiles = mydb.get_rows(FROM='ip_profiles',
                                 SELECT=('ip_version','subnet_address','gateway_address','dns_address','dhcp_enabled','dhcp_start_address','dhcp_count'),
                                 WHERE={'net_id': net["uuid"]} )
        if len(profiles) > 1:
            raise NfvoException("More than one ip-profile found with this criteria: net_id='{}'".format(net['uuid']), httperrors.Bad_Request)
        if profiles:
            net["ip_profile"] = profiles[0]

    #TODO: For each net, GET its elements and relevant info per element (VNFC, iface, ip_address) and include them in the output.

    # external interfaces: those whose external_name is not null
    data['vnf']['external-connections'] = mydb.get_rows(
        FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces on vms.uuid=interfaces.vm_id',
        SELECT=('interfaces.uuid as uuid','interfaces.external_name as external_name', 'vms.name as vm_name', 'interfaces.vm_id as vm_id',
                'interfaces.internal_name as internal_name', 'interfaces.type as type', 'interfaces.vpci as vpci','interfaces.bw as bw'),
        WHERE={'vnfs.uuid': vnf_id, 'interfaces.external_name<>': None} )

    return data
+
+
def delete_vnf(mydb,tenant_id,vnf_id,datacenter=None,vim_tenant=None):
    """Delete a VNF from the NFVO database and best-effort remove its flavors and images.

    Flavors/images still referenced by other VNFs are kept. VIM-side deletion
    failures are logged and collected in undeletedItems but never abort the
    operation (the return value does not report them — see the commented-out
    tail of this function).

    :param mydb: nfvo database object
    :param tenant_id: owner tenant, or "any" to skip the ownership check
    :param vnf_id: VNF uuid or name
    :param datacenter: unused, kept for backward compatibility
    :param vim_tenant: unused, kept for backward compatibility
    :return: "<uuid> <name>" of the deleted VNF
    :raises NfvoException: if the VNF does not exist
    """
    # Check tenant exist
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        # Get the URL of the VIM from the nfvo_tenant and the datacenter
        vims = get_vim(mydb, tenant_id, ignore_errors=True)
    else:
        vims={}

    # Checking if it is a valid uuid and, if not, getting the uuid assuming that the name was provided"
    where_or = {}
    if tenant_id != "any":
        where_or["tenant_id"] = tenant_id
        where_or["public"] = True
    vnf = mydb.get_table_by_uuid_name('vnfs', vnf_id, "VNF", WHERE_OR=where_or, WHERE_AND_OR="AND")
    vnf_id = vnf["uuid"]

    # "Getting the list of flavors and tenants of the VNF"
    flavorList = get_flavorlist(mydb, vnf_id)
    if len(flavorList)==0:
        logger.warn("delete_vnf error. No flavors found for the VNF id '%s'", vnf_id)

    imageList = get_imagelist(mydb, vnf_id)
    if len(imageList)==0:
        logger.warn( "delete_vnf error. No images found for the VNF id '%s'", vnf_id)

    # remove the VNF row first; if nothing was deleted the VNF never existed
    deleted = mydb.delete_row_by_id('vnfs', vnf_id)
    if deleted == 0:
        raise NfvoException("vnf '{}' not found".format(vnf_id), httperrors.Not_Found)

    undeletedItems = []
    for flavor in flavorList:
        #check if flavor is used by other vnf
        try:
            c = mydb.get_rows(FROM='vms', WHERE={'flavor_id':flavor} )
            if len(c) > 0:
                logger.debug("Flavor '%s' not deleted because it is being used by another VNF", flavor)
                continue
            #flavor not used, must be deleted
            #delelte at VIM
            c = mydb.get_rows(FROM='datacenters_flavors', WHERE={'flavor_id': flavor})
            for flavor_vim in c:
                if not flavor_vim['created']:  # skip this flavor because not created by openmano
                    continue
                # look for vim
                myvim = None
                for vim in vims.values():
                    if vim["config"]["datacenter_tenant_id"] == flavor_vim["datacenter_vim_id"]:
                        myvim = vim
                        break
                if not myvim:
                    continue
                try:
                    myvim.delete_flavor(flavor_vim["vim_id"])
                except vimconn.VimConnNotFoundException:
                    # already gone at the VIM: only worth a warning
                    logger.warn("VIM flavor %s not exist at datacenter %s", flavor_vim["vim_id"],
                                flavor_vim["datacenter_vim_id"] )
                except vimconn.VimConnException as e:
                    logger.error("Not possible to delete VIM flavor %s from datacenter %s: %s %s",
                                 flavor_vim["vim_id"], flavor_vim["datacenter_vim_id"], type(e).__name__, str(e))
                    undeletedItems.append("flavor {} from VIM {}".format(flavor_vim["vim_id"],
                                                                         flavor_vim["datacenter_vim_id"]))
            # delete flavor from Database, using table flavors and with cascade foreign key also at datacenters_flavors
            mydb.delete_row_by_id('flavors', flavor)
        except db_base_Exception as e:
            logger.error("delete_vnf_error. Not possible to get flavor details and delete '%s'. %s", flavor, str(e))
            undeletedItems.append("flavor {}".format(flavor))


    for image in imageList:
        try:
            #check if image is used by other vnf
            c = mydb.get_rows(FROM='vms', WHERE=[{'image_id': image}, {'image_list LIKE ': '%' + image + '%'}])
            if len(c) > 0:
                logger.debug("Image '%s' not deleted because it is being used by another VNF", image)
                continue
            #image not used, must be deleted
            #delelte at VIM
            c = mydb.get_rows(FROM='datacenters_images', WHERE={'image_id':image})
            for image_vim in c:
                if image_vim["datacenter_vim_id"] not in vims:  # TODO change to datacenter_tenant_id
                    continue
                # NOTE(review): 'created' is compared against the string 'false' here, while the
                # flavor loop above uses plain truthiness on the same kind of column — confirm
                # which representation the database actually stores
                if image_vim['created']=='false': #skip this image because not created by openmano
                    continue
                # NOTE(review): membership above is tested with "datacenter_vim_id" but the
                # lookup below indexes vims by "datacenter_id"; if these columns differ this
                # raises KeyError — confirm which column keys the vims dict
                myvim=vims[ image_vim["datacenter_id"] ]
                try:
                    myvim.delete_image(image_vim["vim_id"])
                except vimconn.VimConnNotFoundException as e:
                    logger.warn("VIM image %s not exist at datacenter %s", image_vim["vim_id"], image_vim["datacenter_id"] )
                except vimconn.VimConnException as e:
                    logger.error("Not possible to delete VIM image %s from datacenter %s: %s %s",
                                 image_vim["vim_id"], image_vim["datacenter_id"], type(e).__name__, str(e))
                    undeletedItems.append("image {} from VIM {}".format(image_vim["vim_id"], image_vim["datacenter_id"] ))
            #delete image from Database, using table images and with cascade foreign key also at datacenters_images
            mydb.delete_row_by_id('images', image)
        except db_base_Exception as e:
            logger.error("delete_vnf_error. Not possible to get image details and delete '%s'. %s", image, str(e))
            undeletedItems.append("image {}".format(image))

    return vnf_id + " " + vnf["name"]
    #if undeletedItems:
    #    return "delete_vnf. Undeleted: %s" %(undeletedItems)
+
+
@deprecated("Not used")
def get_hosts_info(mydb, nfvo_tenant_id, datacenter_name=None):
    """Return the host topology reported by the (single) datacenter of a tenant.

    Deprecated. Keeps this function's legacy "(result, content)" return
    convention: a negative result carries an error string, otherwise content
    is the topology dict {'name': ..., 'servers': ...}.

    :param mydb: nfvo database object
    :param nfvo_tenant_id: NFVO tenant whose datacenter is queried
    :param datacenter_name: optional datacenter name filter
    """
    # BUG FIX: get_vim() returns a dict of vims (see its other call sites in this
    # module, e.g. get_hosts/delete_vnf), so the previous
    # "result, vims = get_vim(...)" tuple unpacking raised ValueError on every call.
    vims = get_vim(mydb, nfvo_tenant_id, None, datacenter_name)
    if not vims:
        return -httperrors.Not_Found, "datacenter '{}' not found".format(datacenter_name)
    myvim = next(iter(vims.values()))
    result, servers = myvim.get_hosts_info()
    if result < 0:
        return result, servers
    topology = {'name': myvim['name'], 'servers': servers}
    return result, topology
+
+
def get_hosts(mydb, nfvo_tenant_id):
    """Return the datacenter hosts and the VMs they run, mapped to NFVO names.

    :param mydb: nfvo database object
    :param nfvo_tenant_id: NFVO tenant; it must have exactly one datacenter attached
    :return: dict {'Datacenters': [{'name': ..., 'servers': [...]}]}
    :raises NfvoException: if zero or several datacenters are found, or on VIM errors
    """
    vims = get_vim(mydb, nfvo_tenant_id)
    if len(vims) == 0:
        raise NfvoException("No datacenter found for tenant '{}'".format(str(nfvo_tenant_id)), httperrors.Not_Found)
    if len(vims) > 1:
        # ambiguous request: the caller must select a concrete datacenter
        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
    myvim = next(iter(vims.values()))
    try:
        hosts = myvim.get_hosts()
        logger.debug('VIM hosts response: '+ yaml.safe_dump(hosts, indent=4, default_flow_style=False))

        servers = []
        datacenter = {'Datacenters': [{'name': myvim['name'], 'servers': servers}]}
        for host in hosts:
            server = {'name': host['name'], 'vms': []}
            for vm in host['instances']:
                # translate the VIM vm id into the NFVO internal name/model
                try:
                    rows = mydb.get_rows(SELECT=('name',), FROM='instance_vms as iv join vms on iv.vm_id=vms.uuid',
                                         WHERE={'vim_vm_id': vm['id']})
                    if not rows:
                        logger.warn("nfvo.get_hosts virtual machine at VIM '{}' not found at tidnfvo".format(vm['id']))
                        continue
                    server['vms'].append({'name': vm['name'], 'model': rows[0]['name']})
                except db_base_Exception as e:
                    # a lookup failure for one VM must not hide the rest of the host
                    logger.warn("nfvo.get_hosts virtual machine at VIM '{}' error {}".format(vm['id'], str(e)))
            servers.append(server)

        return datacenter
    except vimconn.VimConnException as e:
        raise NfvoException("Not possible to get_host_list from VIM: {}".format(str(e)), e.http_code)
+
+
@deprecated("Use new_nsd_v3")
def new_scenario(mydb, tenant_id, topo):
    """Create a scenario (v0.1 topology format) in the NFVO database.

    Parses topo['topology'] (nodes + connections), validates every referenced
    VNF and interface against the database, consolidates the per-connection
    interface pairs into networks, auto-connects unattached 'mgmt' interfaces
    to the datacenter 'mgmt' net, and finally inserts everything through
    mydb.new_scenario().

    :param mydb: nfvo database object
    :param tenant_id: owner tenant, or "any" (stored as NULL owner)
    :param topo: scenario descriptor (v0.1 'topology' format)
    :return: the uuid of the created scenario
    :raises NfvoException: on any validation error
    """
#    result, vims = get_vim(mydb, tenant_id)
#    if result < 0:
#        return result, vims
#1: parse input
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        if "tenant_id" in topo:
            if topo["tenant_id"] != tenant_id:
                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(topo["tenant_id"], tenant_id),
                                    httperrors.Unauthorized)
    else:
        tenant_id=None

#1.1: get VNFs and external_networks (other_nets).
    vnfs={}
    other_nets={}  #external_networks, bridge_networks and data_networkds
    nodes = topo['topology']['nodes']
    for k in nodes.keys():
        if nodes[k]['type'] == 'VNF':
            vnfs[k] = nodes[k]
            vnfs[k]['ifaces'] = {}
        elif nodes[k]['type'] == 'other_network' or nodes[k]['type'] == 'external_network':
            other_nets[k] = nodes[k]
            other_nets[k]['external']=True
        elif nodes[k]['type'] == 'network':
            other_nets[k] = nodes[k]
            other_nets[k]['external']=False


#1.2: Check that VNF are present at database table vnfs. Insert uuid, description and external interfaces
    for name,vnf in vnfs.items():
        # a VNF is visible if owned by the tenant or flagged public
        where = {"OR": {"tenant_id": tenant_id, 'public': "true"}}
        error_text = ""
        error_pos = "'topology':'nodes':'" + name + "'"
        if 'vnf_id' in vnf:
            error_text += " 'vnf_id' " + vnf['vnf_id']
            where['uuid'] = vnf['vnf_id']
        if 'VNF model' in vnf:
            error_text += " 'VNF model' " + vnf['VNF model']
            where['name'] = vnf['VNF model']
        if len(where) == 1:
            # only the OR clause present: the descriptor gave no way to identify the VNF
            raise NfvoException("Descriptor need a 'vnf_id' or 'VNF model' field at " + error_pos, httperrors.Bad_Request)

        vnf_db = mydb.get_rows(SELECT=('uuid','name','description'),
                               FROM='vnfs',
                               WHERE=where)
        if len(vnf_db)==0:
            raise NfvoException("unknown" + error_text + " at " + error_pos, httperrors.Not_Found)
        elif len(vnf_db)>1:
            raise NfvoException("more than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", httperrors.Conflict)
        vnf['uuid']=vnf_db[0]['uuid']
        vnf['description']=vnf_db[0]['description']
        #get external interfaces
        ext_ifaces = mydb.get_rows(SELECT=('external_name as name','i.uuid as iface_uuid', 'i.type as type'),
                                   FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
                                   WHERE={'vnfs.uuid':vnf['uuid'], 'external_name<>': None} )
        for ext_iface in ext_ifaces:
            vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type':ext_iface['type']}

#1.4 get list of connections
    conections = topo['topology']['connections']
    conections_list = []
    conections_list_name = []
    for k in conections.keys():
        # NOTE(review): under Python 3 dict.items() returns a view with no .append();
        # the later ifaces_list.append((k, None)) would fail for the dict branch — confirm
        # whether this deprecated path is still exercised under Python 3
        if type(conections[k]['nodes'])==dict: #dict with node:iface pairs
            ifaces_list = conections[k]['nodes'].items()
        elif type(conections[k]['nodes'])==list: #list with dictionary
            ifaces_list=[]
            conection_pair_list = map(lambda x: x.items(), conections[k]['nodes'] )
            for k2 in conection_pair_list:
                ifaces_list += k2

        con_type = conections[k].get("type", "link")
        if con_type != "link":
            # a non-link connection is itself a network node: register it and add a
            # pseudo interface (k, None) so it joins the consolidation below
            if k in other_nets:
                raise NfvoException("Format error. Reapeted network name at 'topology':'connections':'{}'".format(str(k)), httperrors.Bad_Request)
            other_nets[k] = {'external': False}
            if conections[k].get("graph"):
                other_nets[k]["graph"] = conections[k]["graph"]
            ifaces_list.append( (k, None) )


        if con_type == "external_network":
            other_nets[k]['external'] = True
            if conections[k].get("model"):
                other_nets[k]["model"] = conections[k]["model"]
            else:
                other_nets[k]["model"] = k
        if con_type == "dataplane_net" or con_type == "bridge_net":
            other_nets[k]["model"] = con_type

        conections_list_name.append(k)
        conections_list.append(set(ifaces_list)) #from list to set to operate as a set (this conversion removes elements that are repeated in a list)
        #print set(ifaces_list)
        #check valid VNF and iface names
        for iface in ifaces_list:
            if iface[0] not in vnfs and iface[0] not in other_nets :
                raise NfvoException("format error. Invalid VNF name at 'topology':'connections':'{}':'nodes':'{}'".format(
                    str(k), iface[0]), httperrors.Not_Found)
            if iface[0] in vnfs and iface[1] not in vnfs[ iface[0] ]['ifaces']:
                raise NfvoException("format error. Invalid interface name at 'topology':'connections':'{}':'nodes':'{}':'{}'".format(
                    str(k), iface[0], iface[1]), httperrors.Not_Found)

#1.5 unify connections from the pair list to a consolidated list
    # transitive closure: two connections sharing an interface belong to the same net
    index=0
    while index < len(conections_list):
        index2 = index+1
        while index2 < len(conections_list):
            if len(conections_list[index] & conections_list[index2])>0: #common interface, join nets
                conections_list[index] |= conections_list[index2]
                del conections_list[index2]
                del conections_list_name[index2]
            else:
                index2 += 1
        conections_list[index] = list(conections_list[index])  # from set to list again
        index += 1
    #for k in conections_list:
    #    print k



#1.6 Delete non external nets
#    for k in other_nets.keys():
#        if other_nets[k]['model']=='bridge' or other_nets[k]['model']=='dataplane_net' or other_nets[k]['model']=='bridge_net':
#            for con in conections_list:
#                delete_indexes=[]
#                for index in range(0,len(con)):
#                    if con[index][0] == k: delete_indexes.insert(0,index) #order from higher to lower
#                for index in delete_indexes:
#                    del con[index]
#            del other_nets[k]
#1.7: Check external_ports are present at database table datacenter_nets
    for k,net in other_nets.items():
        error_pos = "'topology':'nodes':'" + k + "'"
        if net['external']==False:
            if 'name' not in net:
                net['name']=k
            if 'model' not in net:
                raise NfvoException("needed a 'model' at " + error_pos, httperrors.Bad_Request)
            if net['model']=='bridge_net':
                net['type']='bridge';
            elif net['model']=='dataplane_net':
                net['type']='data';
            else:
                raise NfvoException("unknown 'model' '"+ net['model'] +"' at " + error_pos, httperrors.Not_Found)
        else: #external
#IF we do not want to check that external network exist at datacenter
            pass
#ELSE
#            error_text = ""
#            WHERE_={}
#            if 'net_id' in net:
#                error_text += " 'net_id' " + net['net_id']
#                WHERE_['uuid'] = net['net_id']
#            if 'model' in net:
#                error_text += " 'model' " + net['model']
#                WHERE_['name'] = net['model']
#            if len(WHERE_) == 0:
#                return -httperrors.Bad_Request, "needed a 'net_id' or 'model' at " + error_pos
#            r,net_db = mydb.get_table(SELECT=('uuid','name','description','type','shared'),
#                FROM='datacenter_nets', WHERE=WHERE_ )
#            if r<0:
#                print "nfvo.new_scenario Error getting datacenter_nets",r,net_db
#            elif r==0:
#                print "nfvo.new_scenario Error" +error_text+ " is not present at database"
#                return -httperrors.Bad_Request, "unknown " +error_text+ " at " + error_pos
#            elif r>1:
#                print "nfvo.new_scenario Error more than one external_network for " +error_text+ " is present at database"
#                return -httperrors.Bad_Request, "more than one external_network for " +error_text+ "at "+ error_pos + " Concrete with 'net_id'"
#            other_nets[k].update(net_db[0])
#ENDIF
    net_list={}
    net_nb=0  #Number of nets
    for con in conections_list:
        #check if this is connected to a external net
        other_net_index=-1
        #print
        #print "con", con
        for index in range(0,len(con)):
            #check if this is connected to a external net
            for net_key in other_nets.keys():
                if con[index][0]==net_key:
                    if other_net_index>=0:
                        # a consolidated connection may touch at most one declared net
                        error_text = "There is some interface connected both to net '{}' and net '{}'".format(
                            con[other_net_index][0], net_key)
                        #print "nfvo.new_scenario " + error_text
                        raise NfvoException(error_text, httperrors.Bad_Request)
                    else:
                        other_net_index = index
                        net_target = net_key
                    break
        #print "other_net_index", other_net_index
        try:
            if other_net_index>=0:
                # drop the pseudo interface of the declared net and wire the real
                # VNF interfaces to it
                del con[other_net_index]
#IF we do not want to check that external network exist at datacenter
                if other_nets[net_target]['external'] :
                    if "name" not in other_nets[net_target]:
                        other_nets[net_target]['name'] = other_nets[net_target]['model']
                    if other_nets[net_target]["type"] == "external_network":
                        # fix the generic external type using the first attached interface
                        if vnfs[ con[0][0] ]['ifaces'][ con[0][1] ]["type"] == "data":
                            other_nets[net_target]["type"] =  "data"
                        else:
                            other_nets[net_target]["type"] =  "bridge"
#ELSE
#                if other_nets[net_target]['external'] :
#                    type_='data' if len(con)>1 else 'ptp'  #an external net is connected to a external port, so it is ptp if only one connection is done to this net
#                    if type_=='data' and other_nets[net_target]['type']=="ptp":
#                        error_text = "Error connecting %d nodes on a not multipoint net %s" % (len(con), net_target)
#                        print "nfvo.new_scenario " + error_text
#                        return -httperrors.Bad_Request, error_text
#ENDIF
                for iface in con:
                    vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
            else:
                #create a net
                net_type_bridge=False
                net_type_data=False
                net_target = "__-__net"+str(net_nb)
                net_list[net_target] = {'name': conections_list_name[net_nb],  #"net-"+str(net_nb),
                                        'description':"net-{} in scenario {}".format(net_nb,topo['name']),
                                        'external':False}
                for iface in con:
                    vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
                    iface_type = vnfs[ iface[0] ]['ifaces'][ iface[1] ]['type']
                    if iface_type=='mgmt' or iface_type=='bridge':
                        net_type_bridge = True
                    else:
                        net_type_data = True
                if net_type_bridge and net_type_data:
                    # bridge and data interfaces cannot share one network
                    error_text = "Error connection interfaces of bridge type with data type. Firs node {}, iface {}".format(iface[0], iface[1])
                    #print "nfvo.new_scenario " + error_text
                    raise NfvoException(error_text, httperrors.Bad_Request)
                elif net_type_bridge:
                    type_='bridge'
                else:
                    # point-to-point when only two interfaces are attached
                    type_='data' if len(con)>2 else 'ptp'
                net_list[net_target]['type'] = type_
                net_nb+=1
        except Exception:
            # any lookup failure above means a dangling VNF/interface reference
            error_text = "Error connection node {} : {} does not match any VNF or interface".format(iface[0], iface[1])
            #print "nfvo.new_scenario " + error_text
            #raise e
            raise NfvoException(error_text, httperrors.Bad_Request)

#1.8: Connect to management net all not already connected interfaces of type 'mgmt'
    #1.8.1 obtain management net
    mgmt_net = mydb.get_rows(SELECT=('uuid','name','description','type','shared'),
                             FROM='datacenter_nets', WHERE={'name':'mgmt'} )
    #1.8.2 check all interfaces from all vnfs
    if len(mgmt_net)>0:
        add_mgmt_net = False
        for vnf in vnfs.values():
            for iface in vnf['ifaces'].values():
                if iface['type']=='mgmt' and 'net_key' not in iface:
                    #iface not connected
                    iface['net_key'] = 'mgmt'
                    add_mgmt_net = True
        if add_mgmt_net and 'mgmt' not in net_list:
            net_list['mgmt']=mgmt_net[0]
            net_list['mgmt']['external']=True
            net_list['mgmt']['graph']={'visible':False}

    net_list.update(other_nets)
    #print
    #print 'net_list', net_list
    #print
    #print 'vnfs', vnfs
    #print

#2: insert scenario. filling tables scenarios,sce_vnfs,sce_interfaces,sce_nets
    c = mydb.new_scenario( { 'vnfs':vnfs, 'nets':net_list,
        'tenant_id':tenant_id, 'name':topo['name'],
        'description':topo.get('description',topo['name']),
        'public': topo.get('public', False)
        })

    return c
+
+
@deprecated("Use new_nsd_v3")
def new_scenario_v02(mydb, tenant_id, scenario_dict, version):
    """ This creates a new scenario for version 0.2 and 0.3

    Validates every VNF and interface reference against the database, derives
    each network's type (bridge/data/ptp) from the attached interface types,
    cross-checks it against the optional 'implementation'/'type' fields of the
    v0.3 format, and inserts the result through mydb.new_scenario().

    :param mydb: nfvo database object
    :param tenant_id: owner tenant, or "any" (stored as NULL owner)
    :param scenario_dict: descriptor with a top-level "scenario" key
    :param version: descriptor version, "0.2" or "0.3"
    :return: the uuid of the created scenario
    :raises NfvoException: on any validation error
    """
    scenario = scenario_dict["scenario"]
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        if "tenant_id" in scenario:
            if scenario["tenant_id"] != tenant_id:
                # print "nfvo.new_scenario_v02() tenant '%s' not found" % tenant_id
                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(
                    scenario["tenant_id"], tenant_id), httperrors.Unauthorized)
    else:
        tenant_id=None

    # 1: Check that VNF are present at database table vnfs and update content into scenario dict
    for name,vnf in scenario["vnfs"].items():
        # a VNF is visible if owned by the tenant or flagged public
        where = {"OR": {"tenant_id": tenant_id, 'public': "true"}}
        error_text = ""
        error_pos = "'scenario':'vnfs':'" + name + "'"
        if 'vnf_id' in vnf:
            error_text += " 'vnf_id' " + vnf['vnf_id']
            where['uuid'] = vnf['vnf_id']
        if 'vnf_name' in vnf:
            error_text += " 'vnf_name' " + vnf['vnf_name']
            where['name'] = vnf['vnf_name']
        if len(where) == 1:
            # only the OR clause present: descriptor gave no way to identify the VNF
            raise NfvoException("Needed a 'vnf_id' or 'vnf_name' at " + error_pos, httperrors.Bad_Request)
        vnf_db = mydb.get_rows(SELECT=('uuid', 'name', 'description'),
                               FROM='vnfs',
                               WHERE=where)
        if len(vnf_db) == 0:
            raise NfvoException("Unknown" + error_text + " at " + error_pos, httperrors.Not_Found)
        elif len(vnf_db) > 1:
            raise NfvoException("More than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", httperrors.Conflict)
        vnf['uuid'] = vnf_db[0]['uuid']
        vnf['description'] = vnf_db[0]['description']
        vnf['ifaces'] = {}
        # get external interfaces
        ext_ifaces = mydb.get_rows(SELECT=('external_name as name', 'i.uuid as iface_uuid', 'i.type as type'),
                                   FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
                                   WHERE={'vnfs.uuid':vnf['uuid'], 'external_name<>': None} )
        for ext_iface in ext_ifaces:
            vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type': ext_iface['type']}
            # TODO? get internal-connections from db.nets and their profiles, and update scenario[vnfs][internal-connections] accordingly

    # 2: Insert net_key and ip_address at every vnf interface
    for net_name, net in scenario["networks"].items():
        net_type_bridge = False
        net_type_data = False
        for iface_dict in net["interfaces"]:
            # v0.2 lists {vnf: iface} pairs directly; v0.3 uses explicit keys and
            # optionally carries a fixed ip_address
            if version == "0.2":
                temp_dict = iface_dict
                ip_address = None
            elif version == "0.3":
                temp_dict = {iface_dict["vnf"] : iface_dict["vnf_interface"]}
                ip_address = iface_dict.get('ip_address', None)
            for vnf, iface in temp_dict.items():
                if vnf not in scenario["vnfs"]:
                    error_text = "Error at 'networks':'{}':'interfaces' VNF '{}' not match any VNF at 'vnfs'".format(
                        net_name, vnf)
                    # logger.debug("nfvo.new_scenario_v02 " + error_text)
                    raise NfvoException(error_text, httperrors.Not_Found)
                if iface not in scenario["vnfs"][vnf]['ifaces']:
                    error_text = "Error at 'networks':'{}':'interfaces':'{}' interface not match any VNF interface"\
                        .format(net_name, iface)
                    # logger.debug("nfvo.new_scenario_v02 " + error_text)
                    raise NfvoException(error_text, httperrors.Bad_Request)
                if "net_key" in scenario["vnfs"][vnf]['ifaces'][iface]:
                    # an interface may be attached to exactly one network
                    error_text = "Error at 'networks':'{}':'interfaces':'{}' interface already connected at network"\
                                 "'{}'".format(net_name, iface,scenario["vnfs"][vnf]['ifaces'][iface]['net_key'])
                    # logger.debug("nfvo.new_scenario_v02 " + error_text)
                    raise NfvoException(error_text, httperrors.Bad_Request)
                scenario["vnfs"][vnf]['ifaces'][ iface ]['net_key'] = net_name
                scenario["vnfs"][vnf]['ifaces'][iface]['ip_address'] = ip_address
                iface_type = scenario["vnfs"][vnf]['ifaces'][iface]['type']
                if iface_type == 'mgmt' or iface_type == 'bridge':
                    net_type_bridge = True
                else:
                    net_type_data = True

        if net_type_bridge and net_type_data:
            # bridge and data interfaces cannot share one network
            error_text = "Error connection interfaces of 'bridge' type and 'data' type at 'networks':'{}':'interfaces'"\
                .format(net_name)
            # logger.debug("nfvo.new_scenario " + error_text)
            raise NfvoException(error_text, httperrors.Bad_Request)
        elif net_type_bridge:
            type_ = 'bridge'
        else:
            # point-to-point when only two interfaces are attached
            type_ = 'data' if len(net["interfaces"]) > 2 else 'ptp'

        if net.get("implementation"):  # for v0.3
            # the derived type must be coherent with the declared underlay/overlay
            if type_ == "bridge" and net["implementation"] == "underlay":
                error_text = "Error connecting interfaces of data type to a network declared as 'underlay' at "\
                             "'network':'{}'".format(net_name)
                # logger.debug(error_text)
                raise NfvoException(error_text, httperrors.Bad_Request)
            elif type_ != "bridge" and net["implementation"] == "overlay":
                error_text = "Error connecting interfaces of data type to a network declared as 'overlay' at "\
                             "'network':'{}'".format(net_name)
                # logger.debug(error_text)
                raise NfvoException(error_text, httperrors.Bad_Request)
            net.pop("implementation")
        if "type" in net and version == "0.3":  # for v0.3
            if type_ == "data" and net["type"] == "e-line":
                error_text = "Error connecting more than 2 interfaces of data type to a network declared as type "\
                             "'e-line' at 'network':'{}'".format(net_name)
                # logger.debug(error_text)
                raise NfvoException(error_text, httperrors.Bad_Request)
            elif type_ == "ptp" and net["type"] == "e-lan":
                # an e-lan may legally have just two members; promote ptp to data
                type_ = "data"

        net['type'] = type_
        net['name'] = net_name
        net['external'] = net.get('external', False)

    # 3: insert at database
    scenario["nets"] = scenario["networks"]
    scenario['tenant_id'] = tenant_id
    scenario_id = mydb.new_scenario(scenario)
    return scenario_id
+
+
+def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
+ """
+ Parses an OSM IM nsd_catalog and insert at DB
+ :param mydb:
+ :param tenant_id:
+ :param nsd_descriptor:
+ :return: The list of created NSD ids
+ """
+ try:
+ mynsd = nsd_catalog.nsd()
+ try:
+ pybindJSONDecoder.load_ietf_json(nsd_descriptor, None, None, obj=mynsd, skip_unknown=True)
+ except Exception as e:
+ raise NfvoException("Error. Invalid NS descriptor format: " + str(e), httperrors.Bad_Request)
+ db_scenarios = []
+ db_sce_nets = []
+ db_sce_vnfs = []
+ db_sce_interfaces = []
+ db_sce_vnffgs = []
+ db_sce_rsps = []
+ db_sce_rsp_hops = []
+ db_sce_classifiers = []
+ db_sce_classifier_matches = []
+ db_ip_profiles = []
+ db_ip_profiles_index = 0
+ uuid_list = []
+ nsd_uuid_list = []
+ for nsd_yang in mynsd.nsd_catalog.nsd.values():
+ nsd = nsd_yang.get()
+
+ # table scenarios
+ scenario_uuid = str(uuid4())
+ uuid_list.append(scenario_uuid)
+ nsd_uuid_list.append(scenario_uuid)
+ db_scenario = {
+ "uuid": scenario_uuid,
+ "osm_id": get_str(nsd, "id", 255),
+ "name": get_str(nsd, "name", 255),
+ "description": get_str(nsd, "description", 255),
+ "tenant_id": tenant_id,
+ "vendor": get_str(nsd, "vendor", 255),
+ "short_name": get_str(nsd, "short-name", 255),
+ "descriptor": str(nsd_descriptor)[:60000],
+ }
+ db_scenarios.append(db_scenario)
+
+ # table sce_vnfs (constituent-vnfd)
+ vnf_index2scevnf_uuid = {}
+ vnf_index2vnf_uuid = {}
+ for vnf in nsd.get("constituent-vnfd").values():
+ existing_vnf = mydb.get_rows(FROM="vnfs", WHERE={'osm_id': str(vnf["vnfd-id-ref"])[:255],
+ 'tenant_id': tenant_id})
+ if not existing_vnf:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'constituent-vnfd':'vnfd-id-ref':"
+ "'{}'. Reference to a non-existing VNFD in the catalog".format(
+ str(nsd["id"]), str(vnf["vnfd-id-ref"])[:255]),
+ httperrors.Bad_Request)
+ sce_vnf_uuid = str(uuid4())
+ uuid_list.append(sce_vnf_uuid)
+ db_sce_vnf = {
+ "uuid": sce_vnf_uuid,
+ "scenario_id": scenario_uuid,
+ # "name": get_str(vnf, "member-vnf-index", 255),
+ "name": existing_vnf[0]["name"][:200] + "." + get_str(vnf, "member-vnf-index", 50),
+ "vnf_id": existing_vnf[0]["uuid"],
+ "member_vnf_index": str(vnf["member-vnf-index"]),
+ # TODO 'start-by-default': True
+ }
+ vnf_index2scevnf_uuid[str(vnf['member-vnf-index'])] = sce_vnf_uuid
+ vnf_index2vnf_uuid[str(vnf['member-vnf-index'])] = existing_vnf[0]["uuid"]
+ db_sce_vnfs.append(db_sce_vnf)
+
+ # table ip_profiles (ip-profiles)
+ ip_profile_name2db_table_index = {}
+ for ip_profile in nsd.get("ip-profiles").values():
+ db_ip_profile = {
+ "ip_version": str(ip_profile["ip-profile-params"].get("ip-version", "ipv4")),
+ "subnet_address": str(ip_profile["ip-profile-params"].get("subnet-address")),
+ "gateway_address": str(ip_profile["ip-profile-params"].get("gateway-address")),
+ "dhcp_enabled": str(ip_profile["ip-profile-params"]["dhcp-params"].get("enabled", True)),
+ "dhcp_start_address": str(ip_profile["ip-profile-params"]["dhcp-params"].get("start-address")),
+ "dhcp_count": str(ip_profile["ip-profile-params"]["dhcp-params"].get("count")),
+ }
+ dns_list = []
+ for dns in ip_profile["ip-profile-params"]["dns-server"].values():
+ dns_list.append(str(dns.get("address")))
+ db_ip_profile["dns_address"] = ";".join(dns_list)
+ if ip_profile["ip-profile-params"].get('security-group'):
+ db_ip_profile["security_group"] = ip_profile["ip-profile-params"]['security-group']
+ ip_profile_name2db_table_index[str(ip_profile["name"])] = db_ip_profiles_index
+ db_ip_profiles_index += 1
+ db_ip_profiles.append(db_ip_profile)
+
+ # table sce_nets (internal-vld)
+ for vld in nsd.get("vld").values():
+ sce_net_uuid = str(uuid4())
+ uuid_list.append(sce_net_uuid)
+ db_sce_net = {
+ "uuid": sce_net_uuid,
+ "name": get_str(vld, "name", 255),
+ "scenario_id": scenario_uuid,
+ # "type": #TODO
+ "multipoint": not vld.get("type") == "ELINE",
+ "osm_id": get_str(vld, "id", 255),
+ # "external": #TODO
+ "description": get_str(vld, "description", 255),
+ }
+ # guess type of network
+ if vld.get("mgmt-network"):
+ db_sce_net["type"] = "bridge"
+ db_sce_net["external"] = True
+ elif vld.get("provider-network").get("overlay-type") == "VLAN":
+ db_sce_net["type"] = "data"
+ else:
+ # later on it will be fixed to bridge or data depending on the type of interfaces attached to it
+ db_sce_net["type"] = None
+ db_sce_nets.append(db_sce_net)
+
+ # ip-profile, link db_ip_profile with db_sce_net
+ if vld.get("ip-profile-ref"):
+ ip_profile_name = vld.get("ip-profile-ref")
+ if ip_profile_name not in ip_profile_name2db_table_index:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'ip-profile-ref':'{}'."
+ " Reference to a non-existing 'ip_profiles'".format(
+ str(nsd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
+ httperrors.Bad_Request)
+ db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["sce_net_id"] = sce_net_uuid
+ elif vld.get("vim-network-name"):
+ db_sce_net["vim_network_name"] = get_str(vld, "vim-network-name", 255)
+
+ # table sce_interfaces (vld:vnfd-connection-point-ref)
+ for iface in vld.get("vnfd-connection-point-ref").values():
+ # Check if there are VDUs in the descriptor
+ vnf_index = str(iface['member-vnf-index-ref'])
+ existing_vdus = mydb.get_rows(SELECT=('vms.uuid'), FROM="vms", WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index]})
+ if existing_vdus:
+ # check correct parameters
+ if vnf_index not in vnf_index2vnf_uuid:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'vnfd-connection-point"
+ "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+ "'nsd':'constituent-vnfd'".format(
+ str(nsd["id"]), str(vld["id"]), str(iface["member-vnf-index-ref"])),
+ httperrors.Bad_Request)
+
+ existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid', 'i.type as iface_type'),
+ FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+ WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
+ 'external_name': get_str(iface, "vnfd-connection-point-ref",
+ 255)})
+ if not existing_ifaces:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'vnfd-connection-point"
+ "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
+ "connection-point name at VNFD '{}'".format(
+ str(nsd["id"]), str(vld["id"]), str(iface["vnfd-connection-point-ref"]),
+ str(iface.get("vnfd-id-ref"))[:255]),
+ httperrors.Bad_Request)
+ interface_uuid = existing_ifaces[0]["uuid"]
+ if existing_ifaces[0]["iface_type"] == "data":
+ db_sce_net["type"] = "data"
+ sce_interface_uuid = str(uuid4())
+ uuid_list.append(sce_net_uuid)
+ iface_ip_address = None
+ if iface.get("ip-address"):
+ iface_ip_address = str(iface.get("ip-address"))
+ db_sce_interface = {
+ "uuid": sce_interface_uuid,
+ "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+ "sce_net_id": sce_net_uuid,
+ "interface_id": interface_uuid,
+ "ip_address": iface_ip_address,
+ }
+ db_sce_interfaces.append(db_sce_interface)
+ if not db_sce_net["type"]:
+ db_sce_net["type"] = "bridge"
+
+ # table sce_vnffgs (vnffgd)
+ for vnffg in nsd.get("vnffgd").values():
+ sce_vnffg_uuid = str(uuid4())
+ uuid_list.append(sce_vnffg_uuid)
+ db_sce_vnffg = {
+ "uuid": sce_vnffg_uuid,
+ "name": get_str(vnffg, "name", 255),
+ "scenario_id": scenario_uuid,
+ "vendor": get_str(vnffg, "vendor", 255),
+ "description": get_str(vld, "description", 255),
+ }
+ db_sce_vnffgs.append(db_sce_vnffg)
+
+ # deal with rsps
+ for rsp in vnffg.get("rsp").values():
+ sce_rsp_uuid = str(uuid4())
+ uuid_list.append(sce_rsp_uuid)
+ db_sce_rsp = {
+ "uuid": sce_rsp_uuid,
+ "name": get_str(rsp, "name", 255),
+ "sce_vnffg_id": sce_vnffg_uuid,
+ "id": get_str(rsp, "id", 255), # only useful to link with classifiers; will be removed later in the code
+ }
+ db_sce_rsps.append(db_sce_rsp)
+ for iface in rsp.get("vnfd-connection-point-ref").values():
+ vnf_index = str(iface['member-vnf-index-ref'])
+ if_order = int(iface['order'])
+ # check correct parameters
+ if vnf_index not in vnf_index2vnf_uuid:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+ "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+ "'nsd':'constituent-vnfd'".format(
+ str(nsd["id"]), str(rsp["id"]), str(iface["member-vnf-index-ref"])),
+ httperrors.Bad_Request)
+
+ ingress_existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+ FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+ WHERE={
+ 'vnf_id': vnf_index2vnf_uuid[vnf_index],
+ 'external_name': get_str(iface, "vnfd-ingress-connection-point-ref",
+ 255)})
+ if not ingress_existing_ifaces:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+ "-ref':'vnfd-ingress-connection-point-ref':'{}'. Reference to a non-existing "
+ "connection-point name at VNFD '{}'".format(
+ str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-ingress-connection-point-ref"]),
+ str(iface.get("vnfd-id-ref"))[:255]), httperrors.Bad_Request)
+
+ egress_existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+ FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+ WHERE={
+ 'vnf_id': vnf_index2vnf_uuid[vnf_index],
+ 'external_name': get_str(iface, "vnfd-egress-connection-point-ref",
+ 255)})
+ if not egress_existing_ifaces:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+ "-ref':'vnfd-egress-connection-point-ref':'{}'. Reference to a non-existing "
+ "connection-point name at VNFD '{}'".format(
+ str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-egress-connection-point-ref"]),
+ str(iface.get("vnfd-id-ref"))[:255]), HTTP_Bad_Request)
+
+ ingress_interface_uuid = ingress_existing_ifaces[0]["uuid"]
+ egress_interface_uuid = egress_existing_ifaces[0]["uuid"]
+ sce_rsp_hop_uuid = str(uuid4())
+ uuid_list.append(sce_rsp_hop_uuid)
+ db_sce_rsp_hop = {
+ "uuid": sce_rsp_hop_uuid,
+ "if_order": if_order,
+ "ingress_interface_id": ingress_interface_uuid,
+ "egress_interface_id": egress_interface_uuid,
+ "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+ "sce_rsp_id": sce_rsp_uuid,
+ }
+ db_sce_rsp_hops.append(db_sce_rsp_hop)
+
+ # deal with classifiers
+ for classifier in vnffg.get("classifier").values():
+ sce_classifier_uuid = str(uuid4())
+ uuid_list.append(sce_classifier_uuid)
+
+ # source VNF
+ vnf_index = str(classifier['member-vnf-index-ref'])
+ if vnf_index not in vnf_index2vnf_uuid:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'vnfd-connection-point"
+ "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+ "'nsd':'constituent-vnfd'".format(
+ str(nsd["id"]), str(classifier["id"]), str(classifier["member-vnf-index-ref"])),
+ httperrors.Bad_Request)
+ existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+ FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+ WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
+ 'external_name': get_str(classifier, "vnfd-connection-point-ref",
+ 255)})
+ if not existing_ifaces:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+ "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
+ "connection-point name at VNFD '{}'".format(
+ str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
+ str(iface.get("vnfd-id-ref"))[:255]),
+ httperrors.Bad_Request)
+ interface_uuid = existing_ifaces[0]["uuid"]
+
+ db_sce_classifier = {
+ "uuid": sce_classifier_uuid,
+ "name": get_str(classifier, "name", 255),
+ "sce_vnffg_id": sce_vnffg_uuid,
+ "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+ "interface_id": interface_uuid,
+ }
+ rsp_id = get_str(classifier, "rsp-id-ref", 255)
+ rsp = next((item for item in db_sce_rsps if item["id"] == rsp_id), None)
+ db_sce_classifier["sce_rsp_id"] = rsp["uuid"]
+ db_sce_classifiers.append(db_sce_classifier)
+
+ for match in classifier.get("match-attributes").values():
+ sce_classifier_match_uuid = str(uuid4())
+ uuid_list.append(sce_classifier_match_uuid)
+ db_sce_classifier_match = {
+ "uuid": sce_classifier_match_uuid,
+ "ip_proto": get_str(match, "ip-proto", 2),
+ "source_ip": get_str(match, "source-ip-address", 16),
+ "destination_ip": get_str(match, "destination-ip-address", 16),
+ "source_port": get_str(match, "source-port", 5),
+ "destination_port": get_str(match, "destination-port", 5),
+ "sce_classifier_id": sce_classifier_uuid,
+ }
+ db_sce_classifier_matches.append(db_sce_classifier_match)
+ # TODO: vnf/cp keys
+
+ # remove unneeded id's in sce_rsps
+ for rsp in db_sce_rsps:
+ rsp.pop('id')
+
+ db_tables = [
+ {"scenarios": db_scenarios},
+ {"sce_nets": db_sce_nets},
+ {"ip_profiles": db_ip_profiles},
+ {"sce_vnfs": db_sce_vnfs},
+ {"sce_interfaces": db_sce_interfaces},
+ {"sce_vnffgs": db_sce_vnffgs},
+ {"sce_rsps": db_sce_rsps},
+ {"sce_rsp_hops": db_sce_rsp_hops},
+ {"sce_classifiers": db_sce_classifiers},
+ {"sce_classifier_matches": db_sce_classifier_matches},
+ ]
+
+ logger.debug("new_nsd_v3 done: %s",
+ yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
+ mydb.new_rows(db_tables, uuid_list)
+ return nsd_uuid_list
+ except NfvoException:
+ raise
+ except Exception as e:
+ logger.error("Exception {}".format(e))
+ raise # NfvoException("Exception {}".format(e), httperrors.Bad_Request)
+
+
def edit_scenario(mydb, tenant_id, scenario_id, data):
    """Update an existing scenario at the database.

    Tags the incoming *data* dictionary with the scenario uuid and the owning
    tenant before delegating to the database layer; returns whatever
    mydb.edit_scenario returns.
    """
    data["uuid"] = scenario_id
    data["tenant_id"] = tenant_id
    return mydb.edit_scenario(data)
+
+
@deprecated("Use create_instance")
def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instance_scenario_description, datacenter=None, vim_tenant=None, startvms=True):
    """Deploy an instance of a stored scenario at a single datacenter (VIM).

    Deprecated in favour of create_instance(). Creates the scenario-level
    networks, the VNF internal networks and the VM instances at the VIM,
    rolling back the created VIM resources on failure, and finally stores
    the resulting instance at the database.

    :param mydb: nfvo database connection object
    :param tenant_id: nfvo tenant uuid the scenario belongs to
    :param scenario_id: uuid (or name) of the scenario to deploy
    :param instance_scenario_name: name for the new instance
    :param instance_scenario_description: description for the new instance
    :param datacenter: datacenter name or uuid; the default one when None
    :param vim_tenant: VIM tenant to use, if not the default one
    :param startvms: when False the VMs are created but not started
    :return: the instance-scenario dictionary as stored at the database
    :raises NfvoException: on database or VIM errors (after rollback)
    """
    # Check that nfvo_tenant_id exists and get the VIM URI and the VIM tenant_id
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter, vim_tenant=vim_tenant)
    vims = {datacenter_id: myvim}
    myvim_tenant = myvim['tenant_id']
    datacenter_name = myvim['name']

    rollbackList = []   # VIM resources to undo if deployment fails half-way
    try:
        # Check that the scenario_id exists and get the scenario dictionary
        scenarioDict = mydb.get_scenario(scenario_id, tenant_id, datacenter_id=datacenter_id)
        scenarioDict['datacenter2tenant'] = {datacenter_id: myvim['config']['datacenter_tenant_id']}
        scenarioDict['datacenter_id'] = datacenter_id

        logger.debug("start_scenario Scenario %s: consisting of %d VNF(s)",
                     scenarioDict['name'], len(scenarioDict['vnfs']))

        # Auxiliary dictionary. First key: 'scenario' or a sce_vnf uuid.
        # Second key: uuid of the net/sce_net. Value: vim_net_id
        auxNetDict = {'scenario': {}}

        logger.debug("start_scenario 1. Creating new nets (sce_nets) in the VIM")
        for sce_net in scenarioDict['nets']:
            myNetName = "{}.{}".format(instance_scenario_name, sce_net['name'])
            myNetName = myNetName[0:255]    # limit length
            myNetType = sce_net['type']
            myNetIPProfile = sce_net.get('ip_profile', None)
            myProviderNetwork = sce_net.get('provider_network', None)
            if not sce_net["external"]:
                network_id, _ = myvim.new_network(myNetName, myNetType, myNetIPProfile,
                                                  provider_network_profile=myProviderNetwork)
                sce_net['vim_id'] = network_id
                auxNetDict['scenario'][sce_net['uuid']] = network_id
                rollbackList.append({'what': 'network', 'where': 'vim', 'vim_id': datacenter_id, 'uuid': network_id})
                sce_net["created"] = True
            else:
                # external nets must already exist at the datacenter
                if sce_net['vim_id'] is None:
                    error_text = "Error, datacenter '{}' does not have external network '{}'.".format(
                        datacenter_name, sce_net['name'])
                    _, message = rollback(mydb, vims, rollbackList)
                    logger.error("nfvo.start_scenario: %s", error_text)
                    raise NfvoException(error_text, httperrors.Bad_Request)
                logger.debug("Using existent VIM network for scenario %s. Network id %s",
                             scenarioDict['name'], sce_net['vim_id'])
                auxNetDict['scenario'][sce_net['uuid']] = sce_net['vim_id']

        logger.debug("start_scenario 2. Creating new nets (vnf internal nets) in the VIM")
        # For each vnf internal net, create it at the VIM and register it at auxNetDict
        for sce_vnf in scenarioDict['vnfs']:
            for net in sce_vnf['nets']:
                myNetName = "{}.{}".format(instance_scenario_name, net['name'])
                myNetName = myNetName[0:255]    # limit length
                myNetType = net['type']
                myNetIPProfile = net.get('ip_profile', None)
                # bugfix: read the provider network from the current vnf net; the original
                # read it from 'sce_net', a stale variable left over from the previous loop
                myProviderNetwork = net.get('provider_network', None)
                network_id, _ = myvim.new_network(myNetName, myNetType, myNetIPProfile,
                                                  provider_network_profile=myProviderNetwork)
                net['vim_id'] = network_id
                if sce_vnf['uuid'] not in auxNetDict:
                    auxNetDict[sce_vnf['uuid']] = {}
                auxNetDict[sce_vnf['uuid']][net['uuid']] = network_id
                rollbackList.append({'what': 'network', 'where': 'vim', 'vim_id': datacenter_id, 'uuid': network_id})
                net["created"] = True

        logger.debug("start_scenario 3. Creating new vm instances in the VIM")
        i = 0
        for sce_vnf in scenarioDict['vnfs']:
            # collect the distinct availability zones requested by this VNF's VMs
            vnf_availability_zones = []
            for vm in sce_vnf['vms']:
                vm_av = vm.get('availability_zone')
                if vm_av and vm_av not in vnf_availability_zones:
                    vnf_availability_zones.append(vm_av)

            # check if there are enough availability zones available at vim level.
            # bugfix: the original referenced an undefined name 'myvims' (NameError);
            # this deprecated single-datacenter path only has 'myvim'
            if myvim.availability_zone and vnf_availability_zones:
                if len(vnf_availability_zones) > len(myvim.availability_zone):
                    raise NfvoException('No enough availability zones at VIM for this deployment',
                                        httperrors.Bad_Request)

            for vm in sce_vnf['vms']:
                i += 1
                myVMDict = {}
                # VM names get an 'a', 'b', ... suffix (non-letters past 26 VMs)
                myVMDict['name'] = "{}.{}.{}".format(instance_scenario_name, sce_vnf['name'], chr(96 + i))
                myVMDict['description'] = myVMDict['name'][0:99]
                if not startvms:
                    myVMDict['start'] = "no"
                myVMDict['name'] = myVMDict['name'][0:255]  # limit name length

                # create image at vim in case it does not exist
                image_dict = mydb.get_table_by_uuid_name("images", vm['image_id'])
                image_id = create_or_use_image(mydb, vims, image_dict, [], True)
                vm['vim_image_id'] = image_id

                # create flavor at vim in case it does not exist
                flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
                if flavor_dict['extended'] is not None:
                    # NOTE(review): full yaml.Loader on database content; assumed trusted
                    # because this same component wrote it — confirm if the DB is shared
                    flavor_dict['extended'] = yaml.load(flavor_dict['extended'], Loader=yaml.Loader)
                flavor_id = create_or_use_flavor(mydb, vims, flavor_dict, [], True)
                vm['vim_flavor_id'] = flavor_id

                myVMDict['imageRef'] = vm['vim_image_id']
                myVMDict['flavorRef'] = vm['vim_flavor_id']
                myVMDict['networks'] = []
                for iface in vm['interfaces']:
                    netDict = {}
                    if iface['type'] == "data":
                        netDict['type'] = iface['model']
                    elif "model" in iface and iface["model"] is not None:
                        netDict['model'] = iface['model']
                    # TODO in future, remove this because mac_address will not be set, and the
                    # type of PF,VF is obtained from interface table model
                    # discover type of interface looking at flavor
                    for numa in flavor_dict.get('extended', {}).get('numas', []):
                        for flavor_iface in numa.get('interfaces', []):
                            if flavor_iface.get('name') == iface['internal_name']:
                                if flavor_iface['dedicated'] == 'yes':
                                    netDict['type'] = "PF"      # passthrough
                                elif flavor_iface['dedicated'] == 'no':
                                    netDict['type'] = "VF"      # sriov
                                elif flavor_iface['dedicated'] == 'yes:sriov':
                                    netDict['type'] = "VFnotShared"     # sriov but only one sriov on the PF
                                netDict["mac_address"] = flavor_iface.get("mac_address")
                                break
                    netDict["use"] = iface['type']
                    if netDict["use"] == "data" and not netDict.get("type"):
                        e_text = "Cannot determine the interface type PF or VF of VNF '{}' VM '{}' iface '{}'".format(
                            sce_vnf['name'], vm['name'], iface['internal_name'])
                        if flavor_dict.get('extended') is None:
                            # bugfix: the original message had no separator after e_text and
                            # embedded a run of indentation spaces via a line continuation
                            raise NfvoException(e_text + ". After database migration some information is not "
                                                "available. Try to delete and create the scenarios and VNFs again",
                                                httperrors.Conflict)
                        else:
                            raise NfvoException(e_text, httperrors.Internal_Server_Error)
                    if netDict["use"] == "mgmt" or netDict["use"] == "bridge":
                        netDict["type"] = "virtual"
                    if "vpci" in iface and iface["vpci"] is not None:
                        netDict['vpci'] = iface['vpci']
                    if "mac" in iface and iface["mac"] is not None:
                        netDict['mac_address'] = iface['mac']
                    if "port-security" in iface and iface["port-security"] is not None:
                        netDict['port_security'] = iface['port-security']
                    if "floating-ip" in iface and iface["floating-ip"] is not None:
                        netDict['floating_ip'] = iface['floating-ip']
                    netDict['name'] = iface['internal_name']
                    if iface['net_id'] is None:
                        # iface attached to a scenario (external) net: look up its vim net id
                        for vnf_iface in sce_vnf["interfaces"]:
                            if vnf_iface['interface_id'] == iface['uuid']:
                                netDict['net_id'] = auxNetDict['scenario'][vnf_iface['sce_net_id']]
                                break
                    else:
                        netDict['net_id'] = auxNetDict[sce_vnf['uuid']][iface['net_id']]
                    # skip bridge ifaces not connected to any net
                    # if 'net_id' not in netDict or netDict['net_id'] is None:
                    #     continue
                    myVMDict['networks'].append(netDict)

                # NOTE(review): 'availability_zone' is never set on myVMDict in this code
                # path, so av_index is always None here — kept for parity with create_instance
                if 'availability_zone' in myVMDict:
                    av_index = vnf_availability_zones.index(myVMDict['availability_zone'])
                else:
                    av_index = None

                vm_id, _ = myvim.new_vminstance(myVMDict['name'], myVMDict['description'], myVMDict.get('start', None),
                                                myVMDict['imageRef'], myVMDict['flavorRef'], myVMDict['networks'],
                                                availability_zone_index=av_index,
                                                availability_zone_list=vnf_availability_zones)
                vm['vim_id'] = vm_id
                rollbackList.append({'what': 'vm', 'where': 'vim', 'vim_id': datacenter_id, 'uuid': vm_id})
                # put the vim interface id back into scenario[vnfs][vms][interfaces]
                for net in myVMDict['networks']:
                    if "vim_id" in net:
                        for iface in vm['interfaces']:
                            if net["name"] == iface["internal_name"]:
                                iface["vim_id"] = net["vim_id"]
                                break

        logger.debug("start scenario Deployment done")
        instance_id = mydb.new_instance_scenario_as_a_whole(tenant_id, instance_scenario_name,
                                                            instance_scenario_description, scenarioDict)
        return mydb.get_instance_scenario(instance_id)

    except (db_base_Exception, vimconn.VimConnException) as e:
        # undo whatever was created at the VIM before re-raising as NfvoException
        _, message = rollback(mydb, vims, rollbackList)
        if isinstance(e, db_base_Exception):
            error_text = "Exception at database"
        else:
            error_text = "Exception at VIM"
        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
        raise NfvoException(error_text, e.http_code)
+
def unify_cloud_config(cloud_config_preserve, cloud_config):
    """Merge two cloud-config dictionaries into a new one.

    On conflicts the values coming from *cloud_config_preserve* win.
    Either argument may be None or empty; when both are, None is returned.
    """
    if not cloud_config_preserve and not cloud_config:
        return None

    merged = {"key-pairs": [], "users": []}

    # key-pairs: union of both lists, first-seen order, preserve side first
    for source in (cloud_config_preserve, cloud_config):
        if not source:
            continue
        for key in source.get("key-pairs", ()):
            if key not in merged["key-pairs"]:
                merged["key-pairs"].append(key)
    if not merged["key-pairs"]:
        del merged["key-pairs"]

    # users: concatenate both lists, then collapse entries with the same name,
    # folding each duplicate's key-pairs into the first occurrence
    if cloud_config:
        merged["users"] += cloud_config.get("users", ())
    if cloud_config_preserve:
        merged["users"] += cloud_config_preserve.get("users", ())
    users = merged["users"]
    duplicates = []
    for first_pos, first_user in enumerate(users):
        if first_pos in duplicates:
            continue
        for later_pos in range(first_pos + 1, len(users)):
            if later_pos in duplicates:
                continue
            later_user = users[later_pos]
            if first_user["name"] != later_user["name"]:
                continue
            duplicates.append(later_pos)
            for key in later_user.get("key-pairs", ()):
                merged_keys = first_user.setdefault("key-pairs", [])
                if key not in merged_keys:
                    merged_keys.append(key)
    for pos in sorted(duplicates, reverse=True):
        del users[pos]
    if not merged["users"]:
        del merged["users"]

    # boot-data-drive: a single value; the preserve side overrides
    if cloud_config and cloud_config.get("boot-data-drive") is not None:
        merged["boot-data-drive"] = cloud_config["boot-data-drive"]
    if cloud_config_preserve and cloud_config_preserve.get("boot-data-drive") is not None:
        merged["boot-data-drive"] = cloud_config_preserve["boot-data-drive"]

    # user-data: accumulate into a list; scalar values are appended as-is
    merged["user-data"] = []
    for source in (cloud_config, cloud_config_preserve):
        if source and source.get("user-data"):
            user_data = source["user-data"]
            if isinstance(user_data, list):
                merged["user-data"] += user_data
            else:
                merged["user-data"].append(user_data)
    if not merged["user-data"]:
        del merged["user-data"]

    # config-files: start from the plain side; preserve-side files replace an
    # existing entry with the same "dest", otherwise they are appended
    merged["config-files"] = []
    if cloud_config and cloud_config.get("config-files") is not None:
        merged["config-files"] += cloud_config["config-files"]
    if cloud_config_preserve:
        for config_file in cloud_config_preserve.get("config-files", ()):
            for pos in range(len(merged["config-files"])):
                if merged["config-files"][pos]["dest"] == config_file["dest"]:
                    merged["config-files"][pos] = config_file
                    break
            else:
                merged["config-files"].append(config_file)
    if not merged["config-files"]:
        del merged["config-files"]
    return merged
+
+
def get_vim_thread(mydb, tenant_id, datacenter_id_name=None, datacenter_tenant_id=None):
    """Return (thread_id, vim_thread) for a datacenter/vim-account.

    An already running thread is reused when present at vim_threads["running"];
    otherwise the datacenter is resolved at the database, its VIM plugin is
    loaded on demand, and a new vim_thread is created, started and registered.
    Raises NfvoException when the datacenter cannot be found or is ambiguous.
    """
    global plugins
    datacenter_id = None
    datacenter_name = None
    thread = None
    try:
        if datacenter_tenant_id:
            thread_id = datacenter_tenant_id
            thread = vim_threads["running"].get(datacenter_tenant_id)
        if not thread:
            query_where = {"td.nfvo_tenant_id": tenant_id}
            if datacenter_id_name:
                if utils.check_valid_uuid(datacenter_id_name):
                    datacenter_id = datacenter_id_name
                    query_where["dt.datacenter_id"] = datacenter_id
                else:
                    datacenter_name = datacenter_id_name
                    query_where["d.name"] = datacenter_name
            if datacenter_tenant_id:
                query_where["dt.uuid"] = datacenter_tenant_id
            matches = mydb.get_rows(
                SELECT=("dt.uuid as datacenter_tenant_id, d.name as datacenter_name", "d.type as type"),
                FROM="datacenter_tenants as dt join tenants_datacenters as td on dt.uuid=td.datacenter_tenant_id "
                     "join datacenters as d on d.uuid=dt.datacenter_id",
                WHERE=query_where)
            if len(matches) > 1:
                raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
            if matches:
                thread_id = matches[0]["datacenter_tenant_id"]
                datacenter_name = matches[0]["datacenter_name"]
                thread = vim_threads["running"].get(thread_id)
                if not thread:
                    # load the VIM plugin on demand and spawn a worker thread for it
                    plugin_name = "rovim_" + matches[0]["type"]
                    if plugin_name not in plugins:
                        _load_plugin(plugin_name, type="vim")
                    thread_name = get_non_used_vim_name(datacenter_name, datacenter_id)
                    thread = vim_thread(task_lock, plugins, thread_name, None, thread_id, db=mydb)
                    thread.start()
                    vim_threads["running"][thread_id] = thread
        if not thread:
            raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
        return thread_id, thread
    except db_base_Exception as e:
        raise NfvoException("{} {}".format(type(e).__name__, str(e)), e.http_code)
+
+
def get_datacenter_uuid(mydb, tenant_id, datacenter_id_name):
    """Resolve a datacenter name or uuid into (uuid, db_row).

    When *tenant_id* is given, only datacenters attached to that tenant are
    considered. Raises NfvoException when zero or more than one datacenter
    matches.
    """
    filters = {}
    if utils.check_valid_uuid(datacenter_id_name):
        filters['d.uuid'] = datacenter_id_name
    else:
        filters['d.name'] = datacenter_id_name

    if tenant_id:
        filters['nfvo_tenant_id'] = tenant_id
        from_tables = ("tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid"
                       " join datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid")
    else:
        from_tables = "datacenters as d"
    matches = mydb.get_rows(FROM=from_tables, SELECT=("d.uuid as uuid", "d.name as name", "d.type as type"),
                            WHERE=filters)
    if not matches:
        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
    if len(matches) > 1:
        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
    return matches[0]["uuid"], matches[0]
+
+
def get_datacenter_by_name_uuid(mydb, tenant_id, datacenter_id_name=None, **extra_filter):
    """Locate exactly one datacenter (VIM) by name or uuid and return (vim_id, vim).

    Raises NfvoException when no datacenter matches or the match is ambiguous.
    """
    dc_uuid = None
    dc_name = None
    if datacenter_id_name:
        if utils.check_valid_uuid(datacenter_id_name):
            dc_uuid = datacenter_id_name
        else:
            dc_name = datacenter_id_name
    vims = get_vim(mydb, tenant_id, dc_uuid, dc_name, **extra_filter)
    if not vims:
        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
    if len(vims) > 1:
        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
    # exactly one entry left: return it
    return next(iter(vims.items()))
+
+
def update(d, u):
    """Recursively merge dict *u* into dict *d* and return *d*.

    Nested mappings are merged depth-first; non-mapping values from *u*
    overwrite the corresponding entries of *d*. *d* is modified in place.
    """
    # collections.Mapping was removed in Python 3.10; the ABC lives in collections.abc
    from collections.abc import Mapping
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
+
+
+def _get_wim(db, wim_account_id):
+ # get wim from wim_account
+ wim_accounts = db.get_rows(FROM='wim_accounts', WHERE={"uuid": wim_account_id})
+ if not wim_accounts:
+ raise NfvoException("Not found sdn id={}".format(wim_account_id), http_code=httperrors.Not_Found)
+ return wim_accounts[0]["wim_id"]
+
+
+def create_instance(mydb, tenant_id, instance_dict):
+ # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
+ # logger.debug("Creating instance...")
+
+ scenario = instance_dict["scenario"]
+
+ # find main datacenter
+ myvims = {}
+ myvim_threads_id = {}
+ datacenter = instance_dict.get("datacenter")
+ default_wim_account = instance_dict.get("wim_account")
+ default_datacenter_id, vim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+ myvims[default_datacenter_id] = vim
+ myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id)
+ tenant = mydb.get_rows_by_id('nfvo_tenants', tenant_id)
+ # myvim_tenant = myvim['tenant_id']
+ rollbackList = []
+
+ # print "Checking that the scenario exists and getting the scenario dictionary"
+ if isinstance(scenario, str):
+ scenarioDict = mydb.get_scenario(scenario, tenant_id, datacenter_vim_id=myvim_threads_id[default_datacenter_id],
+ datacenter_id=default_datacenter_id)
+ else:
+ scenarioDict = scenario
+ scenarioDict["uuid"] = None
+
+ # logger.debug(">>>>>> Dictionaries before merging")
+ # logger.debug(">>>>>> InstanceDict:\n{}".format(yaml.safe_dump(instance_dict,default_flow_style=False, width=256)))
+ # logger.debug(">>>>>> ScenarioDict:\n{}".format(yaml.safe_dump(scenarioDict,default_flow_style=False, width=256)))
+
+ db_instance_vnfs = []
+ db_instance_vms = []
+ db_instance_interfaces = []
+ db_instance_sfis = []
+ db_instance_sfs = []
+ db_instance_classifications = []
+ db_instance_sfps = []
+ db_ip_profiles = []
+ db_vim_actions = []
+ uuid_list = []
+ task_index = 0
+ instance_name = instance_dict["name"]
+ instance_uuid = str(uuid4())
+ uuid_list.append(instance_uuid)
+ db_instance_scenario = {
+ "uuid": instance_uuid,
+ "name": instance_name,
+ "tenant_id": tenant_id,
+ "scenario_id": scenarioDict['uuid'],
+ "datacenter_id": default_datacenter_id,
+ # filled bellow 'datacenter_tenant_id'
+ "description": instance_dict.get("description"),
+ }
+ if scenarioDict.get("cloud-config"):
+ db_instance_scenario["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"],
+ default_flow_style=True, width=256)
+ instance_action_id = get_task_id()
+ db_instance_action = {
+ "uuid": instance_action_id, # same uuid for the instance and the action on create
+ "tenant_id": tenant_id,
+ "instance_id": instance_uuid,
+ "description": "CREATE",
+ }
+
+ # Auxiliary dictionaries from x to y
+ sce_net2wim_instance = {}
+ sce_net2instance = {}
+ net2task_id = {'scenario': {}}
+ # Mapping between local networks and WIMs
+ wim_usage = {}
+
+ def ip_profile_IM2RO(ip_profile_im):
+ # translate from input format to database format
+ ip_profile_ro = {}
+ if 'subnet-address' in ip_profile_im:
+ ip_profile_ro['subnet_address'] = ip_profile_im['subnet-address']
+ if 'ip-version' in ip_profile_im:
+ ip_profile_ro['ip_version'] = ip_profile_im['ip-version']
+ if 'gateway-address' in ip_profile_im:
+ ip_profile_ro['gateway_address'] = ip_profile_im['gateway-address']
+ if 'dns-address' in ip_profile_im:
+ ip_profile_ro['dns_address'] = ip_profile_im['dns-address']
+ if isinstance(ip_profile_ro['dns_address'], (list, tuple)):
+ ip_profile_ro['dns_address'] = ";".join(ip_profile_ro['dns_address'])
+ if 'dhcp' in ip_profile_im:
+ ip_profile_ro['dhcp_start_address'] = ip_profile_im['dhcp'].get('start-address')
+ ip_profile_ro['dhcp_enabled'] = ip_profile_im['dhcp'].get('enabled', True)
+ ip_profile_ro['dhcp_count'] = ip_profile_im['dhcp'].get('count')
+ return ip_profile_ro
+
+ # logger.debug("Creating instance from scenario-dict:\n%s",
+ # yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))
+ try:
+ # 0 check correct parameters
+ for net_name, net_instance_desc in instance_dict.get("networks", {}).items():
+ for scenario_net in scenarioDict['nets']:
+ if net_name == scenario_net.get("name") or net_name == scenario_net.get("osm_id") or net_name == scenario_net.get("uuid"):
+ break
+ else:
+ raise NfvoException("Invalid scenario network name or id '{}' at instance:networks".format(net_name),
+ httperrors.Bad_Request)
+ if "sites" not in net_instance_desc:
+ net_instance_desc["sites"] = [ {} ]
+ site_without_datacenter_field = False
+ for site in net_instance_desc["sites"]:
+ if site.get("datacenter"):
+ site["datacenter"], _ = get_datacenter_uuid(mydb, tenant_id, site["datacenter"])
+ if site["datacenter"] not in myvims:
+ # Add this datacenter to myvims
+ d, v = get_datacenter_by_name_uuid(mydb, tenant_id, site["datacenter"])
+ myvims[d] = v
+ myvim_threads_id[d], _ = get_vim_thread(mydb, tenant_id, site["datacenter"])
+ site["datacenter"] = d # change name to id
+ else:
+ if site_without_datacenter_field:
+ raise NfvoException("Found more than one entries without datacenter field at "
+ "instance:networks:{}:sites".format(net_name), httperrors.Bad_Request)
+ site_without_datacenter_field = True
+ site["datacenter"] = default_datacenter_id # change name to id
+
+ for vnf_name, vnf_instance_desc in instance_dict.get("vnfs",{}).items():
+ for scenario_vnf in scenarioDict['vnfs']:
+ if vnf_name == scenario_vnf['member_vnf_index'] or vnf_name == scenario_vnf['uuid'] or vnf_name == scenario_vnf['name']:
+ break
+ else:
+ raise NfvoException("Invalid vnf name '{}' at instance:vnfs".format(vnf_name), httperrors.Bad_Request)
+ if "datacenter" in vnf_instance_desc:
+ # Add this datacenter to myvims
+ vnf_instance_desc["datacenter"], _ = get_datacenter_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
+ if vnf_instance_desc["datacenter"] not in myvims:
+ d, v = get_datacenter_by_name_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
+ myvims[d] = v
+ myvim_threads_id[d], _ = get_vim_thread(mydb, tenant_id, vnf_instance_desc["datacenter"])
+ scenario_vnf["datacenter"] = vnf_instance_desc["datacenter"]
+
+ for net_id, net_instance_desc in vnf_instance_desc.get("networks", {}).items():
+ for scenario_net in scenario_vnf['nets']:
+ if net_id == scenario_net['osm_id'] or net_id == scenario_net['uuid'] or net_id == scenario_net["name"]:
+ break
+ else:
+ raise NfvoException("Invalid net id or name '{}' at instance:vnfs:networks".format(net_id), httperrors.Bad_Request)
+ if net_instance_desc.get("vim-network-name"):
+ scenario_net["vim-network-name"] = net_instance_desc["vim-network-name"]
+ if net_instance_desc.get("vim-network-id"):
+ scenario_net["vim-network-id"] = net_instance_desc["vim-network-id"]
+ if net_instance_desc.get("name"):
+ scenario_net["name"] = net_instance_desc["name"]
+ if 'ip-profile' in net_instance_desc:
+ ipprofile_db = ip_profile_IM2RO(net_instance_desc['ip-profile'])
+ if 'ip_profile' not in scenario_net:
+ scenario_net['ip_profile'] = ipprofile_db
+ else:
+ update(scenario_net['ip_profile'], ipprofile_db)
+
+ if net_instance_desc.get('provider-network'):
+ provider_network_db = net_instance_desc['provider-network']
+ if 'provider_network' not in scenario_net:
+ scenario_net['provider_network'] = provider_network_db
+ else:
+ update(scenario_net['provider_network'], provider_network_db)
+
+ for vdu_id, vdu_instance_desc in vnf_instance_desc.get("vdus", {}).items():
+ for scenario_vm in scenario_vnf['vms']:
+ if vdu_id == scenario_vm['osm_id'] or vdu_id == scenario_vm["name"]:
+ break
+ else:
+ raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), httperrors.Bad_Request)
+ scenario_vm["instance_parameters"] = vdu_instance_desc
+ for iface_id, iface_instance_desc in vdu_instance_desc.get("interfaces", {}).items():
+ for scenario_interface in scenario_vm['interfaces']:
+ if iface_id == scenario_interface['internal_name'] or iface_id == scenario_interface["external_name"]:
+ scenario_interface.update(iface_instance_desc)
+ break
+ else:
+ raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), httperrors.Bad_Request)
+
+ # 0.1 parse cloud-config parameters
+ cloud_config = unify_cloud_config(instance_dict.get("cloud-config"), scenarioDict.get("cloud-config"))
+
+ # 0.2 merge instance information into scenario
+ # Ideally, the operation should be as simple as: update(scenarioDict,instance_dict)
+ # However, this is not possible yet.
+ for net_name, net_instance_desc in instance_dict.get("networks", {}).items():
+ for scenario_net in scenarioDict['nets']:
+ if net_name == scenario_net.get("name") or net_name == scenario_net.get("osm_id") or net_name == scenario_net.get("uuid"):
+ if "wim_account" in net_instance_desc and net_instance_desc["wim_account"] is not None:
+ scenario_net["wim_account"] = net_instance_desc["wim_account"]
+ if 'ip-profile' in net_instance_desc:
+ ipprofile_db = ip_profile_IM2RO(net_instance_desc['ip-profile'])
+ if 'ip_profile' not in scenario_net:
+ scenario_net['ip_profile'] = ipprofile_db
+ else:
+ update(scenario_net['ip_profile'], ipprofile_db)
+ if 'provider-network' in net_instance_desc:
+ provider_network_db = net_instance_desc['provider-network']
+
+ if 'provider-network' not in scenario_net:
+ scenario_net['provider_network'] = provider_network_db
+ else:
+ update(scenario_net['provider-network'], provider_network_db)
+
+ for interface in net_instance_desc.get('interfaces', ()):
+ if 'ip_address' in interface:
+ for vnf in scenarioDict['vnfs']:
+ if interface['vnf'] == vnf['name']:
+ for vnf_interface in vnf['interfaces']:
+ if interface['vnf_interface'] == vnf_interface['external_name']:
+ vnf_interface['ip_address'] = interface['ip_address']
+
+ # logger.debug(">>>>>>>> Merged dictionary")
+ # logger.debug("Creating instance scenario-dict MERGED:\n%s",
+ # yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))
+
+
+ # 1. Creating new nets (sce_nets) in the VIM"
+ number_mgmt_networks = 0
+ db_instance_nets = []
+ db_instance_wim_nets = []
+ for sce_net in scenarioDict['nets']:
+
+ sce_net_uuid = sce_net.get('uuid', sce_net["name"])
+ # get involved datacenters where this network need to be created
+ involved_datacenters = []
+ for sce_vnf in scenarioDict.get("vnfs", ()):
+ vnf_datacenter = sce_vnf.get("datacenter", default_datacenter_id)
+ if vnf_datacenter in involved_datacenters:
+ continue
+ if sce_vnf.get("interfaces"):
+ for sce_vnf_ifaces in sce_vnf["interfaces"]:
+ if sce_vnf_ifaces.get("sce_net_id") == sce_net["uuid"]:
+ involved_datacenters.append(vnf_datacenter)
+ break
+ if not involved_datacenters:
+ involved_datacenters.append(default_datacenter_id)
+ target_wim_account = sce_net.get("wim_account", default_wim_account)
+
+ descriptor_net = {}
+ if instance_dict.get("networks"):
+ if sce_net.get("uuid") in instance_dict["networks"]:
+ descriptor_net = instance_dict["networks"][sce_net["uuid"]]
+ descriptor_net_name = sce_net["uuid"]
+ elif sce_net.get("osm_id") in instance_dict["networks"]:
+ descriptor_net = instance_dict["networks"][sce_net["osm_id"]]
+ descriptor_net_name = sce_net["osm_id"]
+ elif sce_net["name"] in instance_dict["networks"]:
+ descriptor_net = instance_dict["networks"][sce_net["name"]]
+ descriptor_net_name = sce_net["name"]
+ net_name = descriptor_net.get("vim-network-name")
+ # add datacenters from instantiation parameters
+ if descriptor_net.get("sites"):
+ for site in descriptor_net["sites"]:
+ if site.get("datacenter") and site["datacenter"] not in involved_datacenters:
+ involved_datacenters.append(site["datacenter"])
+ sce_net2instance[sce_net_uuid] = {}
+ sce_net2wim_instance[sce_net_uuid] = {}
+ net2task_id['scenario'][sce_net_uuid] = {}
+
+ use_network = None
+ related_network = None
+ if descriptor_net.get("use-network"):
+ target_instance_nets = mydb.get_rows(
+ SELECT="related",
+ FROM="instance_nets",
+ WHERE={"instance_scenario_id": descriptor_net["use-network"]["instance_scenario_id"],
+ "osm_id": descriptor_net["use-network"]["osm_id"]},
+ )
+ if not target_instance_nets:
+ raise NfvoException(
+ "Cannot find the target network at instance:networks[{}]:use-network".format(
+ descriptor_net_name), httperrors.Bad_Request)
+ else:
+ use_network = target_instance_nets[0]["related"]
+
+ if sce_net["external"]:
+ number_mgmt_networks += 1
+
+ # --> WIM
+ # TODO: use this information during network creation
+ wim_account_id = wim_account_name = None
+ if len(involved_datacenters) > 1 and 'uuid' in sce_net:
+ urls = [myvims[v].url for v in involved_datacenters]
+ if len(set(urls)) < 2:
+ wim_usage[sce_net['uuid']] = False
+ elif target_wim_account is None or target_wim_account is True: # automatic selection of WIM
+ # OBS: sce_net without uuid are used internally to VNFs
+ # and the assumption is that VNFs will not be split among
+ # different datacenters
+ wim_account = wim_engine.find_suitable_wim_account(
+ involved_datacenters, tenant_id)
+ wim_account_id = wim_account['uuid']
+ wim_account_name = wim_account['name']
+ wim_usage[sce_net['uuid']] = wim_account_id
+ elif isinstance(target_wim_account, str): # manual selection of WIM
+ wim_account.persist.get_wim_account_by(target_wim_account, tenant_id)
+ wim_account_id = wim_account['uuid']
+ wim_account_name = wim_account['name']
+ wim_usage[sce_net['uuid']] = wim_account_id
+ else: # not WIM usage
+ wim_usage[sce_net['uuid']] = False
+ # <-- WIM
+
+ for datacenter_id in involved_datacenters:
+ netmap_use = None
+ netmap_create = None
+ if descriptor_net.get("sites"):
+ for site in descriptor_net["sites"]:
+ if site.get("datacenter") == datacenter_id:
+ netmap_use = site.get("netmap-use")
+ netmap_create = site.get("netmap-create")
+ break
+
+ vim = myvims[datacenter_id]
+ myvim_thread_id = myvim_threads_id[datacenter_id]
+
+ net_type = sce_net['type']
+ net_vim_name = None
+ lookfor_filter = {'admin_state_up': True, 'status': 'ACTIVE'} # 'shared': True
+
+ if not net_name:
+ if sce_net["external"]:
+ net_name = sce_net["name"]
+ else:
+ net_name = "{}-{}".format(instance_name, sce_net["name"])
+ net_name = net_name[:255] # limit length
+
+ if netmap_use or netmap_create:
+ create_network = False
+ lookfor_network = False
+ if netmap_use:
+ lookfor_network = True
+ if utils.check_valid_uuid(netmap_use):
+ lookfor_filter["id"] = netmap_use
+ else:
+ lookfor_filter["name"] = netmap_use
+ if netmap_create:
+ create_network = True
+ net_vim_name = net_name
+ if isinstance(netmap_create, str):
+ net_vim_name = netmap_create
+ elif sce_net.get("vim_network_name"):
+ create_network = False
+ lookfor_network = True
+ lookfor_filter["name"] = sce_net.get("vim_network_name")
+ elif sce_net["external"]:
+ if sce_net.get('vim_id'):
+ # there is a netmap at datacenter_nets database # TODO REVISE!!!!
+ create_network = False
+ lookfor_network = True
+ lookfor_filter["id"] = sce_net['vim_id']
+ elif vim["config"].get("management_network_id") or vim["config"].get("management_network_name"):
+ if number_mgmt_networks > 1:
+ raise NfvoException("Found several VLD of type mgmt. "
+ "You must concrete what vim-network must be use for each one",
+ httperrors.Bad_Request)
+ create_network = False
+ lookfor_network = True
+ if vim["config"].get("management_network_id"):
+ lookfor_filter["id"] = vim["config"]["management_network_id"]
+ else:
+ lookfor_filter["name"] = vim["config"]["management_network_name"]
+ else:
+ # There is not a netmap, look at datacenter for a net with this name and create if not found
+ create_network = True
+ lookfor_network = True
+ lookfor_filter["name"] = sce_net["name"]
+ net_vim_name = sce_net["name"]
+ else:
+ net_vim_name = net_name
+ create_network = True
+ lookfor_network = False
+
+ task_extra = {}
+ if create_network:
+ task_action = "CREATE"
+ task_extra["params"] = (net_vim_name, net_type, sce_net.get('ip_profile', None), False,
+ sce_net.get('provider_network', None), wim_account_name)
+
+ if lookfor_network:
+ task_extra["find"] = (lookfor_filter,)
+ elif lookfor_network:
+ task_action = "FIND"
+ task_extra["params"] = (lookfor_filter,)
+
+ # fill database content
+ net_uuid = str(uuid4())
+ uuid_list.append(net_uuid)
+ sce_net2instance[sce_net_uuid][datacenter_id] = net_uuid
+ if not related_network: # all db_instance_nets will have same related
+ related_network = use_network or net_uuid
+ sdn_net_id = None
+ sdn_controller = vim.config.get('sdn-controller')
+ sce_net2wim_instance[sce_net_uuid][datacenter_id] = None
+ if sdn_controller and net_type in ("data", "ptp"):
+ wim_id = _get_wim(mydb, sdn_controller)
+ sdn_net_id = str(uuid4())
+ sce_net2wim_instance[sce_net_uuid][datacenter_id] = sdn_net_id
+ task_extra["sdn_net_id"] = sdn_net_id
+ db_instance_wim_nets.append({
+ "uuid": sdn_net_id,
+ "instance_scenario_id": instance_uuid,
+ "sce_net_id": sce_net.get("uuid"),
+ "wim_id": wim_id,
+ "wim_account_id": sdn_controller,
+ 'status': 'BUILD', # if create_network else "ACTIVE"
+ "related": related_network,
+ 'multipoint': True if net_type=="data" else False,
+ "created": create_network, # TODO py3
+ "sdn": True,
+ })
+
+ task_wim_extra = {"params": [net_type, wim_account_name]}
+ # add sdn interfaces
+ if sce_net.get('provider_network') and sce_net['provider_network'].get("sdn-ports"):
+ task_wim_extra["sdn-ports"] = sce_net['provider_network'].get("sdn-ports")
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "status": "SCHEDULED",
+ "task_index": task_index,
+ # "datacenter_vim_id": myvim_thread_id,
+ "wim_account_id": sdn_controller,
+ "action": task_action,
+ "item": "instance_wim_nets",
+ "item_id": sdn_net_id,
+ "related": related_network,
+ "extra": yaml.safe_dump(task_wim_extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ db_net = {
+ "uuid": net_uuid,
+ "osm_id": sce_net.get("osm_id") or sce_net["name"],
+ "related": related_network,
+ 'vim_net_id': None,
+ "vim_name": net_vim_name,
+ "instance_scenario_id": instance_uuid,
+ "sce_net_id": sce_net.get("uuid"),
+ "created": create_network,
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ 'status': 'BUILD', # if create_network else "ACTIVE"
+ 'sdn_net_id': sdn_net_id,
+ }
+ db_instance_nets.append(db_net)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "status": "SCHEDULED",
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": task_action,
+ "item": "instance_nets",
+ "item_id": net_uuid,
+ "related": related_network,
+ "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
+ }
+ net2task_id['scenario'][sce_net_uuid][datacenter_id] = task_index
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ if 'ip_profile' in sce_net:
+ db_ip_profile={
+ 'instance_net_id': net_uuid,
+ 'ip_version': sce_net['ip_profile']['ip_version'],
+ 'subnet_address': sce_net['ip_profile']['subnet_address'],
+ 'gateway_address': sce_net['ip_profile']['gateway_address'],
+ 'dns_address': sce_net['ip_profile']['dns_address'],
+ 'dhcp_enabled': sce_net['ip_profile']['dhcp_enabled'],
+ 'dhcp_start_address': sce_net['ip_profile']['dhcp_start_address'],
+ 'dhcp_count': sce_net['ip_profile']['dhcp_count'],
+ }
+ db_ip_profiles.append(db_ip_profile)
+
+ # Create VNFs
+ vnf_params = {
+ "default_datacenter_id": default_datacenter_id,
+ "myvim_threads_id": myvim_threads_id,
+ "instance_uuid": instance_uuid,
+ "instance_name": instance_name,
+ "instance_action_id": instance_action_id,
+ "myvims": myvims,
+ "cloud_config": cloud_config,
+ "RO_pub_key": tenant[0].get('RO_pub_key'),
+ "instance_parameters": instance_dict,
+ }
+ vnf_params_out = {
+ "task_index": task_index,
+ "uuid_list": uuid_list,
+ "db_instance_nets": db_instance_nets,
+ "db_instance_wim_nets": db_instance_wim_nets,
+ "db_vim_actions": db_vim_actions,
+ "db_ip_profiles": db_ip_profiles,
+ "db_instance_vnfs": db_instance_vnfs,
+ "db_instance_vms": db_instance_vms,
+ "db_instance_interfaces": db_instance_interfaces,
+ "net2task_id": net2task_id,
+ "sce_net2instance": sce_net2instance,
+ "sce_net2wim_instance": sce_net2wim_instance,
+ }
+ # sce_vnf_list = sorted(scenarioDict['vnfs'], key=lambda k: k['name'])
+ for sce_vnf in scenarioDict.get('vnfs', ()): # sce_vnf_list:
+ instantiate_vnf(mydb, sce_vnf, vnf_params, vnf_params_out, rollbackList)
+ task_index = vnf_params_out["task_index"]
+ uuid_list = vnf_params_out["uuid_list"]
+
+ # Create VNFFGs
+ # task_depends_on = []
+ for vnffg in scenarioDict.get('vnffgs', ()):
+ for rsp in vnffg['rsps']:
+ sfs_created = []
+ for cp in rsp['connection_points']:
+ count = mydb.get_rows(
+ SELECT='vms.count',
+ FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_rsp_hops as h "
+ "on interfaces.uuid=h.ingress_interface_id",
+ WHERE={'h.uuid': cp['uuid']})[0]['count']
+ instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == cp['sce_vnf_id']), None)
+ instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+ dependencies = []
+ for instance_vm in instance_vms:
+ action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+ if action:
+ dependencies.append(action['task_index'])
+ # TODO: throw exception if count != len(instance_vms)
+ # TODO: and action shouldn't ever be None
+ sfis_created = []
+ for i in range(count):
+ # create sfis
+ sfi_uuid = str(uuid4())
+ extra_params = {
+ "ingress_interface_id": cp["ingress_interface_id"],
+ "egress_interface_id": cp["egress_interface_id"]
+ }
+ uuid_list.append(sfi_uuid)
+ db_sfi = {
+ "uuid": sfi_uuid,
+ "related": sfi_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_rsp_hop_id': cp['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_sfi_id": None, # vim thread will populate
+ }
+ db_instance_sfis.append(db_sfi)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_sfis",
+ "item_id": sfi_uuid,
+ "related": sfi_uuid,
+ "extra": yaml.safe_dump({"params": extra_params, "depends_on": [dependencies[i]]},
+ default_flow_style=True, width=256)
+ }
+ sfis_created.append(task_index)
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ # create sfs
+ sf_uuid = str(uuid4())
+ uuid_list.append(sf_uuid)
+ db_sf = {
+ "uuid": sf_uuid,
+ "related": sf_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_rsp_hop_id': cp['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_sf_id": None, # vim thread will populate
+ }
+ db_instance_sfs.append(db_sf)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_sfs",
+ "item_id": sf_uuid,
+ "related": sf_uuid,
+ "extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
+ default_flow_style=True, width=256)
+ }
+ sfs_created.append(task_index)
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ classifier = rsp['classifier']
+
+ # TODO the following ~13 lines can be reused for the sfi case
+ count = mydb.get_rows(
+ SELECT=('vms.count'),
+ FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_classifiers as c on interfaces.uuid=c.interface_id",
+ WHERE={'c.uuid': classifier['uuid']})[0]['count']
+ instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == classifier['sce_vnf_id']), None)
+ instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+ dependencies = []
+ for instance_vm in instance_vms:
+ action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+ if action:
+ dependencies.append(action['task_index'])
+ # TODO: throw exception if count != len(instance_vms)
+ # TODO: and action shouldn't ever be None
+ classifications_created = []
+ for i in range(count):
+ for match in classifier['matches']:
+ # create classifications
+ classification_uuid = str(uuid4())
+ uuid_list.append(classification_uuid)
+ db_classification = {
+ "uuid": classification_uuid,
+ "related": classification_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_classifier_match_id': match['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_classification_id": None, # vim thread will populate
+ }
+ db_instance_classifications.append(db_classification)
+ classification_params = {
+ "ip_proto": match["ip_proto"],
+ "source_ip": match["source_ip"],
+ "destination_ip": match["destination_ip"],
+ "source_port": match["source_port"],
+ "destination_port": match["destination_port"],
+ "logical_source_port": classifier["interface_id"]
+ }
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_classifications",
+ "item_id": classification_uuid,
+ "related": classification_uuid,
+ "extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
+ default_flow_style=True, width=256)
+ }
+ classifications_created.append(task_index)
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ # create sfps
+ sfp_uuid = str(uuid4())
+ uuid_list.append(sfp_uuid)
+ db_sfp = {
+ "uuid": sfp_uuid,
+ "related": sfp_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_rsp_id': rsp['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_sfp_id": None, # vim thread will populate
+ }
+ db_instance_sfps.append(db_sfp)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_sfps",
+ "item_id": sfp_uuid,
+ "related": sfp_uuid,
+ "extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
+ default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ db_instance_action["number_tasks"] = task_index
+
+ # --> WIM
+ logger.debug('wim_usage:\n%s\n\n', pformat(wim_usage))
+ wan_links = wim_engine.derive_wan_links(wim_usage, db_instance_nets, tenant_id)
+ wim_actions = wim_engine.create_actions(wan_links)
+ wim_actions, db_instance_action = (
+ wim_engine.incorporate_actions(wim_actions, db_instance_action))
+ # <-- WIM
+
+ scenarioDict["datacenter2tenant"] = myvim_threads_id
+
+ db_instance_scenario['datacenter_tenant_id'] = myvim_threads_id[default_datacenter_id]
+ db_instance_scenario['datacenter_id'] = default_datacenter_id
+ db_tables=[
+ {"instance_scenarios": db_instance_scenario},
+ {"instance_vnfs": db_instance_vnfs},
+ {"instance_nets": db_instance_nets},
+ {"ip_profiles": db_ip_profiles},
+ {"instance_vms": db_instance_vms},
+ {"instance_interfaces": db_instance_interfaces},
+ {"instance_actions": db_instance_action},
+ {"instance_sfis": db_instance_sfis},
+ {"instance_sfs": db_instance_sfs},
+ {"instance_classifications": db_instance_classifications},
+ {"instance_sfps": db_instance_sfps},
+ {"instance_wim_nets": db_instance_wim_nets + wan_links},
+ {"vim_wim_actions": db_vim_actions + wim_actions}
+ ]
+
+ logger.debug("create_instance done DB tables: %s",
+ yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
+ mydb.new_rows(db_tables, uuid_list)
+ for myvim_thread_id in myvim_threads_id.values():
+ vim_threads["running"][myvim_thread_id].insert_task(db_vim_actions)
+
+ wim_engine.dispatch(wim_actions)
+
+ returned_instance = mydb.get_instance_scenario(instance_uuid)
+ returned_instance["action_id"] = instance_action_id
+ return returned_instance
+ except (NfvoException, vimconn.VimConnException, sdnconn.SdnConnectorError, db_base_Exception) as e:
+ _, message = rollback(mydb, myvims, rollbackList)
+ if isinstance(e, db_base_Exception):
+ error_text = "database Exception"
+ elif isinstance(e, vimconn.VimConnException):
+ error_text = "VIM Exception"
+ elif isinstance(e, sdnconn.SdnConnectorError):
+ error_text = "WIM Exception"
+ else:
+ error_text = "Exception " + str(type(e).__name__)
+ error_text += " {}. {}".format(e, message)
+ # logger.error("create_instance: %s", error_text)
+ logger.exception(e)
+ raise NfvoException(error_text, e.http_code)
+
def increment_ip_mac(ip_mac, vm_index=1):
    """Return *ip_mac* with its last numeric group incremented by *vm_index*.

    IPv4 addresses are detected by the last dot and incremented in decimal;
    IPv6 addresses and MAC addresses are detected by the last colon and
    incremented in hexadecimal, keeping the original field width (2 digits
    for a MAC octet, up to 4 for an IPv6 group).

    :param ip_mac: address as a string; non-string values (e.g. None) are
        returned unchanged
    :param vm_index: amount to add to the last group (default 1)
    :return: the incremented address string, the original value when it is
        not a string, or None when it cannot be parsed
    """
    if not isinstance(ip_mac, str):
        return ip_mac
    try:
        # IPv4: increment the decimal number after the last dot
        i = ip_mac.rfind(".")
        if i > 0:
            i += 1
            return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
        # IPv6 or MAC: increment the hex number after the last colon,
        # zero-padded to the width of the original last group
        i = ip_mac.rfind(":")
        if i > 0:
            i += 1
            return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(ip_mac[:i], int(ip_mac[i:], 16) + vm_index)
    except ValueError:
        # last group is not a valid number for the detected base; fall
        # through and report failure (bare `except` would also have hidden
        # KeyboardInterrupt/SystemExit)
        pass
    return None
+
+
+def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
+ default_datacenter_id = params["default_datacenter_id"]
+ myvim_threads_id = params["myvim_threads_id"]
+ instance_uuid = params["instance_uuid"]
+ instance_name = params["instance_name"]
+ instance_action_id = params["instance_action_id"]
+ myvims = params["myvims"]
+ cloud_config = params["cloud_config"]
+ RO_pub_key = params["RO_pub_key"]
+
+ task_index = params_out["task_index"]
+ uuid_list = params_out["uuid_list"]
+ db_instance_nets = params_out["db_instance_nets"]
+ db_instance_wim_nets = params_out["db_instance_wim_nets"]
+ db_vim_actions = params_out["db_vim_actions"]
+ db_ip_profiles = params_out["db_ip_profiles"]
+ db_instance_vnfs = params_out["db_instance_vnfs"]
+ db_instance_vms = params_out["db_instance_vms"]
+ db_instance_interfaces = params_out["db_instance_interfaces"]
+ net2task_id = params_out["net2task_id"]
+ sce_net2instance = params_out["sce_net2instance"]
+ sce_net2wim_instance = params_out["sce_net2wim_instance"]
+
+ vnf_net2instance = {}
+ vnf_net2wim_instance = {}
+
+ # 2. Creating new nets (vnf internal nets) in the VIM"
+ # For each vnf net, we create it and we add it to instanceNetlist.
+ if sce_vnf.get("datacenter"):
+ vim = myvims[sce_vnf["datacenter"]]
+ datacenter_id = sce_vnf["datacenter"]
+ myvim_thread_id = myvim_threads_id[sce_vnf["datacenter"]]
+ else:
+ vim = myvims[default_datacenter_id]
+ datacenter_id = default_datacenter_id
+ myvim_thread_id = myvim_threads_id[default_datacenter_id]
+ for net in sce_vnf['nets']:
+ # TODO revis
+ # descriptor_net = instance_dict.get("vnfs", {}).get(sce_vnf["name"], {})
+ # net_name = descriptor_net.get("name")
+ net_name = None
+ if not net_name:
+ net_name = "{}-{}".format(instance_name, net["name"])
+ net_name = net_name[:255] # limit length
+ net_type = net['type']
+
+ if sce_vnf['uuid'] not in vnf_net2instance:
+ vnf_net2instance[sce_vnf['uuid']] = {}
+ if sce_vnf['uuid'] not in net2task_id:
+ net2task_id[sce_vnf['uuid']] = {}
+
+ # fill database content
+ net_uuid = str(uuid4())
+ uuid_list.append(net_uuid)
+ vnf_net2instance[sce_vnf['uuid']][net['uuid']] = net_uuid
+
+ sdn_controller = vim.config.get('sdn-controller')
+ sdn_net_id = None
+ if sdn_controller and net_type in ("data", "ptp"):
+ wim_id = _get_wim(mydb, sdn_controller)
+ sdn_net_id = str(uuid4())
+ db_instance_wim_nets.append({
+ "uuid": sdn_net_id,
+ "instance_scenario_id": instance_uuid,
+ "wim_id": wim_id,
+ "wim_account_id": sdn_controller,
+ 'status': 'BUILD', # if create_network else "ACTIVE"
+ "related": net_uuid,
+ 'multipoint': True if net_type == "data" else False,
+ "created": True, # TODO py3
+ "sdn": True,
+ })
+ vnf_net2wim_instance[net_uuid] = sdn_net_id
+
+ db_net = {
+ "uuid": net_uuid,
+ "related": net_uuid,
+ 'vim_net_id': None,
+ "vim_name": net_name,
+ "instance_scenario_id": instance_uuid,
+ "net_id": net["uuid"],
+ "created": True,
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ 'sdn_net_id': sdn_net_id,
+ }
+ db_instance_nets.append(db_net)
+
+ lookfor_filter = {}
+ if net.get("vim-network-name"):
+ lookfor_filter["name"] = net["vim-network-name"]
+ if net.get("vim-network-id"):
+ lookfor_filter["id"] = net["vim-network-id"]
+ if lookfor_filter:
+ task_action = "FIND"
+ task_extra = {"params": (lookfor_filter,)}
+ else:
+ task_action = "CREATE"
+ task_extra = {"params": (net_name, net_type, net.get('ip_profile', None))}
+ if sdn_net_id:
+ task_extra["sdn_net_id"] = sdn_net_id
+
+ if sdn_net_id:
+ task_wim_extra = {"params": [net_type, None]}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "status": "SCHEDULED",
+ "task_index": task_index,
+ # "datacenter_vim_id": myvim_thread_id,
+ "wim_account_id": sdn_controller,
+ "action": task_action,
+ "item": "instance_wim_nets",
+ "item_id": sdn_net_id,
+ "related": net_uuid,
+ "extra": yaml.safe_dump(task_wim_extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "status": "SCHEDULED",
+ "action": task_action,
+ "item": "instance_nets",
+ "item_id": net_uuid,
+ "related": net_uuid,
+ "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
+ }
+ net2task_id[sce_vnf['uuid']][net['uuid']] = task_index
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ if 'ip_profile' in net:
+ db_ip_profile = {
+ 'instance_net_id': net_uuid,
+ 'ip_version': net['ip_profile']['ip_version'],
+ 'subnet_address': net['ip_profile']['subnet_address'],
+ 'gateway_address': net['ip_profile']['gateway_address'],
+ 'dns_address': net['ip_profile']['dns_address'],
+ 'dhcp_enabled': net['ip_profile']['dhcp_enabled'],
+ 'dhcp_start_address': net['ip_profile']['dhcp_start_address'],
+ 'dhcp_count': net['ip_profile']['dhcp_count'],
+ }
+ db_ip_profiles.append(db_ip_profile)
+
+ # print "vnf_net2instance:"
+ # print yaml.safe_dump(vnf_net2instance, indent=4, default_flow_style=False)
+
+ # 3. Creating new vm instances in the VIM
+ # myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
+ ssh_access = None
+ if sce_vnf.get('mgmt_access'):
+ ssh_access = sce_vnf['mgmt_access'].get('config-access', {}).get('ssh-access')
+ vnf_availability_zones = []
+ for vm in sce_vnf.get('vms'):
+ vm_av = vm.get('availability_zone')
+ if vm_av and vm_av not in vnf_availability_zones:
+ vnf_availability_zones.append(vm_av)
+
+ # check if there is enough availability zones available at vim level.
+ if myvims[datacenter_id].availability_zone and vnf_availability_zones:
+ if len(vnf_availability_zones) > len(myvims[datacenter_id].availability_zone):
+ raise NfvoException('No enough availability zones at VIM for this deployment', httperrors.Bad_Request)
+
+ if sce_vnf.get("datacenter"):
+ vim = myvims[sce_vnf["datacenter"]]
+ myvim_thread_id = myvim_threads_id[sce_vnf["datacenter"]]
+ datacenter_id = sce_vnf["datacenter"]
+ else:
+ vim = myvims[default_datacenter_id]
+ myvim_thread_id = myvim_threads_id[default_datacenter_id]
+ datacenter_id = default_datacenter_id
+ sce_vnf["datacenter_id"] = datacenter_id
+ i = 0
+
+ vnf_uuid = str(uuid4())
+ uuid_list.append(vnf_uuid)
+ db_instance_vnf = {
+ 'uuid': vnf_uuid,
+ 'instance_scenario_id': instance_uuid,
+ 'vnf_id': sce_vnf['vnf_id'],
+ 'sce_vnf_id': sce_vnf['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ }
+ db_instance_vnfs.append(db_instance_vnf)
+
+ for vm in sce_vnf['vms']:
+ # skip PDUs
+ if vm.get("pdu_type"):
+ continue
+
+ myVMDict = {}
+ sce_vnf_name = sce_vnf['member_vnf_index'] if sce_vnf['member_vnf_index'] else sce_vnf['name']
+ myVMDict['name'] = "{}-{}-{}".format(instance_name[:64], sce_vnf_name[:64], vm["name"][:64])
+ myVMDict['description'] = myVMDict['name'][0:99]
+ # if not startvms:
+ # myVMDict['start'] = "no"
+ if vm.get("instance_parameters") and vm["instance_parameters"].get("name"):
+ myVMDict['name'] = vm["instance_parameters"].get("name")
+ myVMDict['name'] = myVMDict['name'][0:255] # limit name length
+ # create image at vim in case it not exist
+ image_uuid = vm['image_id']
+ if vm.get("image_list"):
+ for alternative_image in vm["image_list"]:
+ if alternative_image["vim_type"] == vim["config"]["_vim_type_internal"]:
+ image_uuid = alternative_image['image_id']
+ break
+ image_dict = mydb.get_table_by_uuid_name("images", image_uuid)
+ image_id = create_or_use_image(mydb, {datacenter_id: vim}, image_dict, [], True)
+ vm['vim_image_id'] = image_id
+
+ # create flavor at vim in case it not exist
+ flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
+ if flavor_dict['extended'] != None:
+ flavor_dict['extended'] = yaml.load(flavor_dict['extended'], Loader=yaml.Loader)
+ flavor_id = create_or_use_flavor(mydb, {datacenter_id: vim}, flavor_dict, rollbackList, True)
+
+ # Obtain information for additional disks
+ extended_flavor_dict = mydb.get_rows(FROM='datacenters_flavors', SELECT=('extended',),
+ WHERE={'vim_id': flavor_id})
+ if not extended_flavor_dict:
+ raise NfvoException("flavor '{}' not found".format(flavor_id), httperrors.Not_Found)
+
+ # extended_flavor_dict_yaml = yaml.load(extended_flavor_dict[0], Loader=yaml.Loader)
+ myVMDict['disks'] = None
+ extended_info = extended_flavor_dict[0]['extended']
+ if extended_info != None:
+ extended_flavor_dict_yaml = yaml.load(extended_info, Loader=yaml.Loader)
+ if 'disks' in extended_flavor_dict_yaml:
+ myVMDict['disks'] = extended_flavor_dict_yaml['disks']
+ if vm.get("instance_parameters") and vm["instance_parameters"].get("devices"):
+ for disk in myVMDict['disks']:
+ if disk.get("name") in vm["instance_parameters"]["devices"]:
+ disk.update(vm["instance_parameters"]["devices"][disk.get("name")])
+
+ vm['vim_flavor_id'] = flavor_id
+ myVMDict['imageRef'] = vm['vim_image_id']
+ myVMDict['flavorRef'] = vm['vim_flavor_id']
+ myVMDict['availability_zone'] = vm.get('availability_zone')
+ myVMDict['networks'] = []
+ task_depends_on = []
+ # TODO ALF. connect_mgmt_interfaces. Connect management interfaces if this is true
+ is_management_vm = False
+ db_vm_ifaces = []
+ for iface in vm['interfaces']:
+ netDict = {}
+ if iface['type'] == "data":
+ netDict['type'] = iface['model']
+ elif "model" in iface and iface["model"] != None:
+ netDict['model'] = iface['model']
+ # TODO in future, remove this because mac_address will not be set, and the type of PV,VF
+ # is obtained from iterface table model
+ # discover type of interface looking at flavor
+ for numa in flavor_dict.get('extended', {}).get('numas', []):
+ for flavor_iface in numa.get('interfaces', []):
+ if flavor_iface.get('name') == iface['internal_name']:
+ if flavor_iface['dedicated'] == 'yes':
+ netDict['type'] = "PF" # passthrough
+ elif flavor_iface['dedicated'] == 'no':
+ netDict['type'] = "VF" # siov
+ elif flavor_iface['dedicated'] == 'yes:sriov':
+ netDict['type'] = "VFnotShared" # sriov but only one sriov on the PF
+ netDict["mac_address"] = flavor_iface.get("mac_address")
+ break
+ netDict["use"] = iface['type']
+ if netDict["use"] == "data" and not netDict.get("type"):
+ # print "netDict", netDict
+ # print "iface", iface
+ e_text = "Cannot determine the interface type PF or VF of VNF '{}' VM '{}' iface '{}'".fromat(
+ sce_vnf['name'], vm['name'], iface['internal_name'])
+ if flavor_dict.get('extended') == None:
+ raise NfvoException(e_text + "After database migration some information is not available. \
+ Try to delete and create the scenarios and VNFs again", httperrors.Conflict)
+ else:
+ raise NfvoException(e_text, httperrors.Internal_Server_Error)
+ if netDict["use"] == "mgmt":
+ is_management_vm = True
+ netDict["type"] = "virtual"
+ if netDict["use"] == "bridge":
+ netDict["type"] = "virtual"
+ if iface.get("vpci"):
+ netDict['vpci'] = iface['vpci']
+ if iface.get("mac"):
+ netDict['mac_address'] = iface['mac']
+ if iface.get("mac_address"):
+ netDict['mac_address'] = iface['mac_address']
+ if iface.get("ip_address"):
+ netDict['ip_address'] = iface['ip_address']
+ if iface.get("port-security") is not None:
+ netDict['port_security'] = iface['port-security']
+ if iface.get("port_security_disable_strategy") is not None:
+ netDict['port_security_disable_strategy'] = iface['port_security_disable_strategy']
+ if iface.get("floating-ip") is not None:
+ netDict['floating_ip'] = iface['floating-ip']
+ netDict['name'] = iface['internal_name']
+ if iface['net_id'] is None:
+ for vnf_iface in sce_vnf["interfaces"]:
+ # print iface
+ # print vnf_iface
+ if vnf_iface['interface_id'] == iface['uuid']:
+ netDict['net_id'] = "TASK-{}".format(
+ net2task_id['scenario'][vnf_iface['sce_net_id']][datacenter_id])
+ instance_net_id = sce_net2instance[vnf_iface['sce_net_id']][datacenter_id]
+ instance_wim_net_id = sce_net2wim_instance[vnf_iface['sce_net_id']][datacenter_id]
+ task_depends_on.append(net2task_id['scenario'][vnf_iface['sce_net_id']][datacenter_id])
+ break
+ else:
+ netDict['net_id'] = "TASK-{}".format(net2task_id[sce_vnf['uuid']][iface['net_id']])
+ instance_net_id = vnf_net2instance[sce_vnf['uuid']][iface['net_id']]
+ instance_wim_net_id = vnf_net2wim_instance.get(instance_net_id)
+ task_depends_on.append(net2task_id[sce_vnf['uuid']][iface['net_id']])
+ # skip bridge ifaces not connected to any net
+ if 'net_id' not in netDict or netDict['net_id'] == None:
+ continue
+ myVMDict['networks'].append(netDict)
+ db_vm_iface = {
+ # "uuid"
+ # 'instance_vm_id': instance_vm_uuid,
+ "instance_net_id": instance_net_id,
+ "instance_wim_net_id": instance_wim_net_id,
+ 'interface_id': iface['uuid'],
+ # 'vim_interface_id': ,
+ 'type': 'external' if iface['external_name'] is not None else 'internal',
+ 'model': iface['model'],
+ 'ip_address': iface.get('ip_address'),
+ 'mac_address': iface.get('mac'),
+ 'floating_ip': int(iface.get('floating-ip', False)),
+ 'port_security': int(iface.get('port-security', True))
+ }
+ db_vm_ifaces.append(db_vm_iface)
+ # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+ # print myVMDict['name']
+ # print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
+ # print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
+ # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+
+ # We add the RO key to cloud_config if vnf will need ssh access
+ cloud_config_vm = cloud_config
+ if is_management_vm and params["instance_parameters"].get("mgmt_keys"):
+ cloud_config_vm = unify_cloud_config({"key-pairs": params["instance_parameters"]["mgmt_keys"]},
+ cloud_config_vm)
+
+ if vm.get("instance_parameters") and "mgmt_keys" in vm["instance_parameters"]:
+ if vm["instance_parameters"]["mgmt_keys"]:
+ cloud_config_vm = unify_cloud_config({"key-pairs": vm["instance_parameters"]["mgmt_keys"]},
+ cloud_config_vm)
+ if RO_pub_key:
+ cloud_config_vm = unify_cloud_config(cloud_config_vm, {"key-pairs": [RO_pub_key]})
+ if vm.get("boot_data"):
+ cloud_config_vm = unify_cloud_config(vm["boot_data"], cloud_config_vm)
+
+ if myVMDict.get('availability_zone'):
+ av_index = vnf_availability_zones.index(myVMDict['availability_zone'])
+ else:
+ av_index = None
+ for vm_index in range(0, vm.get('count', 1)):
+ if vm.get("instance_parameters") and vm["instance_parameters"].get("cloud_init"):
+ cloud_config_vm_ = unify_cloud_config(cloud_config_vm,
+ {"user-data": vm["instance_parameters"]["cloud_init"][vm_index]})
+ else:
+ cloud_config_vm_ = cloud_config_vm
+
+ vm_name = myVMDict['name'] + "-" + str(vm_index+1)
+ vm_networks = deepcopy(myVMDict['networks'])
+ task_params = (vm_name, myVMDict['description'], myVMDict.get('start', None),
+ myVMDict['imageRef'], myVMDict['flavorRef'], vm_networks, cloud_config_vm_,
+ myVMDict['disks'], av_index, vnf_availability_zones)
+
+ vm_uuid = str(uuid4())
+ uuid_list.append(vm_uuid)
+ db_vm = {
+ "uuid": vm_uuid,
+ "related": vm_uuid,
+ 'instance_vnf_id': vnf_uuid,
+ # TODO delete "vim_vm_id": vm_id,
+ "vm_id": vm["uuid"],
+ "vim_name": vm_name,
+ # "status":
+ }
+ db_instance_vms.append(db_vm)
+
+ # put interface uuid back to scenario[vnfs][vms[[interfaces]
+ for net in vm_networks:
+ if "vim_id" in net:
+ for iface in vm['interfaces']:
+ if net["name"] == iface["internal_name"]:
+ iface["vim_id"] = net["vim_id"]
+ break
+
+ if vm_index > 0:
+ if net.get("ip_address"):
+ net["ip_address"] = increment_ip_mac(net.get("ip_address"), vm_index)
+ if net.get("mac_address"):
+ net["mac_address"] = increment_ip_mac(net.get("mac_address"), vm_index)
+
+ for iface_index, db_vm_iface in enumerate(db_vm_ifaces):
+ iface_uuid = str(uuid4())
+ uuid_list.append(iface_uuid)
+ db_vm_iface_instance = {
+ "uuid": iface_uuid,
+ "instance_vm_id": vm_uuid,
+ "ip_address": vm_networks[iface_index].get("ip_address"),
+ "mac_address": vm_networks[iface_index].get("mac_address")
+ }
+ db_vm_iface_instance.update(db_vm_iface)
+ db_instance_interfaces.append(db_vm_iface_instance)
+ vm_networks[iface_index]["uuid"] = iface_uuid
+
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_vms",
+ "item_id": vm_uuid,
+ "related": vm_uuid,
+ "extra": yaml.safe_dump({"params": task_params, "depends_on": task_depends_on},
+ default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ params_out["task_index"] = task_index
+ params_out["uuid_list"] = uuid_list
+
+
def delete_instance(mydb, tenant_id, instance_id):
    """Delete a deployed scenario instance.

    Removes the instance from the database immediately, then schedules
    asynchronous DELETE tasks at the involved VIMs/WIMs for every deployed
    item (SFC elements: sfps, classifications, sfs, sfis; then VMs, VIM
    networks and SDN networks). Task ordering is expressed through
    'task_index' plus 'depends_on' lists inside each action's 'extra' field.

    :param mydb: nfvo database connection object
    :param tenant_id: nfvo tenant owning the instance
    :param instance_id: uuid or name of the instance scenario to delete
    :return: a text message containing the action_id and, if any, the
        elements that could not be scheduled for deletion at the VIM
    :raises: database exceptions from mydb, NfvoException on VIM errors
    """
    # print "Checking that the instance_id exists and getting the instance dictionary"
    instanceDict = mydb.get_instance_scenario(instance_id, tenant_id)
    # print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
    tenant_id = instanceDict["tenant_id"]

    # --> WIM
    # We need to retrieve the WIM Actions now, before the instance_scenario is
    # deleted. The reason for that is that: ON CASCADE rules will delete the
    # instance_wim_nets record in the database
    wim_actions = wim_engine.delete_actions(instance_scenario_id=instance_id)
    # <-- WIM

    # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
    # 1. Delete from Database
    message = mydb.delete_instance_scenario(instance_id, tenant_id)

    # 2. delete from VIM
    error_msg = ""
    myvims = {}             # cache of VIM connectors, keyed by (datacenter_id, datacenter_tenant_id)
    myvim_threads = {}      # cache of VIM processing threads, same key
    vimthread_affected = {} # set (as dict keys) of datacenter_tenant_ids whose thread must be kicked
    net2vm_dependencies = {}  # instance_net_id -> list of VM-delete task indexes that must run first

    task_index = 0
    instance_action_id = get_task_id()
    db_vim_actions = []
    db_instance_action = {
        "uuid": instance_action_id,  # same uuid for the instance and the action on create
        "tenant_id": tenant_id,
        "instance_id": instance_id,
        "description": "DELETE",
        # "number_tasks": 0 # filled bellow
    }

    # 2.1 deleting VNFFGs
    # Each SFC item type is deleted in dependency order: sfps first, then
    # classifications and sfs (which depend on sfps), then sfis (which depend on sfs).
    for sfp in instanceDict.get('sfps', ()):
        vimthread_affected[sfp["datacenter_tenant_id"]] = None
        datacenter_key = (sfp["datacenter_id"], sfp["datacenter_tenant_id"])
        if datacenter_key not in myvims:
            # resolve and cache the VIM thread and connector for this datacenter
            try:
                _, myvim_thread = get_vim_thread(mydb, tenant_id, sfp["datacenter_id"], sfp["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=sfp["datacenter_id"],
                           datacenter_tenant_id=sfp["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfp["datacenter_id"], sfp["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        if not myvim:
            # cannot schedule deletion without a reachable datacenter; report and go on
            error_msg += "\n vim_sfp_id={} cannot be deleted because datacenter={} not found".format(sfp['vim_sfp_id'], sfp["datacenter_id"])
            continue
        # NOTE(review): (x) is not a tuple; "params" carries the bare id here — confirm intended
        extra = {"params": (sfp['vim_sfp_id'])}
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "datacenter_vim_id": sfp["datacenter_tenant_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_sfps",
            "item_id": sfp["uuid"],
            "related": sfp["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)

    for classification in instanceDict['classifications']:
        vimthread_affected[classification["datacenter_tenant_id"]] = None
        datacenter_key = (classification["datacenter_id"], classification["datacenter_tenant_id"])
        if datacenter_key not in myvims:
            try:
                _, myvim_thread = get_vim_thread(mydb, tenant_id, classification["datacenter_id"], classification["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=classification["datacenter_id"],
                           datacenter_tenant_id=classification["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(classification["datacenter_id"],
                                                                                               classification["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        if not myvim:
            error_msg += "\n vim_classification_id={} cannot be deleted because datacenter={} not found".format(classification['vim_classification_id'],
                                                                                                               classification["datacenter_id"])
            continue
        # classifications must be removed only after all sfps are gone
        depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfps"]
        extra = {"params": (classification['vim_classification_id']), "depends_on": depends_on}
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "datacenter_vim_id": classification["datacenter_tenant_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_classifications",
            "item_id": classification["uuid"],
            "related": classification["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)

    for sf in instanceDict.get('sfs', ()):
        vimthread_affected[sf["datacenter_tenant_id"]] = None
        datacenter_key = (sf["datacenter_id"], sf["datacenter_tenant_id"])
        if datacenter_key not in myvims:
            try:
                _, myvim_thread = get_vim_thread(mydb, tenant_id, sf["datacenter_id"], sf["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=sf["datacenter_id"],
                           datacenter_tenant_id=sf["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sf["datacenter_id"], sf["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        if not myvim:
            error_msg += "\n vim_sf_id={} cannot be deleted because datacenter={} not found".format(sf['vim_sf_id'], sf["datacenter_id"])
            continue
        # service functions must be removed only after all sfps are gone
        depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfps"]
        extra = {"params": (sf['vim_sf_id']), "depends_on": depends_on}
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "datacenter_vim_id": sf["datacenter_tenant_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_sfs",
            "item_id": sf["uuid"],
            "related": sf["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)

    for sfi in instanceDict.get('sfis', ()):
        vimthread_affected[sfi["datacenter_tenant_id"]] = None
        datacenter_key = (sfi["datacenter_id"], sfi["datacenter_tenant_id"])
        if datacenter_key not in myvims:
            try:
                _, myvim_thread = get_vim_thread(mydb, tenant_id, sfi["datacenter_id"], sfi["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=sfi["datacenter_id"],
                           datacenter_tenant_id=sfi["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfi["datacenter_id"], sfi["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        if not myvim:
            error_msg += "\n vim_sfi_id={} cannot be deleted because datacenter={} not found".format(sfi['vim_sfi_id'], sfi["datacenter_id"])
            continue
        # sf instances must be removed only after the service functions using them
        depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfs"]
        extra = {"params": (sfi['vim_sfi_id']), "depends_on": depends_on}
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "datacenter_vim_id": sfi["datacenter_tenant_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_sfis",
            "item_id": sfi["uuid"],
            "related": sfi["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)

    # 2.2 deleting VMs
    # vm_fail_list=[]
    for sce_vnf in instanceDict.get('vnfs', ()):
        datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
        vimthread_affected[sce_vnf["datacenter_tenant_id"]] = None
        if datacenter_key not in myvims:
            try:
                _, myvim_thread = get_vim_thread(mydb, tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=sce_vnf["datacenter_id"],
                           datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"],
                                                                                               sce_vnf["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        for vm in sce_vnf['vms']:
            if not myvim:
                error_msg += "\n VM id={} cannot be deleted because datacenter={} not found".format(vm['vim_vm_id'], sce_vnf["datacenter_id"])
                continue
            # VMs must wait for all sfi deletions (their ports may be in use by the chains)
            sfi_dependencies = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfis"]
            db_vim_action = {
                "instance_action_id": instance_action_id,
                "task_index": task_index,
                "datacenter_vim_id": sce_vnf["datacenter_tenant_id"],
                "action": "DELETE",
                "status": "SCHEDULED",
                "item": "instance_vms",
                "item_id": vm["uuid"],
                "related": vm["related"],
                "extra": yaml.safe_dump({"params": vm["interfaces"], "depends_on": sfi_dependencies},
                                        default_flow_style=True, width=256)
            }
            db_vim_actions.append(db_vim_action)
            # record this VM-delete task as a prerequisite of deleting the nets it is attached to
            for interface in vm["interfaces"]:
                if not interface.get("instance_net_id"):
                    continue
                if interface["instance_net_id"] not in net2vm_dependencies:
                    net2vm_dependencies[interface["instance_net_id"]] = []
                net2vm_dependencies[interface["instance_net_id"]].append(task_index)
            task_index += 1

    # 2.3 deleting NETS
    # net_fail_list=[]
    for net in instanceDict['nets']:
        vimthread_affected[net["datacenter_tenant_id"]] = None
        datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
        if datacenter_key not in myvims:
            try:
                _,myvim_thread = get_vim_thread(mydb, tenant_id, net["datacenter_id"], net["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=net["datacenter_id"],
                           datacenter_tenant_id=net["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        if not myvim:
            error_msg += "\n Net VIM_id={} cannot be deleted because datacenter={} not found".format(net['vim_net_id'], net["datacenter_id"])
            continue
        extra = {"params": (net['vim_net_id'], net['sdn_net_id'])}
        # a net can only be deleted after the VMs attached to it and all sfi deletions
        if net2vm_dependencies.get(net["uuid"]):
            extra["depends_on"] = net2vm_dependencies[net["uuid"]]
        sfi_dependencies = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfis"]
        if len(sfi_dependencies) > 0:
            if "depends_on" in extra:
                extra["depends_on"] += sfi_dependencies
            else:
                extra["depends_on"] = sfi_dependencies
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "datacenter_vim_id": net["datacenter_tenant_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_nets",
            "item_id": net["uuid"],
            "related": net["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)
    # SDN-managed networks get their own DELETE actions, routed by wim_account_id
    for sdn_net in instanceDict['sdn_nets']:
        if not sdn_net["sdn"]:
            continue
        extra = {}
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "wim_account_id": sdn_net["wim_account_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_wim_nets",
            "item_id": sdn_net["uuid"],
            "related": sdn_net["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)

    db_instance_action["number_tasks"] = task_index

    # --> WIM
    wim_actions, db_instance_action = (
        wim_engine.incorporate_actions(wim_actions, db_instance_action))
    # <-- WIM

    db_tables = [
        {"instance_actions": db_instance_action},
        {"vim_wim_actions": db_vim_actions + wim_actions}
    ]

    logger.debug("delete_instance done DB tables: %s",
                 yaml.safe_dump(db_tables, indent=4, default_flow_style=False))
    # persist all scheduled actions, then wake up every affected VIM thread
    mydb.new_rows(db_tables, ())
    for myvim_thread_id in vimthread_affected.keys():
        vim_threads["running"][myvim_thread_id].insert_task(db_vim_actions)

    wim_engine.dispatch(wim_actions)

    if len(error_msg) > 0:
        return 'action_id={} instance {} deleted but some elements could not be deleted, or already deleted '\
               '(error: 404) from VIM: {}'.format(instance_action_id, message, error_msg)
    else:
        return "action_id={} instance {} deleted".format(instance_action_id, message)
+
def get_instance_id(mydb, tenant_id, instance_id):
    """Return the verbose dictionary describing a deployed scenario instance.

    Validates the tenant first, then queries the database for the instance
    scenario with full detail (verbose=True).

    :param mydb: nfvo database connection object
    :param tenant_id: nfvo tenant; must exist or an exception is raised
    :param instance_id: uuid or name of the instance scenario
    :return: the instance scenario dictionary
    :raises: exceptions propagated from check_tenant / mydb on an invalid
        tenant or a missing instance
    """
    # check valid tenant_id
    check_tenant(mydb, tenant_id)
    # obtain data
    instance_dict = mydb.get_instance_scenario(instance_id, tenant_id, verbose=True)
    # TODO py3: re-enable SDN network enrichment below; it needs the 'ovim'
    # module (the removed unused 'global ovim' declaration is not required
    # for read-only access, so none is needed when restoring this code)
    # for net in instance_dict["nets"]:
    #     if net.get("sdn_net_id"):
    #         net_sdn = ovim.show_network(net["sdn_net_id"])
    #         net["sdn_info"] = {
    #             "admin_state_up": net_sdn.get("admin_state_up"),
    #             "flows": net_sdn.get("flows"),
    #             "last_error": net_sdn.get("last_error"),
    #             "ports": net_sdn.get("ports"),
    #             "type": net_sdn.get("type"),
    #             "status": net_sdn.get("status"),
    #             "vlan": net_sdn.get("vlan"),
    #         }
    return instance_dict
+
@deprecated("Instance is automatically refreshed by vim_threads")
def refresh_instance(mydb, nfvo_tenant, instanceDict, datacenter=None, vim_tenant=None):
    """Refresh a scenario instance (deprecated; kept only for API compatibility).

    Historically this polled the VIM for VM/net status and wrote the changes
    back into the database. That work is now performed asynchronously by the
    vim_threads, so this function no longer contacts the VIM. It modifies
    nothing; the former polling implementation was removed (see git history).

    Returns:
        - result: <0 if there is any unexpected error, n>=0 if no errors,
          where n is the number of vms and nets that couldn't be updated in
          the database (always 0 in this stub)
        - error_msg
    """
    instance_id = instanceDict['uuid']
    return 0, 'Scenario instance ' + instance_id + ' refreshed.'
+
def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
    """Run an action over the VMs of a deployed scenario instance.

    Two modes, selected by the content of action_dict:
      * "vdu-scaling": schedules creation/deletion of VDU replicas as
        vim_wim_actions handed over to the running vim threads; returns a
        dict with "instance_action_id", "created" and "deleted" uuid lists.
      * otherwise the action ("add_public_key", "console", or any VM action
        the VIM understands) is applied to the VMs selected by the optional
        "vnfs"/"vms" filters; returns a dict indexed by vm uuid with a
        per-VM {"vim_result", "description", "name"} entry.

    Raises NfvoException on lookup/validation errors.
    """
    #print "Checking that the instance_id exists and getting the instance dictionary"
    instanceDict = mydb.get_instance_scenario(instance_id, nfvo_tenant)
    #print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)

    #print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
    vims = get_vim(mydb, nfvo_tenant, instanceDict['datacenter_id'])
    if len(vims) == 0:
        raise NfvoException("datacenter '{}' not found".format(str(instanceDict['datacenter_id'])), httperrors.Not_Found)
    myvim = next(iter(vims.values()))
    vm_result = {}
    vm_error = 0
    vm_ok = 0

    myvim_threads_id = {}
    if action_dict.get("vdu-scaling"):
        # scaling mode: build DB rows describing the scale tasks and enqueue them
        db_instance_vms = []
        db_vim_actions = []
        db_instance_interfaces = []
        instance_action_id = get_task_id()
        db_instance_action = {
            "uuid": instance_action_id,   # same uuid for the instance and the action on create
            "tenant_id": nfvo_tenant,
            "instance_id": instance_id,
            "description": "SCALE",
        }
        vm_result["instance_action_id"] = instance_action_id
        vm_result["created"] = []
        vm_result["deleted"] = []
        task_index = 0
        for vdu in action_dict["vdu-scaling"]:
            vdu_id = vdu.get("vdu-id")
            osm_vdu_id = vdu.get("osm_vdu_id")
            member_vnf_index = vdu.get("member-vnf-index")
            vdu_count = vdu.get("count", 1)
            if vdu_id:
                # locate the instance VM directly by its uuid
                target_vms = mydb.get_rows(
                    FROM="instance_vms as vms join instance_vnfs as vnfs on vms.instance_vnf_id=vnfs.uuid",
                    WHERE={"vms.uuid": vdu_id},
                    ORDER_BY="vms.created_at"
                )
                if not target_vms:
                    raise NfvoException("Cannot find the vdu with id {}".format(vdu_id), httperrors.Not_Found)
            else:
                if not osm_vdu_id and not member_vnf_index:
                    # NOTE(review): this raise omits the http_code argument ("of" is likely a typo for "or")
                    raise NfvoException("Invalid input vdu parameters. Must supply either 'vdu-id' of 'osm_vdu_id','member-vnf-index'")
                # locate the instance VMs by descriptor ids within this instance
                target_vms = mydb.get_rows(
                    # SELECT=("ivms.uuid", "ivnfs.datacenter_id", "ivnfs.datacenter_tenant_id"),
                    FROM="instance_vms as ivms join instance_vnfs as ivnfs on ivms.instance_vnf_id=ivnfs.uuid"\
                         " join sce_vnfs as svnfs on ivnfs.sce_vnf_id=svnfs.uuid"\
                         " join vms on ivms.vm_id=vms.uuid",
                    WHERE={"vms.osm_id": osm_vdu_id, "svnfs.member_vnf_index": member_vnf_index,
                           "ivnfs.instance_scenario_id": instance_id},
                    ORDER_BY="ivms.created_at"
                )
                if not target_vms:
                    raise NfvoException("Cannot find the vdu with osm_vdu_id {} and member-vnf-index {}".format(osm_vdu_id, member_vnf_index), httperrors.Not_Found)
                vdu_id = target_vms[-1]["uuid"]
            target_vm = target_vms[-1]
            datacenter = target_vm["datacenter_id"]
            myvim_threads_id[datacenter], _ = get_vim_thread(mydb, nfvo_tenant, datacenter)

            if vdu["type"] == "delete":
                # delete the newest vdu_count replicas (target_vms is ordered by created_at)
                for index in range(0, vdu_count):
                    target_vm = target_vms[-1-index]
                    vdu_id = target_vm["uuid"]
                    # look for nm
                    vm_interfaces = None
                    for sce_vnf in instanceDict['vnfs']:
                        for vm in sce_vnf['vms']:
                            if vm["uuid"] == vdu_id:
                                # TODO revise this should not be vm["uuid"] instance_vms["vm_id"]
                                vm_interfaces = vm["interfaces"]
                                break

                    db_vim_action = {
                        "instance_action_id": instance_action_id,
                        "task_index": task_index,
                        "datacenter_vim_id": target_vm["datacenter_tenant_id"],
                        "action": "DELETE",
                        "status": "SCHEDULED",
                        "item": "instance_vms",
                        "item_id": vdu_id,
                        "related": target_vm["related"],
                        "extra": yaml.safe_dump({"params": vm_interfaces},
                                                default_flow_style=True, width=256)
                    }
                    # get affected instance_interfaces (deleted on cascade) to check if a wim_network must be updated
                    deleted_interfaces = mydb.get_rows(
                        SELECT=("instance_wim_net_id", ),
                        FROM="instance_interfaces",
                        WHERE={"instance_vm_id": vdu_id, "instance_wim_net_id<>": None},
                    )
                    for deleted_interface in deleted_interfaces:
                        db_vim_actions.append({"TO-UPDATE": {}, "WHERE": {
                            "item": "instance_wim_nets", "item_id": deleted_interface["instance_wim_net_id"]}})

                    task_index += 1
                    db_vim_actions.append(db_vim_action)
                    vm_result["deleted"].append(vdu_id)
                    # delete from database
                    db_instance_vms.append({"TO-DELETE": vdu_id})

            else:  # vdu["type"] == "create":
                # clone the original CREATE vim action of the newest replica
                iface2iface = {}
                where = {"item": "instance_vms", "item_id": target_vm["uuid"], "action": "CREATE"}

                vim_action_to_clone = mydb.get_rows(FROM="vim_wim_actions", WHERE=where)
                if not vim_action_to_clone:
                    raise NfvoException("Cannot find the vim_action at database with {}".format(where), httperrors.Internal_Server_Error)
                vim_action_to_clone = vim_action_to_clone[0]
                extra = yaml.safe_load(vim_action_to_clone["extra"])

                # generate a new depends_on. Convert format TASK-Y into new format TASK-ACTION-XXXX.XXXX.Y
                # TODO do the same for flavor and image when available
                task_depends_on = []
                # observed layout of task_params (from usage below): [0]=vm name,
                # [5]=interfaces list, [6]=cloud-config dict — confirm against vim_thread
                task_params = extra["params"]
                for iface in task_params[5]:
                    if iface["net_id"].startswith("TASK-"):
                        if "." not in iface["net_id"]:
                            task_depends_on.append("{}.{}".format(vim_action_to_clone["instance_action_id"],
                                                                  iface["net_id"][5:]))
                            iface["net_id"] = "TASK-{}.{}".format(vim_action_to_clone["instance_action_id"],
                                                                  iface["net_id"][5:])
                        else:
                            task_depends_on.append(iface["net_id"][5:])

                vm_ifaces_to_clone = mydb.get_rows(FROM="instance_interfaces", WHERE={"instance_vm_id": target_vm["uuid"]})
                for index in range(0, vdu_count):
                    vm_uuid = str(uuid4())
                    vm_name = target_vm.get('vim_name')
                    try:
                        # derive replica name by incrementing the numeric suffix after the last '-'
                        suffix = vm_name.rfind("-")
                        vm_name = vm_name[:suffix+1] + str(index + 1 + int(vm_name[suffix+1:]))
                    except Exception:
                        # keep the original name if there is no numeric suffix
                        pass
                    db_instance_vm = {
                        "uuid": vm_uuid,
                        'related': vm_uuid,
                        'instance_vnf_id': target_vm['instance_vnf_id'],
                        'vm_id': target_vm['vm_id'],
                        'vim_name': vm_name,
                    }
                    db_instance_vms.append(db_instance_vm)

                    # clone the interfaces of the template VM for the new replica
                    for vm_iface in vm_ifaces_to_clone:
                        iface_uuid = str(uuid4())
                        iface2iface[vm_iface["uuid"]] = iface_uuid
                        db_vm_iface = {
                            "uuid": iface_uuid,
                            'instance_vm_id': vm_uuid,
                            "instance_net_id": vm_iface["instance_net_id"],
                            "instance_wim_net_id": vm_iface["instance_wim_net_id"],
                            'interface_id': vm_iface['interface_id'],
                            'type': vm_iface['type'],
                            'model': vm_iface['model'],
                            'floating_ip': vm_iface['floating_ip'],
                            'port_security': vm_iface['port_security']
                        }
                        db_instance_interfaces.append(db_vm_iface)
                        if db_vm_iface["instance_wim_net_id"]:
                            # wim network attached: schedule an update of the wim net
                            db_vim_actions.append({"TO-UPDATE": {}, "WHERE": {
                                "item": "instance_wim_nets", "item_id": db_vm_iface["instance_wim_net_id"]}})
                    task_params_copy = deepcopy(task_params)
                    cloud_config_vm = task_params_copy[6] or {}
                    if vdu.get("cloud_init"):
                        # per-replica cloud-init overrides the cloned user-data
                        cloud_config_vm.pop("user-data", None)
                        cloud_config_vm_ = unify_cloud_config(cloud_config_vm, {"user-data": vdu["cloud_init"][index]})
                        task_params_copy[6] = cloud_config_vm_
                    for iface in task_params_copy[5]:
                        iface["uuid"] = iface2iface[iface["uuid"]]
                        # increment ip_address
                        if iface.get("ip_address"):
                            iface["ip_address"] = increment_ip_mac(iface.get("ip_address"), index+1)
                        if iface.get("mac_address"):
                            iface["mac_address"] = increment_ip_mac(iface.get("mac_address"), index+1)

                    if vm_name:
                        task_params_copy[0] = vm_name
                    db_vim_action = {
                        "instance_action_id": instance_action_id,
                        "task_index": task_index,
                        "datacenter_vim_id": vim_action_to_clone["datacenter_vim_id"],
                        "action": "CREATE",
                        "status": "SCHEDULED",
                        "item": "instance_vms",
                        "item_id": vm_uuid,
                        "related": vm_uuid,
                        # TODO review parameters; remove MAC or increment it; increment IP and set the dependencies with ACTION-xxxx
                        "extra": yaml.safe_dump({"params": task_params_copy, "depends_on": task_depends_on}, default_flow_style=True, width=256)
                    }
                    task_index += 1
                    db_vim_actions.append(db_vim_action)
                    vm_result["created"].append(vm_uuid)

        db_instance_action["number_tasks"] = task_index
        db_tables = [
            {"instance_vms": db_instance_vms},
            {"instance_interfaces": db_instance_interfaces},
            {"instance_actions": db_instance_action},
            # TODO revise sfps
            # {"instance_sfis": db_instance_sfis},
            # {"instance_sfs": db_instance_sfs},
            # {"instance_classifications": db_instance_classifications},
            # {"instance_sfps": db_instance_sfps},
            {"vim_wim_actions": db_vim_actions}
        ]
        logger.debug("create_vdu done DB tables: %s",
                     yaml.safe_dump(db_tables, indent=4, default_flow_style=False))
        mydb.new_rows(db_tables, [])
        # wake up the vim threads of every involved datacenter
        for myvim_thread in myvim_threads_id.values():
            vim_threads["running"][myvim_thread].insert_task(db_vim_actions)

        return vm_result

    # non-scaling mode: apply the action VM by VM, honoring the vnfs/vms filters
    input_vnfs = action_dict.pop("vnfs", [])
    input_vms = action_dict.pop("vms", [])
    action_over_all = True if not input_vnfs and not input_vms else False
    for sce_vnf in instanceDict['vnfs']:
        for vm in sce_vnf['vms']:
            if not action_over_all and sce_vnf['uuid'] not in input_vnfs and sce_vnf['vnf_name'] not in input_vnfs and \
                    sce_vnf['member_vnf_index'] not in input_vnfs and \
                    vm['uuid'] not in input_vms and vm['name'] not in input_vms and \
                    sce_vnf['member_vnf_index'] + "-" + vm['vdu_osm_id'] not in input_vms:  # TODO consider vm_count_index
                continue
            try:
                if "add_public_key" in action_dict:
                    if sce_vnf.get('mgmt_access'):
                        mgmt_access = yaml.load(sce_vnf['mgmt_access'], Loader=yaml.Loader)
                        if not input_vms and mgmt_access.get("vdu-id") != vm['vdu_osm_id']:
                            continue
                        default_user = mgmt_access.get("default-user")
                        password = mgmt_access.get("password")
                        if mgmt_access.get(vm['vdu_osm_id']):
                            # per-vdu credentials override the vnf-level ones
                            default_user = mgmt_access[vm['vdu_osm_id']].get("default-user", default_user)
                            password = mgmt_access[vm['vdu_osm_id']].get("password", password)

                        tenant = mydb.get_rows_by_id('nfvo_tenants', nfvo_tenant)
                        try:
                            if 'ip_address' in vm:
                                mgmt_ip = vm['ip_address'].split(';')
                                priv_RO_key = decrypt_key(tenant[0]['encrypted_RO_priv_key'], tenant[0]['uuid'])
                                data = myvim.inject_user_key(mgmt_ip[0], action_dict.get('user', default_user),
                                                             action_dict['add_public_key'],
                                                             password=password, ro_key=priv_RO_key)
                                vm_result[ vm['uuid'] ] = {"vim_result": 200,
                                                           "description": "Public key injected",
                                                           "name":vm['name']
                                                           }
                        except KeyError:
                            raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
                                                httperrors.Internal_Server_Error)
                    else:
                        # no mgmt_access info: key injection cannot proceed
                        raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
                                            httperrors.Internal_Server_Error)
                else:
                    data = myvim.action_vminstance(vm['vim_vm_id'], action_dict)
                    if "console" in action_dict:
                        if not global_config["http_console_proxy"]:
                            # no proxy configured: return the VIM console URL directly
                            vm_result[ vm['uuid'] ] = {"vim_result": 200,
                                                       "description": "{protocol}//{ip}:{port}/{suffix}".format(
                                                           protocol=data["protocol"],
                                                           ip = data["server"],
                                                           port = data["port"],
                                                           suffix = data["suffix"]),
                                                       "name":vm['name']
                                                       }
                            vm_ok +=1
                        elif data["server"]=="127.0.0.1" or data["server"]=="localhost":
                            vm_result[ vm['uuid'] ] = {"vim_result": -httperrors.Unauthorized,
                                                       "description": "this console is only reachable by local interface",
                                                       "name":vm['name']
                                                       }
                            vm_error+=1
                        else:
                            #print "console data", data
                            try:
                                # tunnel the console through a local proxy thread
                                console_thread = create_or_use_console_proxy_thread(data["server"], data["port"])
                                vm_result[ vm['uuid'] ] = {"vim_result": 200,
                                                           "description": "{protocol}//{ip}:{port}/{suffix}".format(
                                                               protocol=data["protocol"],
                                                               ip = global_config["http_console_host"],
                                                               port = console_thread.port,
                                                               suffix = data["suffix"]),
                                                           "name":vm['name']
                                                           }
                                vm_ok +=1
                            except NfvoException as e:
                                vm_result[ vm['uuid'] ] = {"vim_result": e.http_code, "name":vm['name'], "description": str(e)}
                                vm_error+=1

                    else:
                        vm_result[ vm['uuid'] ] = {"vim_result": 200, "description": "ok", "name":vm['name']}
                        vm_ok +=1
            except vimconn.VimConnException as e:
                vm_result[ vm['uuid'] ] = {"vim_result": e.http_code, "name":vm['name'], "description": str(e)}
                vm_error+=1

    # NOTE(review): both branches return the same value; kept for history
    if vm_ok==0:  # all goes wrong
        return vm_result
    else:
        return vm_result
+
def instance_action_get(mydb, nfvo_tenant, instance_id, action_id):
    """Retrieve instance actions from the database.

    Args:
        mydb: database connector.
        nfvo_tenant: tenant uuid, or "any"/None to skip the tenant filter.
        instance_id: instance uuid, or "any"/None to skip the instance filter.
        action_id: optional action uuid; when given the matching row is
            augmented with its "vim_wim_actions" (plus the legacy alias
            "vim_actions") and NfvoException (404) is raised if absent.

    Returns:
        dict: {"actions": [<instance_actions rows>]}
    """
    # fix: previous code named this dict `filter`, shadowing the builtin
    where = {}
    if nfvo_tenant and nfvo_tenant != "any":
        where["tenant_id"] = nfvo_tenant
    if instance_id and instance_id != "any":
        where["instance_id"] = instance_id
    if action_id:
        where["uuid"] = action_id
    rows = mydb.get_rows(FROM="instance_actions", WHERE=where)
    if action_id:
        if not rows:
            raise NfvoException("Not found any action with this criteria", httperrors.Not_Found)
        vim_wim_actions = mydb.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": action_id})
        rows[0]["vim_wim_actions"] = vim_wim_actions
        # for backward compatibility set vim_actions = vim_wim_actions
        rows[0]["vim_actions"] = vim_wim_actions
    return {"actions": rows}
+
+
def create_or_use_console_proxy_thread(console_server, console_port):
    """Return a console proxy thread for console_server:console_port.

    Reuses an already running proxy for the same endpoint when one exists;
    otherwise scans the configured console port range for a free local port
    and starts a new proxy there. Raises NfvoException on proxy errors
    (400) or when no free port is left (409).
    """
    thread_key = console_server + ":" + str(console_port)
    if thread_key in global_config["console_thread"]:
        # a proxy is already serving this VIM console endpoint
        #global_config["console_thread"][thread_key].start_timeout()
        return global_config["console_thread"][thread_key]

    for candidate_port in global_config["console_port_iterator"]():
        if candidate_port in global_config["console_ports"]:
            # local port already taken by another proxy
            continue
        try:
            proxy = cli.ConsoleProxyThread(global_config['http_host'], candidate_port,
                                           console_server, console_port)
            proxy.start()
        except cli.ConsoleProxyExceptionPortUsed:
            # port grabbed by something else, try the next one
            continue
        except cli.ConsoleProxyException as e:
            raise NfvoException(str(e), httperrors.Bad_Request)
        global_config["console_thread"][thread_key] = proxy
        global_config["console_ports"][candidate_port] = thread_key
        return proxy
    raise NfvoException("Not found any free 'http_console_ports'", httperrors.Conflict)
+
+
def check_tenant(mydb, tenant_id):
    """Verify that a tenant with the given uuid exists at the database.

    Returns None when found; raises NfvoException (404) otherwise.
    """
    rows = mydb.get_rows(FROM='nfvo_tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id})
    if rows:
        return
    raise NfvoException("tenant '{}' not found".format(tenant_id), httperrors.Not_Found)
+
def new_tenant(mydb, tenant_dict):
    """Create a new nfvo tenant with a fresh uuid and an RO keypair.

    The generated public key and encrypted private key are stored in the
    tenant row. Returns the new tenant uuid; raises NfvoException when the
    database insert fails.
    """
    tenant_uuid = str(uuid4())
    tenant_dict['uuid'] = tenant_uuid
    try:
        public_key, encrypted_private_key = create_RO_keypair(tenant_uuid)
        tenant_dict['RO_pub_key'] = public_key
        tenant_dict['encrypted_RO_priv_key'] = encrypted_private_key
        mydb.new_row("nfvo_tenants", tenant_dict, confidential_data=True)
    except db_base_Exception as e:
        raise NfvoException("Error creating the new tenant: {} ".format(tenant_dict['name']) + str(e), e.http_code)
    return tenant_uuid
+
def delete_tenant(mydb, tenant):
    """Delete an nfvo tenant identified by uuid or name.

    Returns "<uuid> <name>" of the removed tenant.
    """
    # get nfvo_tenant info (raises if not found / ambiguous)
    found = mydb.get_table_by_uuid_name('nfvo_tenants', tenant, 'tenant')
    mydb.delete_row_by_id("nfvo_tenants", found['uuid'])
    return "{} {}".format(found['uuid'], found["name"])
+
+
def new_datacenter(mydb, datacenter_descriptor):
    """Insert a new datacenter row from its descriptor.

    Serializes the optional "config" section (extracting any
    "sdn-port-mapping" to apply after creation), sanitizes the VIM URLs,
    ensures the VIM plugin is loaded and creates the database row. Returns
    the new datacenter uuid. If applying the SDN port mapping fails, the
    new row is removed again (rollback) and the error re-raised.
    """
    sdn_port_mapping = None
    if "config" in datacenter_descriptor:
        sdn_port_mapping = datacenter_descriptor["config"].pop("sdn-port-mapping", None)
        datacenter_descriptor["config"] = yaml.safe_dump(datacenter_descriptor["config"],
                                                         default_flow_style=True, width=256)
    # Check that datacenter-type is correct
    datacenter_type = datacenter_descriptor.get("type", "openvim")

    # Users often copy-paste VIM URLs (e.g. from the OpenStack dashboard)
    # with trailing blanks or a trailing slash; strip both just in case.
    for url_key in ('vim_url', 'vim_url_admin'):
        raw_url = datacenter_descriptor.get(url_key)
        if raw_url:
            datacenter_descriptor[url_key] = raw_url.strip(string.whitespace + '/')

    # make sure the plugin implementing this VIM type is available
    plugin_name = "rovim_" + datacenter_type
    if plugin_name not in plugins:
        _load_plugin(plugin_name, type="vim")

    datacenter_id = mydb.new_row("datacenters", datacenter_descriptor, add_uuid=True, confidential_data=True)
    if sdn_port_mapping:
        try:
            datacenter_sdn_port_mapping_set(mydb, None, datacenter_id, sdn_port_mapping)
        except Exception as e:
            mydb.delete_row_by_id("datacenters", datacenter_id)  # Rollback
            raise e
    return datacenter_id
+
+
def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
    """Modify an existing datacenter row.

    Merges the provided "config" section over the stored one (None-valued
    keys delete the stored key; a new "sdn-port-mapping" replaces the old
    one; touching "sdn-controller" also removes the old port mapping).
    Returns the datacenter uuid. Raises NfvoException on a malformed
    config (400) or port-mapping errors (409); on a failure applying the
    new port mapping the previous row content is restored (rollback).
    """
    # obtain data, check that only one exist
    datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id_name)

    # edit data
    datacenter_id = datacenter['uuid']
    where = {'uuid': datacenter['uuid']}
    remove_port_mapping = False
    new_sdn_port_mapping = None
    if "config" in datacenter_descriptor:
        if datacenter_descriptor['config'] != None:
            try:
                new_config_dict = datacenter_descriptor["config"]
                if "sdn-port-mapping" in new_config_dict:
                    remove_port_mapping = True
                    new_sdn_port_mapping = new_config_dict.pop("sdn-port-mapping")
                # delete null fields
                to_delete = []
                for k in new_config_dict:
                    if new_config_dict[k] is None:
                        to_delete.append(k)
                    if k == 'sdn-controller':
                        # changing the controller invalidates the old port mapping
                        remove_port_mapping = True

                config_text = datacenter.get("config")
                if not config_text:
                    config_text = '{}'
                # merge the new config over the stored one
                config_dict = yaml.load(config_text, Loader=yaml.Loader)
                config_dict.update(new_config_dict)
                # delete null fields
                for k in to_delete:
                    del config_dict[k]
            except Exception as e:
                raise NfvoException("Bad format at datacenter:config " + str(e), httperrors.Bad_Request)
            if config_dict:
                datacenter_descriptor["config"] = yaml.safe_dump(config_dict, default_flow_style=True, width=256)
            else:
                datacenter_descriptor["config"] = None
        if remove_port_mapping:
            try:
                datacenter_sdn_port_mapping_delete(mydb, None, datacenter_id)
            except ovimException as e:
                raise NfvoException("Error deleting datacenter-port-mapping " + str(e), httperrors.Conflict)

    mydb.update_rows('datacenters', datacenter_descriptor, where)
    if new_sdn_port_mapping:
        try:
            datacenter_sdn_port_mapping_set(mydb, None, datacenter_id, new_sdn_port_mapping)
        except ovimException as e:
            # Rollback: restore the previous datacenter row content
            mydb.update_rows('datacenters', datacenter, where)
            raise NfvoException("Error adding datacenter-port-mapping " + str(e), httperrors.Conflict)
    return datacenter_id
+
+
def delete_datacenter(mydb, datacenter):
    """Delete a datacenter (by uuid or name) plus its SDN port mapping.

    Returns "<uuid> <name>" of the removed datacenter; raises NfvoException
    when the port-mapping cleanup fails.
    """
    # get nfvo_tenant info (raises if not found / ambiguous)
    dc = mydb.get_table_by_uuid_name('datacenters', datacenter, 'datacenter')
    dc_uuid = dc['uuid']
    mydb.delete_row_by_id("datacenters", dc_uuid)
    try:
        datacenter_sdn_port_mapping_delete(mydb, None, dc_uuid)
    except ovimException as e:
        raise NfvoException("Error deleting datacenter-port-mapping " + str(e))
    return "{} {}".format(dc_uuid, dc['name'])
+
+
def create_vim_account(mydb, nfvo_tenant, datacenter_id, name=None, vim_id=None, vim_tenant=None, vim_tenant_name=None,
                       vim_username=None, vim_password=None, config=None):
    """Attach an nfvo tenant to a datacenter, creating a vim_account.

    When neither vim_tenant nor vim_tenant_name is given a new tenant is
    created at the VIM; otherwise an existing datacenter_tenants row is
    reused when one matches. Fills datacenter_tenants and
    tenants_datacenters, loads the VIM plugin if needed and starts a vim
    thread for the new account. Returns the vim_account (thread) uuid.
    Raises NfvoException on missing ids or VIM errors.
    """
    global plugins
    # get datacenter info
    try:
        if not datacenter_id:
            if not vim_id:
                # NOTE(review): message is missing the closing quote on 'vim_id'
                raise NfvoException("You must provide 'vim_id", http_code=httperrors.Bad_Request)
            datacenter_id = vim_id
        datacenter_id, datacenter = get_datacenter_uuid(mydb, None, datacenter_id)
        datacenter_name = datacenter["name"]
        datacenter_type = datacenter["type"]

        # a VIM tenant must be created only when the caller provided none
        create_vim_tenant = True if not vim_tenant and not vim_tenant_name else False

        # get nfvo_tenant info
        tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', nfvo_tenant)
        if vim_tenant_name is None:
            vim_tenant_name = tenant_dict['name']

        tenants_datacenter_dict = {"nfvo_tenant_id": tenant_dict['uuid'], "datacenter_id": datacenter_id}
        # #check that this association does not exist before
        # tenants_datacenters = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
        # if len(tenants_datacenters)>0:
        #     raise NfvoException("datacenter '{}' and tenant'{}' are already attached".format(
        #         datacenter_id, tenant_dict['uuid']), httperrors.Conflict)

        vim_tenant_id_exist_atdb = False
        if not create_vim_tenant:
            where_={"datacenter_id": datacenter_id}
            if vim_tenant is not None:
                where_["vim_tenant_id"] = vim_tenant
            if vim_tenant_name is not None:
                where_["vim_tenant_name"] = vim_tenant_name
            # check if vim_tenant_id is already at database
            datacenter_tenants_dict = mydb.get_rows(FROM='datacenter_tenants', WHERE=where_)
            if len(datacenter_tenants_dict) >= 1:
                datacenter_tenants_dict = datacenter_tenants_dict[0]
                vim_tenant_id_exist_atdb = True
                # TODO check if a field has changed and edit entry at datacenter_tenants at DB
            else:  # result=0
                datacenter_tenants_dict = {}
                # insert at table datacenter_tenants
        else:  # if vim_tenant==None:
            # create tenant at VIM if not provided
            try:
                _, myvim = get_datacenter_by_name_uuid(mydb, None, datacenter_id, vim_user=vim_username,
                                                       vim_passwd=vim_password)
                datacenter_name = myvim["name"]
                vim_tenant = myvim.new_tenant(vim_tenant_name, "created by openmano for datacenter "+datacenter_name)
            except vimconn.VimConnException as e:
                raise NfvoException("Not possible to create vim_tenant {} at VIM: {}".format(vim_tenant_name, e),
                                    httperrors.Internal_Server_Error)
            # mark the VIM tenant as created by us so a detach can delete it
            datacenter_tenants_dict = {"created": "true"}

        # fill datacenter_tenants table
        if not vim_tenant_id_exist_atdb:
            datacenter_tenants_dict["vim_tenant_id"] = vim_tenant
            datacenter_tenants_dict["vim_tenant_name"] = vim_tenant_name
            datacenter_tenants_dict["user"] = vim_username
            datacenter_tenants_dict["passwd"] = vim_password
            datacenter_tenants_dict["datacenter_id"] = datacenter_id
            if name:
                datacenter_tenants_dict["name"] = name
            else:
                datacenter_tenants_dict["name"] = datacenter_name
            if config:
                datacenter_tenants_dict["config"] = yaml.safe_dump(config, default_flow_style=True, width=256)
            id_ = mydb.new_row('datacenter_tenants', datacenter_tenants_dict, add_uuid=True, confidential_data=True)
            datacenter_tenants_dict["uuid"] = id_

        # fill tenants_datacenters table
        datacenter_tenant_id = datacenter_tenants_dict["uuid"]
        tenants_datacenter_dict["datacenter_tenant_id"] = datacenter_tenant_id
        mydb.new_row('tenants_datacenters', tenants_datacenter_dict)

        # load plugin and create thread
        plugin_name = "rovim_" + datacenter_type
        if plugin_name not in plugins:
            _load_plugin(plugin_name, type="vim")
        thread_name = get_non_used_vim_name(datacenter_name, datacenter_id)
        new_thread = vim_thread(task_lock, plugins, thread_name, None, datacenter_tenant_id, db=db)
        new_thread.start()
        # the vim_account uuid doubles as the thread id
        thread_id = datacenter_tenants_dict["uuid"]
        vim_threads["running"][thread_id] = new_thread
        return thread_id
    except vimconn.VimConnException as e:
        raise NfvoException(str(e), httperrors.Bad_Request)
+
+
def edit_vim_account(mydb, nfvo_tenant, datacenter_tenant_id, datacenter_id=None, name=None, vim_tenant=None,
                     vim_tenant_name=None, vim_username=None, vim_password=None, config=None):
    """Modify a vim_account (datacenter_tenants row) owned by a tenant.

    Looks up the single vim_account matching the given ids, merges "config"
    over the stored one, updates any other provided field and asks the
    corresponding vim thread to reload. Returns the vim_account uuid.
    Raises NfvoException when zero (404) or several (409) accounts match.
    """
    # get vim_account; check is valid for this tenant
    join = "datacenter_tenants as dt JOIN tenants_datacenters as td ON dt.uuid=td.datacenter_tenant_id"
    criteria = {"td.nfvo_tenant_id": nfvo_tenant}
    if datacenter_tenant_id:
        criteria["dt.uuid"] = datacenter_tenant_id
    if datacenter_id:
        criteria["dt.datacenter_id"] = datacenter_id
    vim_accounts = mydb.get_rows(SELECT="dt.uuid as uuid, config", FROM=join, WHERE=criteria)
    if not vim_accounts:
        raise NfvoException("vim_account not found for this tenant", http_code=httperrors.Not_Found)
    if len(vim_accounts) > 1:
        raise NfvoException("found more than one vim_account for this tenant", http_code=httperrors.Conflict)
    account = vim_accounts[0]
    datacenter_tenant_id = account["uuid"]

    changes = {}
    if config:
        # merge the new config keys over the stored yaml config
        merged_config = yaml.load(account["config"], Loader=yaml.Loader)
        merged_config.update(config)
        changes["config"] = yaml.safe_dump(merged_config, default_flow_style=True, width=256)
    for column, value in (('name', name), ('vim_tenant_id', vim_tenant), ('vim_tenant_name', vim_tenant_name),
                          ('user', vim_username), ('passwd', vim_password)):
        if value:
            changes[column] = value
    if changes:
        mydb.update_rows("datacenter_tenants", UPDATE=changes, WHERE={"uuid": datacenter_tenant_id})

    # make the running vim thread pick up the new credentials/config
    vim_threads["running"][datacenter_tenant_id].insert_task("reload")
    return datacenter_tenant_id
+
def delete_vim_account(mydb, tenant_id, vim_account_id, datacenter=None):
    """Detach a tenant from a datacenter, deleting the vim_account(s).

    Removes the tenants_datacenters association and each matching
    datacenter_tenants row; VIM tenants created by the NFVO are also deleted
    at the VIM (best-effort, failures only logged). The related vim thread
    is told to exit. Returns a text message, possibly with a warning.
    """
    #get nfvo_tenant info
    if not tenant_id or tenant_id=="any":
        tenant_uuid = None
    else:
        tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
        tenant_uuid = tenant_dict['uuid']

    #check that this association exist before
    tenants_datacenter_dict = {}
    if datacenter:
        datacenter_id, _ = get_datacenter_uuid(mydb, tenant_uuid, datacenter)
        tenants_datacenter_dict["datacenter_id"] = datacenter_id
    elif vim_account_id:
        tenants_datacenter_dict["datacenter_tenant_id"] = vim_account_id
    if tenant_uuid:
        tenants_datacenter_dict["nfvo_tenant_id"] = tenant_uuid
    tenant_datacenter_list = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
    if len(tenant_datacenter_list)==0 and tenant_uuid:
        # NOTE(review): datacenter_id is unbound here when called with only
        # vim_account_id (no datacenter) — would raise NameError; confirm
        # callers always pass datacenter when this branch can trigger
        raise NfvoException("datacenter '{}' and tenant '{}' are not attached".format(datacenter_id, tenant_dict['uuid']), httperrors.Not_Found)

    #delete this association
    mydb.delete_row(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)

    #get vim_tenant info and deletes
    warning=''
    for tenant_datacenter_item in tenant_datacenter_list:
        vim_tenant_dict = mydb.get_table_by_uuid_name('datacenter_tenants', tenant_datacenter_item['datacenter_tenant_id'])
        #try to delete vim:tenant
        try:
            mydb.delete_row_by_id('datacenter_tenants', tenant_datacenter_item['datacenter_tenant_id'])
            if vim_tenant_dict['created']=='true':
                #delete tenant at VIM if created by NFVO
                try:
                    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
                    myvim.delete_tenant(vim_tenant_dict['vim_tenant_id'])
                except vimconn.VimConnException as e:
                    warning = "Not possible to delete vim_tenant_id {} from VIM: {} ".format(vim_tenant_dict['vim_tenant_id'], str(e))
                    logger.warn(warning)
        except db_base_Exception as e:
            logger.error("Cannot delete datacenter_tenants " + str(e))
            pass # the error will be caused because dependencies, vim_tenant can not be deleted
        # tell the corresponding vim thread to finish and park it for cleanup
        thread_id = tenant_datacenter_item["datacenter_tenant_id"]
        thread = vim_threads["running"].get(thread_id)
        if thread:
            thread.insert_task("exit")
            vim_threads["deleting"][thread_id] = thread
    # NOTE(review): datacenter_id may also be unbound here in the
    # vim_account_id-only path — same caveat as above
    return "datacenter {} detached. {}".format(datacenter_id, warning)
+
+
def datacenter_action(mydb, tenant_id, datacenter, action_dict):
    """DEPRECATED. Execute a maintenance action over a datacenter.

    Supported keys in action_dict:
      - "check-connectivity": verify the VIM is reachable.
      - "net-update": refresh datacenter_nets from the VIM shared networks;
        returns the number of inserted nets.
      - "net-edit": update a datacenter net (by uuid or name); returns the
        row count from the update.
      - "net-delete": remove a datacenter net (by uuid or name); returns
        the row count from the delete.
    Raises NfvoException on VIM errors or an unknown action.
    """
    # get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)

    if 'check-connectivity' in action_dict:
        try:
            myvim.check_vim_connectivity()
        except vimconn.VimConnException as e:
            raise NfvoException(str(e), e.http_code)
    elif 'net-update' in action_dict:
        try:
            nets = myvim.get_network_list(filter_dict={'shared': True, 'admin_state_up': True, 'status': 'ACTIVE'})
        except vimconn.VimConnException as e:
            raise NfvoException(str(e), httperrors.Internal_Server_Error)
        # update nets: change from VIM format to NFVO format
        net_list = []
        for net in nets:
            net_nfvo = {'datacenter_id': datacenter_id}
            net_nfvo['name'] = net['name']
            net_nfvo['vim_net_id'] = net['id']
            # map ('ptp','data','bridge_data','bridge_man') to ('ptp','data','bridge')
            net_nfvo['type'] = net['type'][0:6]
            net_nfvo['shared'] = net['shared']
            net_nfvo['multipoint'] = False if net['type'] == 'ptp' else True
            net_list.append(net_nfvo)
        inserted, deleted = mydb.update_datacenter_nets(datacenter_id, net_list)
        logger.info("Inserted %d nets, deleted %d old nets", inserted, deleted)
        return inserted
    elif 'net-edit' in action_dict:
        net = action_dict['net-edit'].pop('net')
        what = 'vim_net_id' if utils.check_valid_uuid(net) else 'name'
        result = mydb.update_rows('datacenter_nets', action_dict['net-edit'],
                                  WHERE={'datacenter_id': datacenter_id, what: net})
        return result
    elif 'net-delete' in action_dict:
        # BUG FIX: former code read action_dict['net-deelte'] (typo), so this
        # branch always crashed with a KeyError instead of deleting the net
        net = action_dict['net-delete'].get('net')
        what = 'vim_net_id' if utils.check_valid_uuid(net) else 'name'
        result = mydb.delete_row(FROM='datacenter_nets',
                                 WHERE={'datacenter_id': datacenter_id, what: net})
        return result
    else:
        raise NfvoException("Unknown action " + str(action_dict), httperrors.Bad_Request)
+
+
def datacenter_edit_netmap(mydb, tenant_id, datacenter, netmap, action_dict):
    """Edit a datacenter netmap entry identified by uuid or name.

    Updates the datacenter_nets row with the fields of action_dict['netmap']
    and returns the number of modified rows.
    """
    # get datacenter info
    datacenter_id, _ = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)

    key_column = 'uuid' if utils.check_valid_uuid(netmap) else 'name'
    return mydb.update_rows('datacenter_nets', action_dict['netmap'],
                            WHERE={'datacenter_id': datacenter_id, key_column: netmap})
+
+
def datacenter_new_netmap(mydb, tenant_id, datacenter, action_dict=None):
    """Create datacenter_nets entries (netmaps) from VIM networks.

    Without action_dict every shared VIM network is imported. Otherwise
    action_dict["netmap"] selects one network by "vim_id"/"vim_name" and may
    rename it with "name". Returns the list of created entries, each
    annotated with "status" ("OK" or "FAIL: <error>"). Raises NfvoException
    on VIM errors, ambiguous (409) or missing (404) networks.
    """
    # get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    filter_dict = {}
    if action_dict:
        action_dict = action_dict["netmap"]
        if 'vim_id' in action_dict:
            filter_dict["id"] = action_dict['vim_id']
        if 'vim_name' in action_dict:
            filter_dict["name"] = action_dict['vim_name']
    else:
        filter_dict["shared"] = True

    try:
        vim_nets = myvim.get_network_list(filter_dict=filter_dict)
    except vimconn.VimConnException as e:
        raise NfvoException(str(e), httperrors.Internal_Server_Error)
    if len(vim_nets) > 1 and action_dict:
        raise NfvoException("more than two networks found, specify with vim_id", httperrors.Conflict)
    if len(vim_nets) == 0:  # and action_dict:
        raise NfvoException("Not found a network at VIM with " + str(filter_dict), httperrors.Not_Found)

    net_list = []
    for vim_net in vim_nets:
        entry = {'datacenter_id': datacenter_id}
        # a caller-provided name overrides the VIM one (single-network case)
        if action_dict and "name" in action_dict:
            entry['name'] = action_dict['name']
        else:
            entry['name'] = vim_net['name']
        entry['vim_net_id'] = vim_net['id']
        # map ('ptp','data','bridge_data','bridge_man') to ('ptp','data','bridge')
        entry['type'] = vim_net['type'][0:6]
        entry['shared'] = vim_net['shared']
        entry['multipoint'] = vim_net['type'] != 'ptp'
        try:
            new_uuid = mydb.new_row("datacenter_nets", entry, add_uuid=True)
            entry["status"] = "OK"
            entry["uuid"] = new_uuid
        except db_base_Exception as e:
            if action_dict:
                # explicit single-net request: propagate the failure
                raise
            entry["status"] = "FAIL: " + str(e)
        net_list.append(entry)
    return net_list
+
def get_sdn_net_id(mydb, tenant_id, datacenter, network_id):
    """Return the SDN network uuid associated to a VIM network, or None.

    network_id may be a VIM network uuid or a name. Returns None when the
    network is not a dataplane ('data') network or no SDN network is
    attached. Raises NfvoException when the network cannot be found or is
    ambiguous (400), or when several SDN networks map to it (500).
    """
    # obtain all network data
    vim_filter = {"id": network_id} if utils.check_valid_uuid(network_id) else {"name": network_id}
    try:
        datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
        networks = myvim.get_network_list(filter_dict=vim_filter)
    except vimconn.VimConnException as e:
        raise NfvoException("Not possible to get_sdn_net_id from VIM: {}".format(str(e)), e.http_code)

    # ensure the network is defined
    if len(networks) == 0:
        raise NfvoException("Network {} is not present in the system".format(network_id),
                            httperrors.Bad_Request)
    # ensure there is only one network with the provided name
    if len(networks) > 1:
        raise NfvoException("Multiple networks present in vim identified by {}".format(network_id), httperrors.Bad_Request)
    # only dataplane networks can have an SDN counterpart
    if networks[0]['type'] != 'data':
        return None

    # from here on work with the VIM uuid, not the name
    vim_net_uuid = networks[0]['id']

    # search mano_db instance_nets for the sdn_net_id matching this vim_net_id
    try:
        rows = mydb.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets',
                             WHERE={'vim_net_id': vim_net_uuid})
    except db_base_Exception as e:
        raise NfvoException("db_base_Exception obtaining SDN network to associated to vim network {}".format(
            vim_net_uuid) + str(e), e.http_code)

    sdn_ids = [row['sdn_net_id'] for row in rows if row['sdn_net_id'] is not None]
    if not sdn_ids:
        return None
    if len(sdn_ids) == 1:
        return sdn_ids[0]
    raise NfvoException("More than one SDN network is associated to vim network {}".format(
        vim_net_uuid), httperrors.Internal_Server_Error)
+
def get_sdn_controller_id(mydb, datacenter):
    """Return the SDN controller id configured for a datacenter, or None."""
    rows = mydb.get_rows(SELECT=('config',), FROM='datacenters', WHERE={'uuid': datacenter})
    config_text = rows[0].get('config', '{}')
    if not config_text:
        return None
    return yaml.load(config_text, Loader=yaml.Loader).get('sdn-controller')
+
def vim_net_sdn_attach(mydb, tenant_id, datacenter, network_id, descriptor):
    """Attach an external port to the SDN network mapped to a VIM network.

    descriptor must contain "port" (switch port) and may contain "vlan" and
    "mac". Returns a text message with the created port uuid. Raises
    NfvoException when no SDN network or controller is associated, or on
    ovim/database errors.
    """
    try:
        sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, network_id)
        if not sdn_network_id:
            raise NfvoException("No SDN network is associated to vim-network {}".format(network_id), httperrors.Internal_Server_Error)

        # Obtain sdn controller id
        controller_id = get_sdn_controller_id(mydb, datacenter)
        if not controller_id:
            raise NfvoException("No SDN controller is set for datacenter {}".format(datacenter), httperrors.Internal_Server_Error)

        # Obtain sdn controller info
        sdn_controller = ovim.show_of_controller(controller_id)

        port_data = {
            'name': 'external_port',
            'net_id': sdn_network_id,
            'ofc_id': controller_id,
            'switch_dpid': sdn_controller['dpid'],
            'switch_port': descriptor['port'],
        }
        for optional_key in ('vlan', 'mac'):
            if optional_key in descriptor:
                port_data[optional_key] = descriptor[optional_key]

        result = ovim.new_port(port_data)
    except ovimException as e:
        raise NfvoException("ovimException attaching SDN network {} to vim network {}".format(
            sdn_network_id, network_id) + str(e), httperrors.Internal_Server_Error)
    except db_base_Exception as e:
        raise NfvoException("db_base_Exception attaching SDN network to vim network {}".format(
            network_id) + str(e), e.http_code)

    return 'Port uuid: ' + result
+
def vim_net_sdn_detach(mydb, tenant_id, datacenter, network_id, port_id=None):
    """Detach (delete) ports from the SDN network associated to a vim network.

    :param port_id: uuid of one specific port to detach; when omitted, every
        port named 'external_port' of the associated SDN network is detached
    :return: text message listing the uuids of the deleted ports
    :raises NfvoException: when no SDN network/ports are found or ovim fails
    """
    if port_id:
        port_filter = {'uuid': port_id}
    else:
        sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, network_id)
        if not sdn_network_id:
            raise NfvoException("No SDN network is associated to vim-network {}".format(network_id),
                                httperrors.Internal_Server_Error)
        # in case no port_id is specified only ports marked as 'external_port' will be detached
        # (renamed from 'filter' so the builtin is not shadowed)
        port_filter = {'name': 'external_port', 'net_id': sdn_network_id}

    try:
        port_list = ovim.get_ports(columns={'uuid'}, filter=port_filter)
    except ovimException as e:
        raise NfvoException("ovimException obtaining external ports for net {}. ".format(network_id) + str(e),
                            httperrors.Internal_Server_Error)

    if len(port_list) == 0:
        raise NfvoException("No ports attached to the network {} were found with the requested criteria".format(network_id),
                            httperrors.Bad_Request)

    port_uuid_list = []
    for port in port_list:
        try:
            port_uuid_list.append(port['uuid'])
            ovim.delete_port(port['uuid'])
        except ovimException as e:
            raise NfvoException("ovimException deleting port {} for net {}. ".format(port['uuid'], network_id) + str(e),
                                httperrors.Internal_Server_Error)

    return 'Detached ports uuid: {}'.format(','.join(port_uuid_list))
+
def vim_action_get(mydb, tenant_id, datacenter, item, name):
    """Retrieve 'networks', 'tenants' or 'images' elements from a VIM.

    Networks are enriched with their SDN network id and attached ports when
    the datacenter has an SDN network paired with them.

    :param name: optional uuid or name to filter by
    :return: {item: content_list}, or {singular_item: content_dict} when a
        single element matched the given name
    :raises NfvoException: on VIM errors, unknown item, or name not found
    """
    # get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    filter_dict = {}
    if name:
        if utils.check_valid_uuid(name):
            filter_dict["id"] = name
        else:
            filter_dict["name"] = name
    try:
        if item == "networks":
            #filter_dict['tenant_id'] = myvim['tenant_id']
            content = myvim.get_network_list(filter_dict=filter_dict)

            if len(content) == 0:
                raise NfvoException("Network {} is not present in the system. ".format(name),
                                    httperrors.Bad_Request)

            # Update the networks with the attached ports
            for net in content:
                sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, net['id'])
                if sdn_network_id is not None:
                    try:
                        #port_list = ovim.get_ports(columns={'uuid', 'switch_port', 'vlan'}, filter={'name': 'external_port', 'net_id': sdn_network_id})
                        port_list = ovim.get_ports(columns={'uuid', 'switch_port', 'vlan', 'name'},
                                                   filter={'net_id': sdn_network_id})
                    except ovimException as e:
                        # bugfix: previously referenced the undefined name 'network_id'
                        raise NfvoException("ovimException obtaining external ports for net {}. ".format(
                            net['id']) + str(e), httperrors.Internal_Server_Error)
                    # Remove field name and if port name is external_port save it as 'type'
                    for port in port_list:
                        if port['name'] == 'external_port':
                            port['type'] = "External"
                        del port['name']
                    net['sdn_network_id'] = sdn_network_id
                    net['sdn_attached_ports'] = port_list

        elif item == "tenants":
            content = myvim.get_tenant_list(filter_dict=filter_dict)
        elif item == "images":
            content = myvim.get_image_list(filter_dict=filter_dict)
        else:
            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
        logger.debug("vim_action response %s", content)
        if name and len(content) == 1:
            return {item[:-1]: content[0]}
        elif name and len(content) == 0:
            # bugfix: 'datacenter' was being passed where an HTTP error code is expected
            raise NfvoException("No {} found with ".format(item[:-1]) +
                                " and ".join(map(lambda x: str(x[0]) + ": " + str(x[1]), filter_dict.items())),
                                httperrors.Not_Found)
        else:
            return {item: content}
    except vimconn.VimConnException as e:
        # bugfix: use the module logger instead of print
        logger.error("vim_action Not possible to get_{}_list from VIM: {} ".format(item, str(e)))
        raise NfvoException("Not possible to get_{}_list from VIM: {}".format(item, str(e)), e.http_code)
+
+
def vim_action_delete(mydb, tenant_id, datacenter, item, name):
    """Delete one 'networks', 'tenants' or 'images' element from a VIM.

    The element is first resolved through vim_action_get. For networks, any
    paired SDN network is cleaned up first: its ports are detached one by one,
    the vim-net/sdn-net correspondence row is removed from 'instance_nets',
    and the SDN network itself is deleted, before the VIM network is deleted.

    :param tenant_id: NFVO tenant; 'any' disables tenant filtering
    :param item: one of 'networks', 'tenants', 'images'
    :param name: name or uuid of the element to delete
    :return: textual confirmation message
    :raises NfvoException: when the element is missing/ambiguous or a VIM/ovim/db call fails
    """
    #get datacenter info
    if tenant_id == "any":
        tenant_id=None

    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    #get uuid name
    content = vim_action_get(mydb, tenant_id, datacenter, item, name)
    logger.debug("vim_action_delete vim response: " + str(content))
    # content is {item: list} or {singular_item: dict}; take the single value
    items = next(iter(content.values()))
    if type(items)==list and len(items)==0:
        raise NfvoException("Not found " + item, httperrors.Not_Found)
    elif type(items)==list and len(items)>1:
        raise NfvoException("Found more than one {} with this name. Use uuid.".format(item), httperrors.Not_Found)
    else: # it is a dict
        item_id = items["id"]
        item_name = str(items.get("name"))

    try:
        if item=="networks":
            # If there is a SDN network associated to the vim-network, proceed to clear the relationship and delete it
            sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, item_id)
            if sdn_network_id != None:
                #Delete any port attachment to this network
                try:
                    port_list = ovim.get_ports(columns={'uuid'}, filter={'net_id': sdn_network_id})
                except ovimException as e:
                    raise NfvoException(
                        "ovimException obtaining external ports for net {}. ".format(sdn_network_id) + str(e),
                        httperrors.Internal_Server_Error)

                # By calling one by one all ports to be detached we ensure that not only the external_ports get detached
                for port in port_list:
                    vim_net_sdn_detach(mydb, tenant_id, datacenter, item_id, port['uuid'])

                #Delete from 'instance_nets' the correspondence between the vim-net-id and the sdn-net-id
                try:
                    mydb.delete_row(FROM='instance_nets', WHERE={'instance_scenario_id': None,
                                                                 'sdn_net_id': sdn_network_id,
                                                                 'vim_net_id': item_id})
                except db_base_Exception as e:
                    raise NfvoException("Error deleting correspondence for VIM/SDN dataplane networks{}: {}".format(
                        item_id, e), e.http_code)

                #Delete the SDN network
                try:
                    ovim.delete_network(sdn_network_id)
                except ovimException as e:
                    logger.error("ovimException deleting SDN network={} ".format(sdn_network_id) + str(e), exc_info=True)
                    raise NfvoException("ovimException deleting SDN network={} ".format(sdn_network_id) + str(e),
                                        httperrors.Internal_Server_Error)

            content = myvim.delete_network(item_id)
        elif item=="tenants":
            content = myvim.delete_tenant(item_id)
        elif item == "images":
            content = myvim.delete_image(item_id)
        else:
            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
    except vimconn.VimConnException as e:
        #logger.error( "vim_action Not possible to delete_{} {}from VIM: {} ".format(item, name, str(e)))
        raise NfvoException("Not possible to delete_{} {} from VIM: {}".format(item, name, str(e)), e.http_code)

    return "{} {} {} deleted".format(item[:-1], item_id,item_name)
+
+
def vim_action_create(mydb, tenant_id, datacenter, item, descriptor):
    """Create a 'networks' or 'tenants' element at a VIM.

    For dataplane networks ('data'/'ptp') on a datacenter that has an SDN
    controller configured, a mirror SDN network is also created through ovim
    and the vim-net-id <-> sdn-net-id correspondence is stored in the
    'instance_nets' table (with instance_scenario_id=NULL so it is not
    confused with a real net instance).

    :param tenant_id: NFVO tenant; 'any' disables tenant filtering
    :param descriptor: dict with a 'network' or 'tenant' key describing the element
    :return: result of vim_action_get for the element just created
    :raises NfvoException: on VIM/ovim/db errors or an unsupported item
    """
    #get datacenter info
    logger.debug("vim_action_create descriptor %s", str(descriptor))
    if tenant_id == "any":
        tenant_id=None
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    try:
        if item=="networks":
            net = descriptor["network"]
            net_name = net.pop("name")
            net_type = net.pop("type", "bridge")
            net_public = net.pop("shared", False)
            net_ipprofile = net.pop("ip_profile", None)
            net_vlan = net.pop("vlan", None)
            net_provider_network_profile = None
            if net_vlan:
                net_provider_network_profile = {"segmentation-id": net_vlan}
            content, _ = myvim.new_network(net_name, net_type, net_ipprofile, shared=net_public, provider_network_profile=net_provider_network_profile) #, **net)

            #If the datacenter has a SDN controller defined and the network is of dataplane type, then create the sdn network
            if get_sdn_controller_id(mydb, datacenter) != None and (net_type == 'data' or net_type == 'ptp'):
                #obtain datacenter_tenant_id
                datacenter_tenant_id = mydb.get_rows(SELECT=('uuid',),
                                                     FROM='datacenter_tenants',
                                                     WHERE={'datacenter_id': datacenter})[0]['uuid']
                try:
                    sdn_network = {}
                    sdn_network['vlan'] = net_vlan
                    sdn_network['type'] = net_type
                    sdn_network['name'] = net_name
                    sdn_network['region'] = datacenter_tenant_id
                    ovim_content = ovim.new_network(sdn_network)
                except ovimException as e:
                    logger.error("ovimException creating SDN network={} ".format(
                        sdn_network) + str(e), exc_info=True)
                    raise NfvoException("ovimException creating SDN network={} ".format(sdn_network) + str(e),
                                        httperrors.Internal_Server_Error)

                # Save entry in database mano_db, table instance_nets, to establish a dictionary vim_net_id <-> sdn_net_id
                # use instance_scenario_id=None to distinguish from real instances of nets
                correspondence = {'instance_scenario_id': None,
                                  'sdn_net_id': ovim_content,
                                  'vim_net_id': content,
                                  'datacenter_tenant_id': datacenter_tenant_id
                                  }
                try:
                    mydb.new_row('instance_nets', correspondence, add_uuid=True)
                except db_base_Exception as e:
                    raise NfvoException("Error saving correspondence for VIM/SDN dataplane networks{}: {}".format(
                        correspondence, e), e.http_code)
        elif item=="tenants":
            tenant = descriptor["tenant"]
            content = myvim.new_tenant(tenant["name"], tenant.get("description"))
        else:
            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
    except vimconn.VimConnException as e:
        raise NfvoException("Not possible to create {} at VIM: {}".format(item, str(e)), e.http_code)

    return vim_action_get(mydb, tenant_id, datacenter, item, content)
+
def sdn_controller_create(mydb, tenant_id, sdn_controller):
    """Register a new SDN controller in ovim and start its worker thread.

    Loads the matching 'rosdn_<type>' plugin if needed, then spawns a
    vim_thread for the new controller and tracks it in vim_threads["running"].

    :param sdn_controller: descriptor dict with at least 'name' and 'type'
    :return: uuid of the created controller (also used as thread id)
    :raises NfvoException: when ovim rejects the controller
    """
    try:
        wim_id = ovim.new_of_controller(sdn_controller)

        # Load plugin if not previously loaded
        controller_type = sdn_controller.get("type")
        plugin_name = "rosdn_" + controller_type
        if plugin_name not in plugins:
            _load_plugin(plugin_name, type="sdn")

        thread_name = get_non_used_vim_name(sdn_controller['name'], wim_id)
        new_thread = vim_thread(task_lock, plugins, thread_name, wim_id, None, db=db)
        new_thread.start()
        thread_id = wim_id
        vim_threads["running"][thread_id] = new_thread
        logger.debug('New SDN controller created with uuid {}'.format(wim_id))
        return wim_id
    except ovimException as e:
        # bugfix: NfvoException expects (message, http_code); the bare exception
        # object was being passed as the message with no http code at all
        raise NfvoException(str(e), httperrors.Internal_Server_Error) from e
+
def sdn_controller_update(mydb, tenant_id, controller_id, sdn_controller):
    """Update an SDN controller in ovim and ask its running thread to reload.

    :return: textual confirmation message
    """
    updated = ovim.edit_of_controller(controller_id, sdn_controller)
    # the worker thread must pick up the new controller configuration
    vim_threads["running"][controller_id].insert_task("reload")
    message = 'SDN controller {} updated'.format(updated)
    logger.debug(message)
    return message
+
def sdn_controller_list(mydb, tenant_id, controller_id=None):
    """List SDN controllers, or show a single one.

    :param controller_id: when given, only that controller is returned
    :return: list of controllers, or a single controller dict
    """
    # idiom: compare against None with 'is', not '=='
    if controller_id is None:
        data = ovim.get_of_controllers()
    else:
        data = ovim.show_of_controller(controller_id)

    logger.debug('SDN controller list:\n {}'.format(data))
    return data
+
def sdn_controller_delete(mydb, tenant_id, controller_id):
    """Delete an SDN controller, refusing if any datacenter still uses it.

    :return: textual confirmation message
    :raises NfvoException: Conflict when a datacenter references the controller
    """
    select_ = ('uuid', 'config')
    datacenters = mydb.get_rows(FROM='datacenters', SELECT=select_)
    for datacenter in datacenters:
        if datacenter['config']:
            # safe_load: never execute arbitrary yaml tags stored in the database
            config = yaml.safe_load(datacenter['config'])
            if 'sdn-controller' in config and config['sdn-controller'] == controller_id:
                raise NfvoException("SDN controller {} is in use by datacenter {}".format(controller_id, datacenter['uuid']), httperrors.Conflict)

    data = ovim.delete_of_controller(controller_id)
    msg = 'SDN controller {} deleted'.format(data)
    logger.debug(msg)
    return msg
+
def datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, sdn_port_mapping):
    """Store the compute-node to switch-port mapping of a datacenter in ovim.

    :param sdn_port_mapping: list of {'compute_node': ..., 'ports': [
        {'pci', 'switch_port', 'switch_mac', 'switch_dpid', 'switch_id'}, ...]}
    :return: result of ovim.set_of_port_mapping
    :raises NfvoException: when the datacenter is missing, has no SDN
        controller, or a port entry lacks both switch_port and switch_mac
    """
    controller = mydb.get_rows(FROM="datacenters", SELECT=("config",), WHERE={"uuid": datacenter_id})
    if len(controller) < 1:
        raise NfvoException("Datacenter {} not present in the database".format(datacenter_id), httperrors.Not_Found)

    try:
        # bugfix: the former bare 'except:' is narrowed to what a missing or
        # malformed config can raise; safe_load avoids executing yaml tags
        sdn_controller_id = yaml.safe_load(controller[0]["config"])["sdn-controller"]
    except (yaml.YAMLError, KeyError, TypeError):
        raise NfvoException("The datacenter {} has not an SDN controller associated".format(datacenter_id), httperrors.Bad_Request)

    sdn_controller = ovim.show_of_controller(sdn_controller_id)
    switch_dpid = sdn_controller["dpid"]

    maps = list()
    for compute_node in sdn_port_mapping:
        #element = {"ofc_id": sdn_controller_id, "region": datacenter_id, "switch_dpid": switch_dpid}
        element = dict()
        element["compute_node"] = compute_node["compute_node"]
        if compute_node["ports"]:
            for port in compute_node["ports"]:
                pci = port.get("pci")
                element["switch_port"] = port.get("switch_port")
                element["switch_mac"] = port.get("switch_mac")
                element["switch_dpid"] = port.get("switch_dpid")
                element["switch_id"] = port.get("switch_id")
                if not element["switch_port"] and not element["switch_mac"]:
                    raise NfvoException("The mapping must contain 'switch_port' or 'switch_mac'", httperrors.Bad_Request)
                # one pci entry may expand to several devices, e.g. "0000:81:00.[0-3]"
                for pci_expanded in utils.expand_brackets(pci):
                    element["pci"] = pci_expanded
                    maps.append(dict(element))

    out = ovim.set_of_port_mapping(maps, sdn_id=sdn_controller_id, switch_dpid=switch_dpid, vim_id=datacenter_id)
    vim_threads["running"][sdn_controller_id].insert_task("reload")
    return out
+
def datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id):
    """Return the SDN port mapping of a datacenter, grouped by compute node.

    :return: dict with 'sdn-controller', 'datacenter-id', 'dpid' and
        'ports_mapping' (list of {'compute_node', 'ports'})
    :raises NfvoException: when the datacenter has no SDN controller or dpid,
        or when the stored mappings are inconsistent with each other
    """
    maps = ovim.get_of_port_mappings(db_filter={"datacenter_id": datacenter_id})

    result = {
        "sdn-controller": None,
        "datacenter-id": datacenter_id,
        "dpid": None,
        "ports_mapping": list()
    }

    datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id)
    if datacenter['config']:
        # safe_load: never execute arbitrary yaml tags stored in the database
        config = yaml.safe_load(datacenter['config'])
        if 'sdn-controller' in config:
            controller_id = config['sdn-controller']
            sdn_controller = sdn_controller_list(mydb, tenant_id, controller_id)
            result["sdn-controller"] = controller_id
            result["dpid"] = sdn_controller["dpid"]

    if result["sdn-controller"] is None:
        raise NfvoException("SDN controller is not defined for datacenter {}".format(datacenter_id), httperrors.Bad_Request)
    if result["dpid"] is None:
        raise NfvoException("It was not possible to determine DPID for SDN controller {}".format(result["sdn-controller"]),
                            httperrors.Internal_Server_Error)

    if len(maps) == 0:
        return result

    ports_correspondence_dict = dict()
    for link in maps:
        # all mappings of a datacenter must share the same controller and dpid
        if result["sdn-controller"] != link["wim_id"]:
            raise NfvoException("The sdn-controller specified for different port mappings differ", httperrors.Internal_Server_Error)
        if result["dpid"] != link["switch_dpid"]:
            raise NfvoException("The dpid specified for different port mappings differ", httperrors.Internal_Server_Error)
        link_config = link["service_mapping_info"]
        element = dict()
        element["pci"] = link.get("device_interface_id")
        if link["switch_port"]:
            element["switch_port"] = link["switch_port"]
        # bugfix: use .get so a mapping without 'switch_mac' does not raise KeyError
        if link_config.get("switch_mac"):
            element["switch_mac"] = link_config["switch_mac"]

        # NOTE(review): 'interface_id' is used here as the compute-node grouping
        # key — confirm against the wim_port_mappings schema
        if link.get("interface_id") not in ports_correspondence_dict:
            content = dict()
            content["compute_node"] = link.get("interface_id")
            content["ports"] = list()
            ports_correspondence_dict[link.get("interface_id")] = content

        ports_correspondence_dict[link["interface_id"]]["ports"].append(element)

    for key in sorted(ports_correspondence_dict):
        result["ports_mapping"].append(ports_correspondence_dict[key])

    return result
+
def datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id):
    """Remove every SDN port mapping stored for the given datacenter.

    :return: result of ovim.clear_of_port_mapping
    """
    return ovim.clear_of_port_mapping(db_filter={"datacenter_id":datacenter_id})
+
def create_RO_keypair(tenant_id):
    """
    Creates a public / private keys for a RO tenant and returns their values
    Params:
        tenant_id: ID of the tenant
    Return:
        public_key: Public key for the RO tenant
        private_key: Encrypted private key for RO tenant
    """
    bits = 2048
    key = RSA.generate(bits)
    try:
        public_key = key.publickey().exportKey('OpenSSH')
        # the private key is encrypted using the tenant id as passphrase
        private_key = key.exportKey(passphrase=tenant_id, pkcs=8)
    except (ValueError, NameError) as e:
        # bugfix: exportKey signals failure by raising (handled here), never by
        # returning a ValueError instance; the dead isinstance check was removed
        raise NfvoException("Unable to create private key: {}".format(e), httperrors.Internal_Server_Error)
    # exportKey returns bytes in recent crypto libraries; normalize to str
    if isinstance(public_key, bytes):
        public_key = public_key.decode(encoding='UTF-8')
    if isinstance(private_key, bytes):
        private_key = private_key.decode(encoding='UTF-8')
    return public_key, private_key
+
def decrypt_key(key, tenant_id):
    """
    Decrypts an encrypted RSA key
    Params:
        key: Private key to be decrypted
        tenant_id: ID of the tenant (used as the decryption passphrase)
    Return:
        unencrypted_key: Unencrypted private key for RO tenant
    """
    try:
        key = RSA.importKey(key, tenant_id)
        unencrypted_key = key.exportKey('PEM')
        # exportKey returns bytes in recent crypto libraries; normalize to str
        if isinstance(unencrypted_key, bytes):
            unencrypted_key = unencrypted_key.decode(encoding='UTF-8')
    except ValueError as e:
        # bugfix: import/export failures raise ValueError (handled here), they are
        # never returned as a value; the dead isinstance check was removed
        raise NfvoException("Unable to decrypt the private key: {}".format(e), httperrors.Internal_Server_Error)
    return unencrypted_key
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+NFVO DB engine. It implements all the methods to interact with the Openmano Database
+"""
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+__date__ ="$28-aug-2014 10:05:01$"
+
+from osm_ro import db_base
+import MySQLdb as mdb
+import json
+import yaml
+import time
+#import sys, os
+
+from osm_ro.db_base import retry, with_transaction
+from osm_ro.http_tools import errors as httperrors
+from osm_ro.utils import Attempt
+
+
# Module-level retry configuration object; presumably consumed by the @retry
# decorator from osm_ro.db_base — TODO confirm
_ATTEMPT = Attempt()


# Tables that carry a 'created_at' column. nfvo_db.__init__ registers this list
# on db_base.db_base.tables_with_created_field, so db_base can treat inserts
# into these tables accordingly.
tables_with_createdat_field=["datacenters","instance_nets","instance_scenarios","instance_vms","instance_vnfs",
                             "interfaces","nets","nfvo_tenants","scenarios","sce_interfaces","sce_nets",
                             "sce_vnfs","tenants_datacenters","datacenter_tenants","vms","vnfs", "datacenter_nets",
                             "instance_actions", "sce_vnffgs", "sce_rsps", "sce_rsp_hops",
                             "sce_classifiers", "sce_classifier_matches", "instance_sfis", "instance_sfs",
                             "instance_classifications", "instance_sfps", "wims", "wim_accounts", "wim_nfvo_tenants",
                             "wim_port_mappings", "vim_wim_actions", "instance_interfaces",
                             "instance_wim_nets"]
+
+
+class nfvo_db(db_base.db_base):
+ def __init__(self, host=None, user=None, passwd=None, database=None,
+ log_name='openmano.db', log_level=None, lock=None):
+ db_base.db_base.__init__(self, host, user, passwd, database,
+ log_name, log_level, lock)
+ db_base.db_base.tables_with_created_field=tables_with_createdat_field
+ return
+
    @retry
    @with_transaction
    def new_vnf_as_a_whole(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
        """Insert a complete VNF in the database within a single transaction.

        Creates the 'vnfs' row, one 'vms' row per VNFC, one 'nets' row per
        internal connection, and the 'interfaces' rows for both internal and
        external connections.

        :param nfvo_tenant: NFVO tenant (not used directly; the tenant_id is
            read from the descriptor)
        :param vnf_name: name for the 'vnfs' row
        :param vnf_descriptor: parsed VNF descriptor (dict with a 'vnf' key
            holding VNFC, internal-connections and external-connections)
        :param VNFCDict: dict VNFC-name -> content of the 'vms' row to insert
        :return: uuid of the created vnf
        """
        self.logger.debug("Adding new vnf to the NFVO database")
        created_time = time.time()
        myVNFDict = {}
        myVNFDict["name"] = vnf_name
        myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
        myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
        myVNFDict["description"] = vnf_descriptor['vnf']['description']
        myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
        myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")

        vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
        #print "Adding new vms to the NFVO database"
        #For each vm, we must create the appropriate vm in the NFVO database.
        vmDict = {}
        for _,vm in VNFCDict.items():
            #This code could make the name of the vms grow and grow.
            #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
            #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
            #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
            vm["vnf_id"] = vnf_id
            # bump created_time so each inserted row gets a distinct, increasing timestamp
            created_time += 0.00001
            vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
            #print "Internal vm id in NFVO DB: %s" % vm_id
            vmDict[vm['name']] = vm_id

        #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
        bridgeInterfacesDict = {}
        for vm in vnf_descriptor['vnf']['VNFC']:
            if 'bridge-ifaces' in vm:
                bridgeInterfacesDict[vm['name']] = {}
                for bridgeiface in vm['bridge-ifaces']:
                    created_time += 0.00001
                    # normalize dashed descriptor keys to the column names used below
                    if 'port-security' in bridgeiface:
                        bridgeiface['port_security'] = bridgeiface.pop('port-security')
                    if 'floating-ip' in bridgeiface:
                        bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
                    db_base._convert_bandwidth(bridgeiface, logger=self.logger)
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']] = {}
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['vpci'] = bridgeiface.get('vpci',None)
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['mac'] = bridgeiface.get('mac_address',None)
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['bw'] = bridgeiface.get('bandwidth', None)
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['model'] = bridgeiface.get('model', None)
                    # booleans are stored as 0/1 integers
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['port_security'] = \
                        int(bridgeiface.get('port_security', True))
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['floating_ip'] = \
                        int(bridgeiface.get('floating_ip', False))
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['created_time'] = created_time

        # Collect the data interfaces of each VM/VNFC under the 'numas' field
        dataifacesDict = {}
        for vm in vnf_descriptor['vnf']['VNFC']:
            dataifacesDict[vm['name']] = {}
            for numa in vm.get('numas', []):
                for dataiface in numa.get('interfaces', []):
                    created_time += 0.00001
                    db_base._convert_bandwidth(dataiface, logger=self.logger)
                    dataifacesDict[vm['name']][dataiface['name']] = {}
                    dataifacesDict[vm['name']][dataiface['name']]['vpci'] = dataiface.get('vpci')
                    dataifacesDict[vm['name']][dataiface['name']]['bw'] = dataiface['bandwidth']
                    # 'dedicated' yes/no/other maps to PF / VF / VFnotShared models
                    dataifacesDict[vm['name']][dataiface['name']]['model'] = "PF" if dataiface[
                        'dedicated'] == "yes" else (
                        "VF" if dataiface['dedicated'] == "no" else "VFnotShared")
                    dataifacesDict[vm['name']][dataiface['name']]['created_time'] = created_time

        #For each internal connection, we add it to the interfaceDict and we create the appropriate net in the NFVO database.
        #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
        internalconnList = []
        if 'internal-connections' in vnf_descriptor['vnf']:
            for net in vnf_descriptor['vnf']['internal-connections']:
                #print "Net name: %s. Description: %s" % (net['name'], net['description'])

                myNetDict = {}
                myNetDict["name"] = net['name']
                myNetDict["description"] = net['description']
                myNetDict["type"] = net['type']
                myNetDict["vnf_id"] = vnf_id

                created_time += 0.00001
                net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)

                for element in net['elements']:
                    ifaceItem = {}
                    #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
                    ifaceItem["internal_name"] = element['local_iface_name']
                    #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
                    ifaceItem["vm_id"] = vmDict[element['VNFC']]
                    ifaceItem["net_id"] = net_id
                    ifaceItem["type"] = net['type']
                    # pick the interface details collected above, depending on net type
                    if ifaceItem ["type"] == "data":
                        dataiface = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
                        ifaceItem["vpci"] = dataiface['vpci']
                        ifaceItem["bw"] = dataiface['bw']
                        ifaceItem["model"] = dataiface['model']
                        created_time_iface = dataiface['created_time']
                    else:
                        bridgeiface = bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
                        ifaceItem["vpci"] = bridgeiface['vpci']
                        ifaceItem["mac"] = bridgeiface['mac']
                        ifaceItem["bw"] = bridgeiface['bw']
                        ifaceItem["model"] = bridgeiface['model']
                        ifaceItem["port_security"] = bridgeiface['port_security']
                        ifaceItem["floating_ip"] = bridgeiface['floating_ip']
                        created_time_iface = bridgeiface['created_time']
                    internalconnList.append(ifaceItem)
                #print "Internal net id in NFVO DB: %s" % net_id

        #print "Adding internal interfaces to the NFVO database (if any)"
        for iface in internalconnList:
            #print "Iface name: %s" % iface['internal_name']
            iface_id = self._new_row_internal('interfaces', iface, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
            #print "Iface id in NFVO DB: %s" % iface_id

        #print "Adding external interfaces to the NFVO database"
        for iface in vnf_descriptor['vnf']['external-connections']:
            myIfaceDict = {}
            #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
            myIfaceDict["internal_name"] = iface['local_iface_name']
            #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
            myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
            myIfaceDict["external_name"] = iface['name']
            myIfaceDict["type"] = iface['type']
            if iface["type"] == "data":
                dataiface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
                myIfaceDict["vpci"] = dataiface['vpci']
                myIfaceDict["bw"] = dataiface['bw']
                myIfaceDict["model"] = dataiface['model']
                created_time_iface = dataiface['created_time']
            else:
                bridgeiface = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
                myIfaceDict["vpci"] = bridgeiface['vpci']
                myIfaceDict["bw"] = bridgeiface['bw']
                myIfaceDict["model"] = bridgeiface['model']
                myIfaceDict["mac"] = bridgeiface['mac']
                myIfaceDict["port_security"]= bridgeiface['port_security']
                myIfaceDict["floating_ip"] = bridgeiface['floating_ip']
                created_time_iface = bridgeiface['created_time']
            #print "Iface name: %s" % iface['name']
            iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
            #print "Iface id in NFVO DB: %s" % iface_id

        return vnf_id
+
+ @retry
+ @with_transaction
+ def new_vnf_as_a_whole2(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
+ self.logger.debug("Adding new vnf to the NFVO database")
+ created_time = time.time()
+ myVNFDict = {}
+ myVNFDict["name"] = vnf_name
+ myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
+ myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
+ myVNFDict["description"] = vnf_descriptor['vnf']['description']
+ myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
+ myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")
+
+ vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
+ #print "Adding new vms to the NFVO database"
+ #For each vm, we must create the appropriate vm in the NFVO database.
+ vmDict = {}
+ for _,vm in VNFCDict.items():
+ #This code could make the name of the vms grow and grow.
+ #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
+ #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
+ #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
+ vm["vnf_id"] = vnf_id
+ created_time += 0.00001
+ vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
+ #print "Internal vm id in NFVO DB: %s" % vm_id
+ vmDict[vm['name']] = vm_id
+
+ #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
+ bridgeInterfacesDict = {}
+ for vm in vnf_descriptor['vnf']['VNFC']:
+ if 'bridge-ifaces' in vm:
+ bridgeInterfacesDict[vm['name']] = {}
+ for bridgeiface in vm['bridge-ifaces']:
+ created_time += 0.00001
+ db_base._convert_bandwidth(bridgeiface, logger=self.logger)
+ if 'port-security' in bridgeiface:
+ bridgeiface['port_security'] = bridgeiface.pop('port-security')
+ if 'floating-ip' in bridgeiface:
+ bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
+ ifaceDict = {}
+ ifaceDict['vpci'] = bridgeiface.get('vpci',None)
+ ifaceDict['mac'] = bridgeiface.get('mac_address',None)
+ ifaceDict['bw'] = bridgeiface.get('bandwidth', None)
+ ifaceDict['model'] = bridgeiface.get('model', None)
+ ifaceDict['port_security'] = int(bridgeiface.get('port_security', True))
+ ifaceDict['floating_ip'] = int(bridgeiface.get('floating_ip', False))
+ ifaceDict['created_time'] = created_time
+ bridgeInterfacesDict[vm['name']][bridgeiface['name']] = ifaceDict
+
+ # Collect the data interfaces of each VM/VNFC under the 'numas' field
+ dataifacesDict = {}
+ for vm in vnf_descriptor['vnf']['VNFC']:
+ dataifacesDict[vm['name']] = {}
+ for numa in vm.get('numas', []):
+ for dataiface in numa.get('interfaces', []):
+ created_time += 0.00001
+ db_base._convert_bandwidth(dataiface, logger=self.logger)
+ ifaceDict = {}
+ ifaceDict['vpci'] = dataiface.get('vpci')
+ ifaceDict['bw'] = dataiface['bandwidth']
+ ifaceDict['model'] = "PF" if dataiface['dedicated'] == "yes" else \
+ ("VF" if dataiface['dedicated'] == "no" else "VFnotShared")
+ ifaceDict['created_time'] = created_time
+ dataifacesDict[vm['name']][dataiface['name']] = ifaceDict
+
+ #For each internal connection, we add it to the interfaceDict and we create the appropriate net in the NFVO database.
+ #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
+ if 'internal-connections' in vnf_descriptor['vnf']:
+ for net in vnf_descriptor['vnf']['internal-connections']:
+ #print "Net name: %s. Description: %s" % (net['name'], net['description'])
+
+ myNetDict = {}
+ myNetDict["name"] = net['name']
+ myNetDict["description"] = net['description']
+ if (net["implementation"] == "overlay"):
+ net["type"] = "bridge"
+ #It should give an error if the type is e-line. For the moment, we consider it as a bridge
+ elif (net["implementation"] == "underlay"):
+ if (net["type"] == "e-line"):
+ net["type"] = "ptp"
+ elif (net["type"] == "e-lan"):
+ net["type"] = "data"
+ net.pop("implementation")
+ myNetDict["type"] = net['type']
+ myNetDict["vnf_id"] = vnf_id
+
+ created_time += 0.00001
+ net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
+
+ if "ip-profile" in net:
+ ip_profile = net["ip-profile"]
+ myIPProfileDict = {}
+ myIPProfileDict["net_id"] = net_id
+ myIPProfileDict["ip_version"] = ip_profile.get('ip-version',"IPv4")
+ myIPProfileDict["subnet_address"] = ip_profile.get('subnet-address',None)
+ myIPProfileDict["gateway_address"] = ip_profile.get('gateway-address',None)
+ myIPProfileDict["dns_address"] = ip_profile.get('dns-address',None)
+ if ("dhcp" in ip_profile):
+ myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled',"true")
+ myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address',None)
+ myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count',None)
+
+ created_time += 0.00001
+ ip_profile_id = self._new_row_internal('ip_profiles', myIPProfileDict)
+
+ for element in net['elements']:
+ ifaceItem = {}
+ #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
+ ifaceItem["internal_name"] = element['local_iface_name']
+ #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
+ ifaceItem["vm_id"] = vmDict[element['VNFC']]
+ ifaceItem["net_id"] = net_id
+ ifaceItem["type"] = net['type']
+ ifaceItem["ip_address"] = element.get('ip_address',None)
+ if ifaceItem ["type"] == "data":
+ ifaceDict = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
+ ifaceItem["vpci"] = ifaceDict['vpci']
+ ifaceItem["bw"] = ifaceDict['bw']
+ ifaceItem["model"] = ifaceDict['model']
+ else:
+ ifaceDict = bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
+ ifaceItem["vpci"] = ifaceDict['vpci']
+ ifaceItem["mac"] = ifaceDict['mac']
+ ifaceItem["bw"] = ifaceDict['bw']
+ ifaceItem["model"] = ifaceDict['model']
+ ifaceItem["port_security"] = ifaceDict['port_security']
+ ifaceItem["floating_ip"] = ifaceDict['floating_ip']
+ created_time_iface = ifaceDict["created_time"]
+ #print "Iface name: %s" % iface['internal_name']
+ iface_id = self._new_row_internal('interfaces', ifaceItem, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
+ #print "Iface id in NFVO DB: %s" % iface_id
+
+ #print "Adding external interfaces to the NFVO database"
+ for iface in vnf_descriptor['vnf']['external-connections']:
+ myIfaceDict = {}
+ #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
+ myIfaceDict["internal_name"] = iface['local_iface_name']
+ #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
+ myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
+ myIfaceDict["external_name"] = iface['name']
+ myIfaceDict["type"] = iface['type']
+ if iface["type"] == "data":
+ myIfaceDict["vpci"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
+ myIfaceDict["bw"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
+ myIfaceDict["model"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
+ created_time_iface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['created_time']
+ else:
+ myIfaceDict["vpci"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
+ myIfaceDict["bw"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
+ myIfaceDict["model"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
+ myIfaceDict["mac"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['mac']
+ myIfaceDict["port_security"] = \
+ bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['port_security']
+ myIfaceDict["floating_ip"] = \
+ bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['floating_ip']
+ created_time_iface = bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['created_time']
+ #print "Iface name: %s" % iface['name']
+ iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
+ #print "Iface id in NFVO DB: %s" % iface_id
+
+ return vnf_id
+
+# except KeyError as e2:
+# exc_type, exc_obj, exc_tb = sys.exc_info()
+# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
+# self.logger.debug("Exception type: %s; Filename: %s; Line number: %s", exc_type, fname, exc_tb.tb_lineno)
+# raise KeyError
+
    @retry
    @with_transaction
    def new_scenario(self, scenario_dict):
        """Insert a new scenario and all its related rows in a single transaction.

        Creates rows at 'scenarios', 'sce_nets' (plus an optional 'ip_profiles'
        row per net), 'sce_vnfs' and 'sce_interfaces'.

        :param scenario_dict: scenario descriptor with keys 'name', 'description',
            'nets' (dict of nets) and 'vnfs' (dict of vnfs); optionally 'tenant_id'
            and 'public'. NOTE: it is modified in place -- each net gets a 'uuid'
            and each vnf a 'scn_vnf_uuid' with the created database ids.
        :return: uuid of the created scenario
        """
        created_time = time.time()
        tenant_id = scenario_dict.get('tenant_id')
        # scenario row
        INSERT_={'tenant_id': tenant_id,
                 'name': scenario_dict['name'],
                 'description': scenario_dict['description'],
                 'public': scenario_dict.get('public', "false")}

        scenario_uuid = self._new_row_internal('scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
        # sce_nets
        for net in scenario_dict['nets'].values():
            net_dict={'scenario_id': scenario_uuid}
            net_dict["name"] = net["name"]
            net_dict["type"] = net["type"]
            net_dict["description"] = net.get("description")
            net_dict["external"] = net.get("external", False)
            if "graph" in net:
                #net["graph"]=yaml.safe_dump(net["graph"],default_flow_style=True,width=256)
                #TODO, must be json because of the GUI, change to yaml
                net_dict["graph"]=json.dumps(net["graph"])
            # tiny created_time offset preserves insertion order when sorting by created_at
            created_time += 0.00001
            net_uuid = self._new_row_internal('sce_nets', net_dict, add_uuid=True, root_uuid=scenario_uuid, created_time=created_time)
            net['uuid']=net_uuid

            if net.get("ip-profile"):
                ip_profile = net["ip-profile"]
                myIPProfileDict = {
                    "sce_net_id": net_uuid,
                    "ip_version": ip_profile.get('ip-version', "IPv4"),
                    "subnet_address": ip_profile.get('subnet-address'),
                    "gateway_address": ip_profile.get('gateway-address'),
                    "dns_address": ip_profile.get('dns-address')}
                if "dhcp" in ip_profile:
                    myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled', "true")
                    myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address')
                    myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count')
                self._new_row_internal('ip_profiles', myIPProfileDict)

        # sce_vnfs
        for k, vnf in scenario_dict['vnfs'].items():
            INSERT_ = {'scenario_id': scenario_uuid,
                       'name': k,
                       'vnf_id': vnf['uuid'],
                       # 'description': scenario_dict['name']
                       'description': vnf['description']}
            if "graph" in vnf:
                #INSERT_["graph"]=yaml.safe_dump(vnf["graph"],default_flow_style=True,width=256)
                # TODO, must be json because of the GUI, change to yaml
                INSERT_["graph"] = json.dumps(vnf["graph"])
            created_time += 0.00001
            scn_vnf_uuid = self._new_row_internal('sce_vnfs', INSERT_, add_uuid=True,
                                                  root_uuid=scenario_uuid, created_time=created_time)
            vnf['scn_vnf_uuid']=scn_vnf_uuid
            # sce_interfaces
            for iface in vnf['ifaces'].values():
                # interfaces without a 'net_key' are not attached to any scenario net
                if 'net_key' not in iface:
                    continue
                iface['net_id'] = scenario_dict['nets'][ iface['net_key'] ]['uuid']
                INSERT_={'sce_vnf_id': scn_vnf_uuid,
                         'sce_net_id': iface['net_id'],
                         'interface_id': iface['uuid'],
                         'ip_address': iface.get('ip_address')}
                created_time += 0.00001
                iface_uuid = self._new_row_internal('sce_interfaces', INSERT_, add_uuid=True,
                                                    root_uuid=scenario_uuid, created_time=created_time)

        return scenario_uuid
+
    @retry
    @with_transaction
    def edit_scenario(self, scenario_dict):
        """Update the name/description of a scenario and the 'graph' of its nodes.

        :param scenario_dict: must contain 'uuid'; may contain 'tenant_id', 'name',
            'description' and a 'topology' dict whose 'nodes' values are applied
            to the matching 'sce_nets'/'sce_vnfs' rows.
        :return: number of database rows changed
        :raises db_base.db_base_Exception: if zero or several scenarios match
        """
        modified_time = time.time()
        item_changed=0
        # check that the scenario exists
        tenant_id = scenario_dict.get('tenant_id')
        scenario_uuid = scenario_dict['uuid']

        where_text = "uuid='{}'".format(scenario_uuid)
        # NOTE(review): the tenant filter is only added when tenant_id is falsy,
        # which interpolates None into the SQL; 'if tenant_id and tenant_id != "any"'
        # looks intended -- confirm against callers before changing
        if not tenant_id and tenant_id != "any":
            where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
        cmd = "SELECT * FROM scenarios WHERE "+ where_text
        self.logger.debug(cmd)
        self.cur.execute(cmd)
        self.cur.fetchall()
        if self.cur.rowcount==0:
            raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
        elif self.cur.rowcount>1:
            raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)

        # scenario
        nodes = {}
        topology = scenario_dict.pop("topology", None)
        if topology != None and "nodes" in topology:
            nodes = topology.get("nodes",{})
        UPDATE_ = {}
        if "name" in scenario_dict: UPDATE_["name"] = scenario_dict["name"]
        if "description" in scenario_dict: UPDATE_["description"] = scenario_dict["description"]
        if len(UPDATE_)>0:
            WHERE_={'tenant_id': tenant_id, 'uuid': scenario_uuid}
            item_changed += self._update_rows('scenarios', UPDATE_, WHERE_, modified_time=modified_time)
        # sce_nets
        for node_id, node in nodes.items():
            if "graph" in node:
                #node["graph"] = yaml.safe_dump(node["graph"],default_flow_style=True,width=256)
                #TODO, must be json because of the GUI, change to yaml
                node["graph"] = json.dumps(node["graph"])
            WHERE_={'scenario_id': scenario_uuid, 'uuid': node_id}
            # Try to change at sce_nets (version 0 API backward compatibility) and sce_vnfs
            # NOTE(review): the sce_nets update does not pass modified_time while the
            # sce_vnfs one does -- possibly an oversight, confirm before changing
            item_changed += self._update_rows('sce_nets', node, WHERE_)
            item_changed += self._update_rows('sce_vnfs', node, WHERE_, modified_time=modified_time)
        return item_changed
+
+# def get_instance_scenario(self, instance_scenario_id, tenant_id=None):
+# '''Obtain the scenario instance information, filtering by one or several of the tenant, uuid or name
+# instance_scenario_id is the uuid or the name if it is not a valid uuid format
+# Only one scenario isntance must mutch the filtering or an error is returned
+# '''
+# print "1******************************************************************"
+# try:
+# with self.transaction(mdb.cursors.DictCursor):
+# #scenario table
+# where_list=[]
+# if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
+# if db_base._check_valid_uuid(instance_scenario_id):
+# where_list.append( "uuid='" + instance_scenario_id +"'" )
+# else:
+# where_list.append( "name='" + instance_scenario_id +"'" )
+# where_text = " AND ".join(where_list)
+# self.cur.execute("SELECT * FROM instance_scenarios WHERE "+ where_text)
+# rows = self.cur.fetchall()
+# if self.cur.rowcount==0:
+# return -httperrors.Bad_Request, "No scenario instance found with this criteria " + where_text
+# elif self.cur.rowcount>1:
+# return -httperrors.Bad_Request, "More than one scenario instance found with this criteria " + where_text
+# instance_scenario_dict = rows[0]
+#
+# #instance_vnfs
+# self.cur.execute("SELECT uuid,vnf_id FROM instance_vnfs WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
+# instance_scenario_dict['instance_vnfs'] = self.cur.fetchall()
+# for vnf in instance_scenario_dict['instance_vnfs']:
+# #instance_vms
+# self.cur.execute("SELECT uuid, vim_vm_id "+
+# "FROM instance_vms "+
+# "WHERE instance_vnf_id='" + vnf['uuid'] +"'"
+# )
+# vnf['instance_vms'] = self.cur.fetchall()
+# #instance_nets
+# self.cur.execute("SELECT uuid, vim_net_id FROM instance_nets WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
+# instance_scenario_dict['instance_nets'] = self.cur.fetchall()
+#
+# #instance_interfaces
+# self.cur.execute("SELECT uuid, vim_interface_id, instance_vm_id, instance_net_id FROM instance_interfaces WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
+# instance_scenario_dict['instance_interfaces'] = self.cur.fetchall()
+#
+# db_base._convert_datetime2str(instance_scenario_dict)
+# db_base._convert_str2boolean(instance_scenario_dict, ('public','shared','external') )
+# print "2******************************************************************"
+# return 1, instance_scenario_dict
+# except (mdb.Error, AttributeError) as e:
+# print "nfvo_db.get_instance_scenario DB Exception %d: %s" % (e.args[0], e.args[1])
+# return self._format_error(e)
+
+ @retry
+ @with_transaction(cursor='dict')
+ def get_scenario(self, scenario_id, tenant_id=None, datacenter_vim_id=None, datacenter_id=None):
+ '''Obtain the scenario information, filtering by one or several of the tenant, uuid or name
+ scenario_id is the uuid or the name if it is not a valid uuid format
+ if datacenter_vim_id,d datacenter_id is provided, it supply aditional vim_id fields with the matching vim uuid
+ Only one scenario must mutch the filtering or an error is returned
+ '''
+ where_text = "uuid='{}'".format(scenario_id)
+ if not tenant_id and tenant_id != "any":
+ where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
+ cmd = "SELECT * FROM scenarios WHERE " + where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)
+ scenario_dict = rows[0]
+ if scenario_dict["cloud_config"]:
+ scenario_dict["cloud-config"] = yaml.load(scenario_dict["cloud_config"], Loader=yaml.Loader)
+ del scenario_dict["cloud_config"]
+ # sce_vnfs
+ cmd = "SELECT uuid,name,member_vnf_index,vnf_id,description FROM sce_vnfs WHERE scenario_id='{}' "\
+ "ORDER BY created_at".format(scenario_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ scenario_dict['vnfs'] = self.cur.fetchall()
+
+ for vnf in scenario_dict['vnfs']:
+ cmd = "SELECT mgmt_access FROM vnfs WHERE uuid='{}'".format(scenario_dict['vnfs'][0]['vnf_id'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ mgmt_access_dict = self.cur.fetchall()
+ if mgmt_access_dict[0].get('mgmt_access'):
+ vnf['mgmt_access'] = yaml.load(mgmt_access_dict[0]['mgmt_access'], Loader=yaml.Loader)
+ else:
+ vnf['mgmt_access'] = None
+ # sce_interfaces
+ cmd = "SELECT scei.uuid,scei.sce_net_id,scei.interface_id,i.external_name,scei.ip_address"\
+ " FROM sce_interfaces as scei join interfaces as i on scei.interface_id=i.uuid"\
+ " WHERE scei.sce_vnf_id='{}' ORDER BY scei.created_at".format(vnf['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['interfaces'] = self.cur.fetchall()
+ # vms
+ cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, image_list, vms.name as name," \
+ " vms.description as description, vms.boot_data as boot_data, count," \
+ " vms.availability_zone as availability_zone, vms.osm_id as osm_id, vms.pdu_type" \
+ " FROM vnfs join vms on vnfs.uuid=vms.vnf_id" \
+ " WHERE vnfs.uuid='" + vnf['vnf_id'] + "'" \
+ " ORDER BY vms.created_at"
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['vms'] = self.cur.fetchall()
+ for vm in vnf['vms']:
+ if vm["boot_data"]:
+ vm["boot_data"] = yaml.safe_load(vm["boot_data"])
+ else:
+ del vm["boot_data"]
+ if vm["image_list"]:
+ vm["image_list"] = yaml.safe_load(vm["image_list"])
+ else:
+ del vm["image_list"]
+ if datacenter_vim_id!=None:
+ if vm['image_id']:
+ cmd = "SELECT vim_id FROM datacenters_images WHERE image_id='{}' AND " \
+ "datacenter_vim_id='{}'".format(vm['image_id'], datacenter_vim_id)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ if self.cur.rowcount==1:
+ vim_image_dict = self.cur.fetchone()
+ vm['vim_image_id']=vim_image_dict['vim_id']
+ if vm['flavor_id']:
+ cmd = "SELECT vim_id FROM datacenters_flavors WHERE flavor_id='{}' AND " \
+ "datacenter_vim_id='{}'".format(vm['flavor_id'], datacenter_vim_id)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ if self.cur.rowcount==1:
+ vim_flavor_dict = self.cur.fetchone()
+ vm['vim_flavor_id']=vim_flavor_dict['vim_id']
+
+ #interfaces
+ cmd = "SELECT uuid,internal_name,external_name,net_id,type,vpci,mac,bw,model,ip_address," \
+ "floating_ip, port_security" \
+ " FROM interfaces" \
+ " WHERE vm_id='{}'" \
+ " ORDER BY created_at".format(vm['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vm['interfaces'] = self.cur.fetchall()
+ for iface in vm['interfaces']:
+ iface['port-security'] = iface.pop("port_security")
+ iface['floating-ip'] = iface.pop("floating_ip")
+ for sce_interface in vnf["interfaces"]:
+ if sce_interface["interface_id"] == iface["uuid"]:
+ if sce_interface["ip_address"]:
+ iface["ip_address"] = sce_interface["ip_address"]
+ break
+ #nets every net of a vms
+ cmd = "SELECT uuid,name,type,description, osm_id FROM nets WHERE vnf_id='{}'".format(vnf['vnf_id'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['nets'] = self.cur.fetchall()
+ for vnf_net in vnf['nets']:
+ SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
+ cmd = "SELECT {} FROM ip_profiles WHERE net_id='{}'".format(SELECT_,vnf_net['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ ipprofiles = self.cur.fetchall()
+ if self.cur.rowcount==1:
+ vnf_net["ip_profile"] = ipprofiles[0]
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one ip-profile found with this criteria: net_id='{}'".format(vnf_net['uuid']), httperrors.Bad_Request)
+
+ #sce_nets
+ cmd = "SELECT uuid,name,type,external,description,vim_network_name, osm_id" \
+ " FROM sce_nets WHERE scenario_id='{}'" \
+ " ORDER BY created_at ".format(scenario_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ scenario_dict['nets'] = self.cur.fetchall()
+ #datacenter_nets
+ for net in scenario_dict['nets']:
+ if str(net['external']) == 'false':
+ SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
+ cmd = "SELECT {} FROM ip_profiles WHERE sce_net_id='{}'".format(SELECT_,net['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ ipprofiles = self.cur.fetchall()
+ if self.cur.rowcount==1:
+ net["ip_profile"] = ipprofiles[0]
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one ip-profile found with this criteria: sce_net_id='{}'".format(net['uuid']), httperrors.Bad_Request)
+ continue
+ WHERE_=" WHERE name='{}'".format(net['name'])
+ if datacenter_id!=None:
+ WHERE_ += " AND datacenter_id='{}'".format(datacenter_id)
+ cmd = "SELECT vim_net_id FROM datacenter_nets" + WHERE_
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ d_net = self.cur.fetchone()
+ if d_net==None or datacenter_vim_id==None:
+ #print "nfvo_db.get_scenario() WARNING external net %s not found" % net['name']
+ net['vim_id']=None
+ else:
+ net['vim_id']=d_net['vim_net_id']
+
+ db_base._convert_datetime2str(scenario_dict)
+ db_base._convert_str2boolean(scenario_dict, ('public','shared','external','port-security','floating-ip') )
+
+ #forwarding graphs
+ cmd = "SELECT uuid,name,description,vendor FROM sce_vnffgs WHERE scenario_id='{}' "\
+ "ORDER BY created_at".format(scenario_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ scenario_dict['vnffgs'] = self.cur.fetchall()
+ for vnffg in scenario_dict['vnffgs']:
+ cmd = "SELECT uuid,name FROM sce_rsps WHERE sce_vnffg_id='{}' "\
+ "ORDER BY created_at".format(vnffg['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnffg['rsps'] = self.cur.fetchall()
+ for rsp in vnffg['rsps']:
+ cmd = "SELECT uuid,if_order,ingress_interface_id,egress_interface_id,sce_vnf_id " \
+ "FROM sce_rsp_hops WHERE sce_rsp_id='{}' "\
+ "ORDER BY created_at".format(rsp['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rsp['connection_points'] = self.cur.fetchall();
+ cmd = "SELECT uuid,name,sce_vnf_id,interface_id FROM sce_classifiers WHERE sce_vnffg_id='{}' "\
+ "AND sce_rsp_id='{}' ORDER BY created_at".format(vnffg['uuid'], rsp['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rsp['classifier'] = self.cur.fetchone();
+ cmd = "SELECT uuid,ip_proto,source_ip,destination_ip,source_port,destination_port FROM sce_classifier_matches "\
+ "WHERE sce_classifier_id='{}' ORDER BY created_at".format(rsp['classifier']['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rsp['classifier']['matches'] = self.cur.fetchall()
+
+ return scenario_dict
+
+ @retry(command="delete", extra="instances running")
+ @with_transaction(cursor='dict')
+ def delete_scenario(self, scenario_id, tenant_id=None):
+ '''Deletes a scenario, filtering by one or several of the tenant, uuid or name
+ scenario_id is the uuid or the name if it is not a valid uuid format
+ Only one scenario must mutch the filtering or an error is returned
+ '''
+ #scenario table
+ where_text = "uuid='{}'".format(scenario_id)
+ if not tenant_id and tenant_id != "any":
+ where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
+ cmd = "SELECT * FROM scenarios WHERE "+ where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No scenario found where " + where_text, httperrors.Not_Found)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one scenario found where " + where_text, httperrors.Conflict)
+ scenario_uuid = rows[0]["uuid"]
+ scenario_name = rows[0]["name"]
+
+ #sce_vnfs
+ cmd = "DELETE FROM scenarios WHERE uuid='{}'".format(scenario_uuid)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+
+ return scenario_uuid + " " + scenario_name
+
    @retry
    @with_transaction
    def new_rows(self, tables, uuid_list=None, confidential_data=False, attempt=_ATTEMPT):
        """
        Make a transactional insertion of rows at several tables. Can be also a deletion
        :param tables: list with dictionary where the keys are the table names and the values are a row or row list
            with the values to be inserted at the table. Each row is a dictionary with the key values. E.g.:
            tables = [
                {"table1": [ {"column1": value, "column2: value, ... }, {"column1": value, "column2: value, ... }, ...],
                {"table2": [ {"column1": value, "column2: value, ... }, {"column1": value, "column2: value, ... }, ...],
                {"table3": {"column1": value, "column2: value, ... }
            }
            If tables does not contain the 'created_at', it is generated incrementally with the order of tables. You can
            provide a integer value, that it is an index multiply by 0.00001 to add to the created time to manually set
            up and order
            If dict contains {"TO-DELETE": uuid} the entry is deleted if exist instead of inserted
            If dict contains {"TO-UPDATE": {...}, "WHERE": {...}} the matching rows are updated instead
        :param uuid_list: list of created uuids, first one is the root (#TODO to store at uuid table)
        :param confidential_data: forwarded to _new_row_internal -- presumably controls
            hiding of sensitive values in logs; TODO confirm at that helper
        :param attempt: retry bookkeeping object used by the @retry decorator
        :return: None if success, raise exception otherwise
        """
        table_name = None
        created_time = time.time()
        for table in tables:
            for table_name, row_list in table.items():
                index = 0
                # expose the table being processed so @retry can report failures
                attempt.info['table'] = table_name
                if isinstance(row_list, dict):
                    row_list = (row_list, )  # create a list with the single value
                for row in row_list:
                    if "TO-DELETE" in row:
                        self._delete_row_by_id_internal(table_name, row["TO-DELETE"])
                        continue
                    if "TO-UPDATE" in row:
                        self._update_rows(table_name, UPDATE=row["TO-UPDATE"], WHERE=row["WHERE"],
                                          modified_time=created_time)
                        continue
                    if table_name in self.tables_with_created_field:
                        if "created_at" in row:
                            # caller supplied an explicit ordering index
                            created_time_param = created_time + (index + row.pop("created_at"))*0.00001
                        else:
                            # auto-increment the offset to keep insertion order
                            created_time_param = created_time + index*0.00001
                            index += 1
                    else:
                        created_time_param = 0
                    self._new_row_internal(table_name, row, add_uuid=False, root_uuid=None,
                                           confidential_data=confidential_data,
                                           created_time=created_time_param)
+
    @retry
    @with_transaction
    def new_instance_scenario_as_a_whole(self,tenant_id,instance_scenario_name,instance_scenario_description,scenarioDict):
        """Insert a whole scenario instance in one transaction.

        Creates rows at 'instance_scenarios', 'instance_nets' (inter and intra VNF,
        plus optional 'ip_profiles'), 'instance_vnfs', 'instance_vms' and
        'instance_interfaces'.

        :param tenant_id: owner tenant of the instance
        :param instance_scenario_name: name for the new instance
        :param instance_scenario_description: description for the new instance
        :param scenarioDict: deployed scenario description; NOTE it is modified in
            place -- scenario uuids of nets/vnfs/vms/interfaces are overwritten
            with the created instance uuids
        :return: uuid of the created instance_scenario
        """
        created_time = time.time()
        # instance_scenarios
        datacenter_id = scenarioDict['datacenter_id']
        INSERT_={'tenant_id': tenant_id,
                 'datacenter_tenant_id': scenarioDict["datacenter2tenant"][datacenter_id],
                 'name': instance_scenario_name,
                 'description': instance_scenario_description,
                 'scenario_id' : scenarioDict['uuid'],
                 'datacenter_id': datacenter_id
        }
        if scenarioDict.get("cloud-config"):
            INSERT_["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"], default_flow_style=True, width=256)

        instance_uuid = self._new_row_internal('instance_scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)

        # maps sce_net/net uuid -> {datacenter_site_id: instance_net uuid}
        net_scene2instance={}
        # instance_nets: nets interVNF
        for net in scenarioDict['nets']:
            net_scene2instance[ net['uuid'] ] ={}
            datacenter_site_id = net.get('datacenter_id', datacenter_id)
            if not "vim_id_sites" in net:
                net["vim_id_sites"] ={datacenter_site_id: net['vim_id']}
                # NOTE(review): literal key "datacenter_site_id" looks suspicious
                # (it also becomes an iteration entry below) -- confirm intent
                net["vim_id_sites"]["datacenter_site_id"] = {datacenter_site_id: net['vim_id']}
            sce_net_id = net.get("uuid")

            for datacenter_site_id,vim_id in net["vim_id_sites"].items():
                INSERT_={'vim_net_id': vim_id, 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #, 'type': net['type']
                INSERT_['datacenter_id'] = datacenter_site_id
                INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
                if not net.get('created', False):
                    # pre-existing vim network: assume it is already usable
                    INSERT_['status'] = "ACTIVE"
                if sce_net_id:
                    INSERT_['sce_net_id'] = sce_net_id
                created_time += 0.00001
                instance_net_uuid = self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
                net_scene2instance[ sce_net_id ][datacenter_site_id] = instance_net_uuid
                net['uuid'] = instance_net_uuid  # overwrite scenario uuid by instance uuid

            if 'ip_profile' in net:
                net['ip_profile']['net_id'] = None
                net['ip_profile']['sce_net_id'] = None
                net['ip_profile']['instance_net_id'] = instance_net_uuid
                created_time += 0.00001
                ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])

        # instance_vnfs
        for vnf in scenarioDict['vnfs']:
            datacenter_site_id = vnf.get('datacenter_id', datacenter_id)
            INSERT_={'instance_scenario_id': instance_uuid, 'vnf_id': vnf['vnf_id'] }
            INSERT_['datacenter_id'] = datacenter_site_id
            INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
            if vnf.get("uuid"):
                INSERT_['sce_vnf_id'] = vnf['uuid']
            created_time += 0.00001
            instance_vnf_uuid = self._new_row_internal('instance_vnfs', INSERT_, True, instance_uuid, created_time)
            vnf['uuid'] = instance_vnf_uuid  # overwrite scenario uuid by instance uuid

            # instance_nets: nets intraVNF
            for net in vnf['nets']:
                net_scene2instance[ net['uuid'] ] = {}
                INSERT_={'vim_net_id': net['vim_id'], 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #, 'type': net['type']
                INSERT_['datacenter_id'] = net.get('datacenter_id', datacenter_site_id)
                # NOTE(review): indexes datacenter2tenant with the instance-level
                # datacenter_id, not the per-net/per-vnf datacenter_site_id -- confirm
                INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_id]
                if net.get("uuid"):
                    INSERT_['net_id'] = net['uuid']
                created_time += 0.00001
                instance_net_uuid = self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
                net_scene2instance[ net['uuid'] ][datacenter_site_id] = instance_net_uuid
                net['uuid'] = instance_net_uuid  # overwrite scenario uuid by instance uuid

                if 'ip_profile' in net:
                    net['ip_profile']['net_id'] = None
                    net['ip_profile']['sce_net_id'] = None
                    net['ip_profile']['instance_net_id'] = instance_net_uuid
                    created_time += 0.00001
                    ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])

            # instance_vms
            for vm in vnf['vms']:
                INSERT_={'instance_vnf_id': instance_vnf_uuid, 'vm_id': vm['uuid'], 'vim_vm_id': vm['vim_id'] }
                created_time += 0.00001
                instance_vm_uuid = self._new_row_internal('instance_vms', INSERT_, True, instance_uuid, created_time)
                vm['uuid'] = instance_vm_uuid  # overwrite scenario uuid by instance uuid

                # instance_interfaces
                for interface in vm['interfaces']:
                    net_id = interface.get('net_id', None)
                    if net_id is None:
                        # check if it is connected to an inter-VNF net
                        for iface in vnf['interfaces']:
                            if iface['interface_id'] == interface['uuid']:
                                if 'ip_address' in iface:
                                    interface['ip_address'] = iface['ip_address']
                                net_id = iface.get('sce_net_id', None)
                                break
                    if net_id is None:
                        # unconnected interface: nothing to record
                        continue
                    interface_type='external' if interface['external_name'] is not None else 'internal'
                    INSERT_={'instance_vm_id': instance_vm_uuid, 'instance_net_id': net_scene2instance[net_id][datacenter_site_id],
                             'interface_id': interface['uuid'], 'vim_interface_id': interface.get('vim_id'), 'type': interface_type,
                             'ip_address': interface.get('ip_address'), 'floating_ip': int(interface.get('floating-ip',False)),
                             'port_security': int(interface.get('port-security',True))}
                    #created_time += 0.00001
                    interface_uuid = self._new_row_internal('instance_interfaces', INSERT_, True, instance_uuid) #, created_time)
                    interface['uuid'] = interface_uuid  # overwrite scenario uuid by instance uuid
        return instance_uuid
+
+ @retry
+ @with_transaction(cursor='dict')
+ def get_instance_scenario(self, instance_id, tenant_id=None, verbose=False):
+ '''Obtain the instance information, filtering by one or several of the tenant, uuid or name
+ instance_id is the uuid or the name if it is not a valid uuid format
+ Only one instance must mutch the filtering or an error is returned
+ '''
+ # instance table
+ where_list = []
+ if tenant_id:
+ where_list.append("inst.tenant_id='{}'".format(tenant_id))
+ if db_base._check_valid_uuid(instance_id):
+ where_list.append("inst.uuid='{}'".format(instance_id))
+ else:
+ where_list.append("inst.name='{}'".format(instance_id))
+ where_text = " AND ".join(where_list)
+ cmd = "SELECT inst.uuid as uuid, inst.name as name, inst.scenario_id as scenario_id, datacenter_id"\
+ " ,datacenter_tenant_id, s.name as scenario_name,inst.tenant_id as tenant_id" \
+ " ,inst.description as description, inst.created_at as created_at" \
+ " ,inst.cloud_config as cloud_config, s.osm_id as nsd_osm_id" \
+ " FROM instance_scenarios as inst left join scenarios as s on inst.scenario_id=s.uuid" \
+ " WHERE " + where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+
+ if self.cur.rowcount == 0:
+ raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Not_Found)
+ elif self.cur.rowcount > 1:
+ raise db_base.db_base_Exception("More than one instance found where " + where_text,
+ httperrors.Bad_Request)
+ instance_dict = rows[0]
+ if instance_dict["cloud_config"]:
+ instance_dict["cloud-config"] = yaml.load(instance_dict["cloud_config"], Loader=yaml.Loader)
+ del instance_dict["cloud_config"]
+
+ # instance_vnfs
+ cmd = "SELECT iv.uuid as uuid, iv.vnf_id as vnf_id, sv.name as vnf_name, sce_vnf_id, datacenter_id"\
+ ", datacenter_tenant_id, v.mgmt_access, sv.member_vnf_index, v.osm_id as vnfd_osm_id "\
+ "FROM instance_vnfs as iv left join sce_vnfs as sv "\
+ " on iv.sce_vnf_id=sv.uuid join vnfs as v on iv.vnf_id=v.uuid " \
+ "WHERE iv.instance_scenario_id='{}' " \
+ "ORDER BY iv.created_at ".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['vnfs'] = self.cur.fetchall()
+ for vnf in instance_dict['vnfs']:
+ vnf["ip_address"] = None
+ vnf_mgmt_access_iface = None
+ vnf_mgmt_access_vm = None
+ if vnf["mgmt_access"]:
+ vnf_mgmt_access = yaml.load(vnf["mgmt_access"], Loader=yaml.Loader)
+ vnf_mgmt_access_iface = vnf_mgmt_access.get("interface_id")
+ vnf_mgmt_access_vm = vnf_mgmt_access.get("vm_id")
+ vnf["ip_address"] = vnf_mgmt_access.get("ip-address")
+
+ # instance vms
+ cmd = "SELECT iv.uuid as uuid, vim_vm_id, status, error_msg, vim_info, iv.created_at as "\
+ "created_at, name, vms.osm_id as vdu_osm_id, vim_name, vms.uuid as vm_uuid, related"\
+ " FROM instance_vms as iv join vms on iv.vm_id=vms.uuid "\
+ " WHERE instance_vnf_id='{}' ORDER BY iv.created_at".format(vnf['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['vms'] = self.cur.fetchall()
+ for vm in vnf['vms']:
+ vm_manage_iface_list=[]
+ # instance_interfaces
+ cmd = "SELECT vim_interface_id, instance_net_id, internal_name,external_name, mac_address,"\
+ " ii.ip_address as ip_address, vim_info, i.type as type, sdn_port_id, i.uuid"\
+ " FROM instance_interfaces as ii join interfaces as i on ii.interface_id=i.uuid"\
+ " WHERE instance_vm_id='{}' ORDER BY i.created_at".format(vm['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd )
+ vm['interfaces'] = self.cur.fetchall()
+ for iface in vm['interfaces']:
+ if vnf_mgmt_access_iface and vnf_mgmt_access_iface == iface["uuid"]:
+ if not vnf["ip_address"]:
+ vnf["ip_address"] = iface["ip_address"]
+ if iface["type"] == "mgmt" and iface["ip_address"]:
+ vm_manage_iface_list.append(iface["ip_address"])
+ if not verbose:
+ del iface["type"]
+ del iface["uuid"]
+ if vm_manage_iface_list:
+ vm["ip_address"] = ",".join(vm_manage_iface_list)
+ if not vnf["ip_address"] and vnf_mgmt_access_vm == vm["vm_uuid"]:
+ vnf["ip_address"] = vm["ip_address"]
+ del vm["vm_uuid"]
+
+ #instance_nets
+ #select_text = "instance_nets.uuid as uuid,sce_nets.name as net_name,instance_nets.vim_net_id as net_id,instance_nets.status as status,instance_nets.external as external"
+ #from_text = "instance_nets join instance_scenarios on instance_nets.instance_scenario_id=instance_scenarios.uuid " + \
+ # "join sce_nets on instance_scenarios.scenario_id=sce_nets.scenario_id"
+ #where_text = "instance_nets.instance_scenario_id='"+ instance_dict['uuid'] + "'"
+ cmd = "SELECT inets.uuid as uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, " \
+ "net_id as vnf_net_id, datacenter_id, datacenter_tenant_id, sdn_net_id, " \
+ "snets.osm_id as ns_net_osm_id, nets.osm_id as vnf_net_osm_id, inets.vim_name, related " \
+ "FROM instance_nets as inets left join sce_nets as snets on inets.sce_net_id=snets.uuid " \
+ "left join nets on inets.net_id=nets.uuid " \
+ "WHERE instance_scenario_id='{}' ORDER BY inets.created_at".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['nets'] = self.cur.fetchall()
+
+ # instance sdn_nets:
+ cmd = "SELECT * FROM instance_wim_nets WHERE instance_scenario_id='{}' ORDER BY created_at;".format(
+ instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sdn_nets'] = self.cur.fetchall()
+
+ #instance_sfps
+ cmd = "SELECT uuid,vim_sfp_id,sce_rsp_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info, related"\
+ " FROM instance_sfps" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sfps'] = self.cur.fetchall()
+
+ # for sfp in instance_dict['sfps']:
+ #instance_sfs
+ cmd = "SELECT uuid,vim_sf_id,sce_rsp_hop_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info, related"\
+ " FROM instance_sfs" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sfp_id
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sfs'] = self.cur.fetchall()
+
+ #for sf in instance_dict['sfs']:
+ #instance_sfis
+ cmd = "SELECT uuid,vim_sfi_id,sce_rsp_hop_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info, related"\
+ " FROM instance_sfis" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sf_id
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sfis'] = self.cur.fetchall()
+# for sfi in instance_dict['sfi']:
+
+ #instance_classifications
+ cmd = "SELECT uuid,vim_classification_id,sce_classifier_match_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info, related"\
+ " FROM instance_classifications" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['classifications'] = self.cur.fetchall()
+# for classification in instance_dict['classifications']
+
+ db_base._convert_datetime2str(instance_dict)
+ db_base._convert_str2boolean(instance_dict, ('public','shared','created') )
+ return instance_dict
+
+ @retry(command='delete', extra='No dependences can avoid deleting!!!!')
+ @with_transaction(cursor='dict')
+ def delete_instance_scenario(self, instance_id, tenant_id=None):
+ '''Deletes a instance_Scenario, filtering by one or several of the tenant, uuid or name
+ instance_id is the uuid or the name if it is not a valid uuid format
+ Only one instance_scenario must mutch the filtering or an error is returned
+ '''
+ #instance table
+ where_list=[]
+ if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
+ if db_base._check_valid_uuid(instance_id):
+ where_list.append( "uuid='" + instance_id +"'" )
+ else:
+ where_list.append( "name='" + instance_id +"'" )
+ where_text = " AND ".join(where_list)
+ cmd = "SELECT * FROM instance_scenarios WHERE "+ where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Bad_Request)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one instance found where " + where_text, httperrors.Bad_Request)
+ instance_uuid = rows[0]["uuid"]
+ instance_name = rows[0]["name"]
+
+ #sce_vnfs
+ cmd = "DELETE FROM instance_scenarios WHERE uuid='{}'".format(instance_uuid)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+
+ return instance_uuid + " " + instance_name
+
+ @retry(table='instance_scenarios')
+ @with_transaction
+ def new_instance_scenario(self, instance_scenario_dict, tenant_id):
+ #return self.new_row('vnfs', vnf_dict, None, tenant_id, True, True)
+ return self._new_row_internal('instance_scenarios', instance_scenario_dict, tenant_id, add_uuid=True, root_uuid=None, log=True)
+
+ def update_instance_scenario(self, instance_scenario_dict):
+ #TODO:
+ return
+
+ @retry(table='instance_vnfs')
+ @with_transaction
+ def new_instance_vnf(self, instance_vnf_dict, tenant_id, instance_scenario_id = None):
+ #return self.new_row('vms', vm_dict, tenant_id, True, True)
+ return self._new_row_internal('instance_vnfs', instance_vnf_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+ def update_instance_vnf(self, instance_vnf_dict):
+ #TODO:
+ return
+
+ def delete_instance_vnf(self, instance_vnf_id):
+ #TODO:
+ return
+
+ @retry(table='instance_vms')
+ @with_transaction
+ def new_instance_vm(self, instance_vm_dict, tenant_id, instance_scenario_id = None):
+ #return self.new_row('vms', vm_dict, tenant_id, True, True)
+ return self._new_row_internal('instance_vms', instance_vm_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+ def update_instance_vm(self, instance_vm_dict):
+ #TODO:
+ return
+
+ def delete_instance_vm(self, instance_vm_id):
+ #TODO:
+ return
+
+ @retry(table='instance_nets')
+ @with_transaction
+ def new_instance_net(self, instance_net_dict, tenant_id, instance_scenario_id = None):
+ return self._new_row_internal('instance_nets', instance_net_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+ def update_instance_net(self, instance_net_dict):
+ #TODO:
+ return
+
+ def delete_instance_net(self, instance_net_id):
+ #TODO:
+ return
+
+ @retry(table='instance_interfaces')
+ @with_transaction
+ def new_instance_interface(self, instance_interface_dict, tenant_id, instance_scenario_id = None):
+ return self._new_row_internal('instance_interfaces', instance_interface_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+ def update_instance_interface(self, instance_interface_dict):
+ #TODO:
+ return
+
+ def delete_instance_interface(self, instance_interface_dict):
+ #TODO:
+ return
+
+ @retry(table='datacenter_nets')
+ @with_transaction
+ def update_datacenter_nets(self, datacenter_id, new_net_list=[]):
+ ''' Removes the old and adds the new net list at datacenter list for one datacenter.
+ Attribute
+ datacenter_id: uuid of the datacenter to act upon
+ table: table where to insert
+ new_net_list: the new values to be inserted. If empty it only deletes the existing nets
+ Return: (Inserted items, Deleted items) if OK, (-Error, text) if error
+ '''
+ created_time = time.time()
+ cmd="DELETE FROM datacenter_nets WHERE datacenter_id='{}'".format(datacenter_id)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ deleted = self.cur.rowcount
+ inserted = 0
+ for new_net in new_net_list:
+ created_time += 0.00001
+ self._new_row_internal('datacenter_nets', new_net, add_uuid=True, created_time=created_time)
+ inserted += 1
+ return inserted, deleted
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+JSON schemas used by openmano httpserver.py module to parse the different files and messages sent through the API
+'''
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+__date__ ="$09-oct-2014 09:09:48$"
+
# Basis schemas
# NOTE: regex patterns containing backslashes use raw strings so that
# sequences like "\." are not parsed as (invalid) Python string escapes.
patern_name = "^[ -~]+$"
passwd_schema = {"type": "string", "minLength": 1, "maxLength": 60}
nameshort_schema = {"type": "string", "minLength": 1, "maxLength": 60, "pattern": "^[^,;()'\"]+$"}
name_schema = {"type": "string", "minLength": 1, "maxLength": 255, "pattern": "^[^,;()'\"]+$"}
xml_text_schema = {"type": "string", "minLength": 1, "maxLength": 1000, "pattern": "^[^']+$"}
description_schema = {"type": ["string", "null"], "maxLength": 255, "pattern": "^[^'\"]+$"}
id_schema_fake = {"type": "string", "minLength": 2, "maxLength": 36}  # uuid-sized, but no pattern enforced
id_schema = {"type": "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
pci_schema = {"type": "string", "pattern": r"^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\.[0-9a-fA-F]$"}
# allows [] for wildcards. For that reason huge length limit is set.
# The '-' is placed last in the character class so it is a literal hyphen;
# the previous ':-\[' spelling accidentally defined the range ':'..'['
# (also admitting ';', '<', '=', '>', '?', '@').
pci_extended_schema = {"type": "string", "pattern": r"^[0-9a-fA-F.:\[\]-]{12,40}$"}

http_schema = {"type": "string", "pattern": "^https?://[^'\"=]+$"}
bandwidth_schema = {"type": "string", "pattern": "^[0-9]+ *([MG]bps)?$"}
memory_schema = {"type": "string", "pattern": "^[0-9]+ *([MG]i?[Bb])?$"}
integer0_schema = {"type": "integer", "minimum": 0}
integer1_schema = {"type": "integer", "minimum": 1}
path_schema = {"type": "string", "pattern": r"^(\.){0,2}(/[^/\"':{}\(\)]+)+$"}
vlan_schema = {"type": "integer", "minimum": 1, "maximum": 4095}
vlan1000_schema = {"type": "integer", "minimum": 1000, "maximum": 4095}
# must be unicast: LSB bit of MSB byte == 0
mac_schema = {"type": "string", "pattern": "^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}
ip_schema = {"type": "string",
             "pattern": r"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"}
ip_prefix_schema = {"type": "string",
                    "pattern": r"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"
                               r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(30|[12]?[0-9])$"}
port_schema = {"type": "integer", "minimum": 1, "maximum": 65534}
object_schema = {"type": "object"}
schema_version_2 = {"type": "integer", "minimum": 2, "maximum": 2}
string_list = {"type": "array", "items": {"type": "string"}}
log_level_schema = {"type": "string", "enum": ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]}
checksum_schema = {"type": "string", "pattern": "^[0-9a-fA-F]{32}$"}
size_schema = {"type": "integer", "minimum": 1, "maximum": 100}
boolean_schema = {"type": "boolean"}
null_schema = {"type": "null"}
+
# Metadata accepted alongside a VM image
metadata_schema = {
    "type": "object",
    "properties": {
        "architecture": {"type": "string"},
        "use_incremental": {"type": "string", "enum": ["yes", "no"]},
        "vpci": pci_schema,
        "os_distro": {"type": "string"},
        "os_type": {"type": "string"},
        "os_version": {"type": "string"},
        "bus": {"type": "string"},
        "topology": {"type": "string", "enum": ["oneSocket"]},
    },
}
+
# Schema for the server configuration file
config_schema = {
    "title": "configuration response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "http_port": port_schema,
        "http_admin_port": port_schema,
        "http_host": nameshort_schema,
        "auto_push_VNF_to_VIMs": boolean_schema,
        "vnf_repository": path_schema,
        "db_host": nameshort_schema,
        "db_user": nameshort_schema,
        "db_passwd": {"type": "string"},
        "db_name": nameshort_schema,
        "db_ovim_host": nameshort_schema,
        "db_ovim_user": nameshort_schema,
        "db_ovim_passwd": {"type": "string"},
        "db_ovim_name": nameshort_schema,
        # Next fields will disappear once the MANO API includes appropriate primitives
        "vim_url": http_schema,
        "vim_url_admin": http_schema,
        "vim_name": nameshort_schema,
        "vim_tenant_name": nameshort_schema,
        "mano_tenant_name": nameshort_schema,
        "mano_tenant_id": id_schema,
        "http_console_proxy": boolean_schema,
        "http_console_host": nameshort_schema,
        # each entry is either a single port or an inclusive from/to range
        "http_console_ports": {
            "type": "array",
            "items": {"oneOf": [
                port_schema,
                {"type": "object",
                 "properties": {"from": port_schema, "to": port_schema},
                 "required": ["from", "to"]},
            ]},
        },
        "log_level": log_level_schema,
        "log_socket_level": log_level_schema,
        "log_level_db": log_level_schema,
        "log_level_vim": log_level_schema,
        "log_level_wim": log_level_schema,
        "log_level_nfvo": log_level_schema,
        "log_level_http": log_level_schema,
        "log_level_console": log_level_schema,
        "log_level_ovim": log_level_schema,
        "log_level_sdn": log_level_schema,
        "log_level_sdnconn": log_level_schema,
        "log_file_db": path_schema,
        "log_file_vim": path_schema,
        "log_file_wim": path_schema,
        "log_file_nfvo": path_schema,
        "log_file_http": path_schema,
        "log_file_console": path_schema,
        "log_file_ovim": path_schema,
        "log_file_sdn": path_schema,
        "log_file_sdnconn": path_schema,
        "log_socket_host": nameshort_schema,
        "log_socket_port": port_schema,
        "log_file": path_schema,
    },
    "required": ["db_user", "db_passwd", "db_name"],
    "additionalProperties": False,
}
+
tenant_schema = {
    "title": "tenant information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "tenant": {
            "type": "object",
            "properties": {
                "name": nameshort_schema,
                "description": description_schema,
            },
            "required": ["name"],
            "additionalProperties": True,
        },
    },
    "required": ["tenant"],
    "additionalProperties": False,
}
+
tenant_edit_schema = {
    "title": "tenant edit information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "tenant": {
            "type": "object",
            "properties": {
                "name": name_schema,
                "description": description_schema,
            },
            "additionalProperties": False,
        },
    },
    "required": ["tenant"],
    "additionalProperties": False,
}
+
# Properties shared by datacenter_schema and datacenter_edit_schema
datacenter_schema_properties = {
    "name": name_schema,
    "description": description_schema,
    # currently "openvim" or "openstack", can be enlarged with plugins
    "type": nameshort_schema,
    "vim_url": description_schema,
    "vim_url_admin": description_schema,
    "config": {"type": "object"},
}
+
datacenter_schema = {
    "title": "datacenter information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "datacenter": {
            "type": "object",
            "properties": datacenter_schema_properties,
            "required": ["name", "vim_url"],
            "additionalProperties": True,
        },
    },
    "required": ["datacenter"],
    "additionalProperties": False,
}
+
+
datacenter_edit_schema = {
    # typo fix: was "datacenter edit nformation schema"
    "title": "datacenter edit information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "datacenter": {
            "type": "object",
            "properties": datacenter_schema_properties,
            "additionalProperties": False,
        },
    },
    "required": ["datacenter"],
    "additionalProperties": False,
}
+
+
netmap_new_schema = {
    "title": "netmap new information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "netmap": {
            "type": "object",
            "properties": {
                "name": name_schema,      # name or uuid of the net to change
                "vim_id": id_schema,
                "vim_name": name_schema,
            },
            "minProperties": 1,
            "additionalProperties": False,
        },
    },
    "required": ["netmap"],
    "additionalProperties": False,
}
+
netmap_edit_schema = {
    "title": "netmap edit information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "netmap": {
            "type": "object",
            "properties": {
                "name": name_schema,      # name or uuid of the net to change
            },
            "minProperties": 1,
            "additionalProperties": False,
        },
    },
    "required": ["netmap"],
    "additionalProperties": False,
}
+
datacenter_action_schema = {
    "title": "datacenter action information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "check-connectivity": {"type": "null"},
        "net-update": {"type": "null"},
        "net-edit": {
            "type": "object",
            "properties": {
                "net": name_schema,       # name or uuid of the net to change
                "name": name_schema,
                "description": description_schema,
                "shared": boolean_schema,
            },
            "minProperties": 1,
            "additionalProperties": False,
        },
        "net-delete": {
            "type": "object",
            "properties": {
                "net": name_schema,       # name or uuid of the net to change
            },
            "required": ["net"],
            "additionalProperties": False,
        },
    },
    # exactly one action per request
    "minProperties": 1,
    "maxProperties": 1,
    "additionalProperties": False,
}
+
+
datacenter_associate_schema = {
    "title": "datacenter associate information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "datacenter": {
            "type": "object",
            "properties": {
                "name": name_schema,
                "vim_id": id_schema,
                "vim_tenant": name_schema,
                "vim_tenant_name": name_schema,
                "vim_username": nameshort_schema,
                "vim_password": nameshort_schema,
                "config": {"type": "object"},
            },
            # "required": ["vim_tenant"],
            "additionalProperties": True,
        },
    },
    "required": ["datacenter"],
    "additionalProperties": False,
}
+
dhcp_schema = {
    "title": "DHCP schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "enabled": boolean_schema,
        "start-address": {"oneOf": [null_schema, ip_schema]},
        "count": integer0_schema,
    },
}
+
ip_profile_schema = {
    "title": "IP profile schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "ip-version": {"type": "string", "enum": ["IPv4", "IPv6"]},
        "subnet-address": ip_prefix_schema,
        "gateway-address": ip_schema,
        # a bare address is accepted for backward compatibility
        "dns-address": {"oneOf": [ip_schema,
                                  {"type": "array", "items": ip_schema}]},
        "dhcp": dhcp_schema,
    },
}
+
key_pair_schema = {
    "title": "Key-pair schema for cloud-init configuration schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "name": name_schema,
        "key": {"type": "string"},
    },
    "required": ["key"],
    "additionalProperties": False,
}
+
cloud_config_user_schema = {
    "title": "User schema for cloud-init configuration schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "name": nameshort_schema,
        "user-info": {"type": "string"},
        "key-pairs": {"type": "array", "items": {"type": "string"}},
    },
    "required": ["name"],
    "additionalProperties": False,
}
+
cloud_config_schema = {
    "title": "Cloud-init configuration schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "key-pairs": {"type": "array", "items": {"type": "string"}},
        "users": {"type": "array", "items": cloud_config_user_schema},
    },
    "additionalProperties": False,
}
+
internal_connection_element_schema = {
    "type": "object",
    "properties": {
        "VNFC": name_schema,
        "local_iface_name": name_schema,
    },
}
+
# v0.2 variant: additionally allows a fixed ip_address on the element
internal_connection_element_schema_v02 = {
    "type": "object",
    "properties": {
        "VNFC": name_schema,
        "local_iface_name": name_schema,
        "ip_address": ip_schema,
    },
}
+
internal_connection_schema = {
    "type": "object",
    "properties": {
        "name": name_schema,
        "description": description_schema,
        "type": {"type": "string", "enum": ["bridge", "data", "ptp"]},
        "elements": {"type": "array", "items": internal_connection_element_schema, "minItems": 1},
    },
    "required": ["name", "type", "elements"],
    "additionalProperties": False,
}
+
internal_connection_schema_v02 = {
    "type": "object",
    "properties": {
        "name": name_schema,
        "description": description_schema,
        "type": {"type": "string", "enum": ["e-line", "e-lan"]},
        "implementation": {"type": "string", "enum": ["overlay", "underlay"]},
        "ip-profile": ip_profile_schema,
        "elements": {"type": "array", "items": internal_connection_element_schema_v02, "minItems": 1},
    },
    "required": ["name", "type", "implementation", "elements"],
    "additionalProperties": False,
}
+
external_connection_schema = {
    "type": "object",
    "properties": {
        "name": name_schema,
        "type": {"type": "string", "enum": ["mgmt", "bridge", "data"]},
        "VNFC": name_schema,
        "local_iface_name": name_schema,
        "description": description_schema,
    },
    "required": ["name", "type", "VNFC", "local_iface_name"],
    "additionalProperties": False,
}
+
# Not yet used
external_connection_schema_v02 = {
    "type": "object",
    "properties": {
        "name": name_schema,
        "mgmt": boolean_schema,
        "type": {"type": "string", "enum": ["e-line", "e-lan"]},
        "implementation": {"type": "string", "enum": ["overlay", "underlay"]},
        "VNFC": name_schema,
        "local_iface_name": name_schema,
        "description": description_schema,
    },
    "required": ["name", "type", "VNFC", "local_iface_name"],
    "additionalProperties": False,
}
+
interfaces_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "name": name_schema,
            "dedicated": {"type": "string", "enum": ["yes", "no", "yes:sriov"]},
            "bandwidth": bandwidth_schema,
            "vpci": pci_schema,
            "mac_address": mac_schema,
        },
        "additionalProperties": False,
        "required": ["name", "dedicated", "bandwidth"],
    },
}
+
bridge_interfaces_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "name": name_schema,
            "bandwidth": bandwidth_schema,
            "vpci": pci_schema,
            "mac_address": mac_schema,
            "model": {"type": "string",
                      "enum": ["virtio", "e1000", "ne2k_pci", "pcnet", "rtl8139", "paravirt"]},
            "port-security": boolean_schema,
            "floating-ip": boolean_schema,
        },
        "additionalProperties": False,
        "required": ["name"],
    },
}
+
devices_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "type": {"type": "string", "enum": ["disk", "cdrom", "xml"]},
            "image": path_schema,
            "image name": name_schema,
            "image checksum": checksum_schema,
            "image metadata": metadata_schema,
            "size": size_schema,
            "vpci": pci_schema,
            "xml": xml_text_schema,
            "name": name_schema,
        },
        "additionalProperties": False,
        "required": ["type"],
    },
}
+
+
numa_schema = {
    "type": "object",
    "properties": {
        "memory": integer1_schema,
        "cores": integer1_schema,
        "paired-threads": integer1_schema,
        "threads": integer1_schema,
        "cores-id": {"type": "array", "items": integer0_schema},
        "paired-threads-id": {"type": "array",
                              "items": {"type": "array", "minItems": 2, "maxItems": 2,
                                        "items": integer0_schema}},
        "threads-id": {"type": "array", "items": integer0_schema},
        "interfaces": interfaces_schema,
    },
    "additionalProperties": False,
    # "required": ["memory"]
}
+
config_files_schema = {
    "title": "Config files for cloud init schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "dest": path_schema,
        # content is plain text unless an encoding is given
        "encoding": {"type": "string",
                     "enum": ["b64", "base64", "gz", "gz+b64", "gz+base64",
                              "gzip+b64", "gzip+base64"]},
        "content": {"type": "string"},
        "permissions": {"type": "string"},  # typically octal notation, e.g. '0644'
        "owner": {"type": "string"},        # format: owner:group
    },
    "additionalProperties": False,
    "required": ["dest", "content"],
}
+
boot_data_vdu_schema = {
    "title": "Boot data (Cloud-init) configuration schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "key-pairs": {"type": "array", "items": {"type": "string"}},
        "users": {"type": "array", "items": cloud_config_user_schema},
        "user-data": {"type": "string"},  # script to run
        "config-files": {"type": "array", "items": config_files_schema},
        # NOTE: "user-data" is mutually exclusive with users and config-files
        # because users/files are injected using user-data
        "boot-data-drive": boolean_schema,
    },
    "additionalProperties": False,
}
+
# Descriptor of one VNF component (one VM of the VNF)
vnfc_schema = {
    "type":"object",
    "properties":{
        "name": name_schema,
        "description": description_schema,
        "count": integer1_schema,
        "image name": name_schema,
        "availability_zone": name_schema,
        "VNFC image": {"oneOf": [path_schema, http_schema]},
        "image checksum": checksum_schema,
        "image metadata": metadata_schema,
        #"cloud-config": cloud_config_schema, #common for all vnfs in the scenario
        "processor": {
            "type":"object",
            "properties":{
                "model":description_schema,
                "features":{"type":"array","items":nameshort_schema}
            },
            "required": ["model"],
            "additionalProperties": False
        },
        "hypervisor": {
            "type":"object",
            "properties":{
                "type":nameshort_schema,
                "version":description_schema
            },
        },
        "ram":integer0_schema,
        "vcpus":integer0_schema,
        "disk": integer1_schema,
        "numas": {
            "type": "array",
            "items": numa_schema
        },
        "bridge-ifaces": bridge_interfaces_schema,
        "devices": devices_schema,
        "boot-data" : boot_data_vdu_schema

    },
    "required": ["name"],
    # exactly one image source must be given: either "VNFC image"
    # (a path or URL) or "image name" — oneOf rejects both-or-neither
    "oneOf": [
        {"required": ["VNFC image"]},
        {"required": ["image name"]}
    ],
    "additionalProperties": False
}
+
vnfd_schema_v01 = {
    "title": "vnfd information schema v0.1",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "vnf": {
            "type": "object",
            "properties": {
                "name": name_schema,
                "description": description_schema,
                "class": nameshort_schema,
                "public": boolean_schema,
                "physical": boolean_schema,
                "default_user": name_schema,
                "tenant_id": id_schema,  # only valid for admin
                "external-connections": {"type": "array", "items": external_connection_schema,
                                         "minItems": 1},
                "internal-connections": {"type": "array", "items": internal_connection_schema,
                                         "minItems": 1},
                "VNFC": {"type": "array", "items": vnfc_schema, "minItems": 1},
            },
            "required": ["name", "external-connections"],
            "additionalProperties": True,
        },
    },
    "required": ["vnf"],
    "additionalProperties": False,
}
+
# VNFD schema for OSM R1
vnfd_schema_v02 = {
    "title": "vnfd information schema v0.2",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "schema_version": {"type": "string", "enum": ["0.2"]},
        "vnf": {
            "type": "object",
            "properties": {
                "name": name_schema,
                "description": description_schema,
                "class": nameshort_schema,
                "public": boolean_schema,
                "physical": boolean_schema,
                "tenant_id": id_schema,  # only valid for admin
                "external-connections": {"type": "array", "items": external_connection_schema,
                                         "minItems": 1},
                "internal-connections": {"type": "array", "items": internal_connection_schema_v02,
                                         "minItems": 1},
                # "cloud-config": cloud_config_schema, #common for all vnfcs
                "VNFC": {"type": "array", "items": vnfc_schema, "minItems": 1},
            },
            "required": ["name"],
            "additionalProperties": True,
        },
    },
    "required": ["vnf", "schema_version"],
    "additionalProperties": False,
}
+
+#vnfd_schema = vnfd_schema_v01
+#{
+# "title":"vnfd information schema v0.2",
+# "$schema": "http://json-schema.org/draft-04/schema#",
+# "oneOf": [vnfd_schema_v01, vnfd_schema_v02]
+#}
+
# Graphical position/interface layout of a node in a scenario descriptor
graph_schema = {
    "title": "graphical scenario descriptor information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "x": integer0_schema,
        "y": integer0_schema,
        "ifaces": {
            "type": "object",
            "properties": {
                "left": {"type": "array"},
                "right": {"type": "array"},
                "bottom": {"type": "array"},
            },
        },
    },
    "required": ["x", "y"],
}
+
# Network scenario descriptor, original v0.1 format: a "topology" with
# free-named "nodes" (VNFs/networks) and "connections" between them
nsd_schema_v01 = {
    "title":"network scenario descriptor information schema v0.1",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "name":name_schema,
        "description": description_schema,
        "tenant_id": id_schema, #only valid for admin
        "public": boolean_schema,
        "topology":{
            "type":"object",
            "properties":{
                # keys are free node names; "." matches any property name
                "nodes": {
                    "type":"object",
                    "patternProperties":{
                        ".": {
                            "type": "object",
                            "properties":{
                                "type":{"type":"string", "enum":["VNF", "other_network", "network", "external_network"]},
                                "vnf_id": id_schema,
                                "graph": graph_schema,
                            },
                            # accepts both "model" and "VNF model" keys
                            "patternProperties":{
                                "^(VNF )?model$": {"type": "string"}
                            },
                            "required": ["type"]
                        }
                    }
                },
                "connections": {
                    "type":"object",
                    "patternProperties":{
                        ".": {
                            "type": "object",
                            "properties":{
                                "nodes":{"oneOf":[{"type":"object", "minProperties":2}, {"type":"array", "minLength":1}]},
                                "type": {"type": "string", "enum":["link", "external_network", "dataplane_net", "bridge_net"]},
                                "graph": graph_schema
                            },
                            "required": ["nodes"]
                        },
                    }
                }
            },
            "required": ["nodes"],
            "additionalProperties": False
        }
    },
    "required": ["name","topology"],
    "additionalProperties": False
}
+
# Network scenario descriptor v0.2: flat "vnfs" and "networks" maps
# replace the v0.1 "topology" wrapper
nsd_schema_v02 = {
    "title":"network scenario descriptor information schema v0.2",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "schema_version": schema_version_2,
        "scenario":{
            "type":"object",
            "properties":{
                "name": name_schema,
                "description": description_schema,
                "tenant_id": id_schema, #only valid for admin
                "public": boolean_schema,
                # keys are free vnf names; "." matches any property name
                "vnfs": {
                    "type":"object",
                    "patternProperties":{
                        ".": {
                            "type": "object",
                            "properties":{
                                "vnf_id": id_schema,
                                "graph": graph_schema,
                                "vnf_name": name_schema,
                            },
                        }
                    },
                    "minProperties": 1
                },
                # keys are free network names
                "networks": {
                    "type":"object",
                    "patternProperties":{
                        ".": {
                            "type": "object",
                            "properties":{
                                "interfaces":{"type":"array", "minLength":1},
                                "type": {"type": "string", "enum":["dataplane", "bridge"]},
                                "external" : boolean_schema,
                                "graph": graph_schema
                            },
                            "required": ["interfaces"]
                        },
                    }
                },

            },
            "required": ["vnfs", "name"],
            "additionalProperties": False
        }
    },
    "required": ["scenario","schema_version"],
    "additionalProperties": False
}
+
#NSD schema for OSM R1
# v0.3 extends v0.2 with cloud-config, per-vnf internal-connections
# overrides, ip-profiles and typed network interfaces
nsd_schema_v03 = {
    "title":"network scenario descriptor information schema v0.3",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "schema_version": {"type": "string", "enum": ["0.3"]},
        "scenario":{
            "type":"object",
            "properties":{
                "name": name_schema,
                "description": description_schema,
                "tenant_id": id_schema, #only valid for admin
                "public": boolean_schema,
                "cloud-config": cloud_config_schema, #common for all vnfs in the scenario
                #"datacenter": name_schema,
                # keys are free vnf names; "." matches any property name
                "vnfs": {
                    "type":"object",
                    "patternProperties":{
                        ".": {
                            "type": "object",
                            "properties":{
                                "vnf_id": id_schema,
                                "graph": graph_schema,
                                "vnf_name": name_schema,
                                #"cloud-config": cloud_config_schema, #particular for a vnf
                                #"datacenter": name_schema,
                                # per-vnf overrides of the vnfd internal connections
                                "internal-connections": {
                                    "type": "object",
                                    "patternProperties": {
                                        ".": {
                                            "type": "object",
                                            "properties": {
                                                "ip-profile": ip_profile_schema,
                                                "elements": {
                                                    "type" : "array",
                                                    "items":{
                                                        "type":"object",
                                                        "properties":{
                                                            "VNFC": name_schema,
                                                            "local_iface_name": name_schema,
                                                            "ip_address": ip_schema,
                                                        },
                                                        "required": ["VNFC", "local_iface_name"],
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            },
                        }
                    },
                    "minProperties": 1
                },
                # keys are free network names
                "networks": {
                    "type":"object",
                    "patternProperties":{
                        ".": {
                            "type": "object",
                            "properties":{
                                "interfaces":{
                                    "type":"array",
                                    "minLength":1,
                                    "items":{
                                        "type":"object",
                                        "properties":{
                                            "vnf": name_schema,
                                            "vnf_interface": name_schema,
                                            "ip_address": ip_schema
                                        },
                                        "required": ["vnf", "vnf_interface"],
                                    }
                                },
                                "type": {"type": "string", "enum":["e-line", "e-lan"]},
                                "implementation": {"type": "string", "enum":["overlay", "underlay"]},
                                "external" : boolean_schema,
                                "graph": graph_schema,
                                "ip-profile": ip_profile_schema
                            },
                            "required": ["interfaces"]
                        },
                    }
                },

            },
            "required": ["vnfs", "networks","name"],
            "additionalProperties": False
        }
    },
    "required": ["scenario","schema_version"],
    "additionalProperties": False
}
+
+#scenario_new_schema = {
+# "title":"new scenario information schema",
+# "$schema": "http://json-schema.org/draft-04/schema#",
+# #"oneOf": [nsd_schema_v01, nsd_schema_v02]
+# "oneOf": [nsd_schema_v01]
+#}
+
scenario_edit_schema = {
    "title": "edit scenario information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "name": name_schema,
        "description": description_schema,
        "topology": {
            "type": "object",
            "properties": {
                "nodes": {
                    "type": "object",
                    # property names are node uuids
                    "patternProperties": {
                        "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$": {
                            "type": "object",
                            "properties": {
                                "graph": {
                                    "type": "object",
                                    "properties": {
                                        "x": integer0_schema,
                                        "y": integer0_schema,
                                        "ifaces": {"type": "object"},
                                    },
                                },
                                "description": description_schema,
                                "name": name_schema,
                            },
                        },
                    },
                },
            },
            "required": ["nodes"],
            "additionalProperties": False,
        },
    },
    "additionalProperties": False,
}
+
# All four scenario action verbs (start/deploy/reserve/verify) accept exactly
# the same payload; define it once instead of repeating the literal four times.
_scenario_action_param_schema = {
    "type": "object",
    "properties": {
        "instance_name": name_schema,
        "description": description_schema,
        "datacenter": {"type": "string"}
    },
    "required": ["instance_name"]
}

# Schema for scenario actions: exactly one verb key must be supplied
# (minProperties and maxProperties are both 1).
scenario_action_schema = {
    "title": "scenario action information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "start": _scenario_action_param_schema,
        "deploy": _scenario_action_param_schema,
        "reserve": _scenario_action_param_schema,
        "verify": _scenario_action_param_schema
    },
    "minProperties": 1,
    "maxProperties": 1,
    "additionalProperties": False
}
+
# Minimal inline "scenario" object: lets an instance be created from a bare
# list of networks instead of referencing a stored NSD.
instance_scenario_object = {
    "title": "scenario object used to create an instance not based on any nsd",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "nets": {
            "type": "array",
            "minLength": 1,
            "items": {
                "type": "object",
                "properties": {
                    "name": name_schema,
                    "external": boolean_schema,
                    # bridge -> overlay; ptp -> underlay E-LINE; data -> underlay E-LAN
                    "type": {"enum": ["bridge", "ptp", "data"]},
                },
                "additionalProperties": False,
                "required": ["name", "external", "type"]
            }
        }
    },
    "additionalProperties": False,
    "required": ["nets"]
}
+
# Schema for the v0.1 instance-scenario create request.
instance_scenario_create_schema_v01 = {
    "title": "instance scenario create information schema v0.1",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "schema_version": {"type": "string", "enum": ["0.1"]},
        "instance": {
            "type": "object",
            "properties": {
                "mgmt_keys": {"type": "array", "items": {"type": "string"}},
                "vduImage": name_schema,
                "name": name_schema,
                "description": description_schema,
                "datacenter": name_schema,
                "wim_account": {"oneOf": [boolean_schema, id_schema, null_schema]},
                # BUGFIX: keyword was misspelled "oneOff"; json-schema draft-04
                # silently ignores unknown keywords, so "scenario" was never
                # actually validated. It can be an UUID, a name or a dict.
                "scenario": {"oneOf": [name_schema, instance_scenario_object]},
                "action": {"enum": ["deploy", "reserve", "verify"]},
                # can be true or a dict with datacenter: net_name
                "connect_mgmt_interfaces": {"oneOf": [boolean_schema, {"type": "object"}]},
                "cloud-config": cloud_config_schema,  # common to all vnfs in the instance scenario
                "vnfs": {  # mapping from scenario to datacenter
                    "type": "object",
                    "patternProperties": {
                        ".": {
                            "type": "object",
                            "properties": {
                                "name": name_schema,  # override vnf name
                                "datacenter": name_schema,
                                #"metadata": {"type": "object"},
                                #"user_data": {"type": "string"}
                                #"cloud-config": cloud_config_schema, #particular for a vnf
                                "vdus": {
                                    "type": "object",
                                    "patternProperties": {
                                        ".": {
                                            "type": "object",
                                            "properties": {
                                                "name": name_schema,  # overrides vdu name schema
                                                "mgmt_keys": string_list,
                                                "vduImage": name_schema,
                                                "cloud_init": string_list,
                                                "devices": {
                                                    "type": "object",
                                                    "patternProperties": {
                                                        ".": {
                                                            "vim_id": name_schema,
                                                        }
                                                    }
                                                },
                                                "interfaces": {
                                                    "type": "object",
                                                    "patternProperties": {
                                                        ".": {
                                                            "ip_address": ip_schema,
                                                            "mac_address": mac_schema,
                                                            "floating-ip": boolean_schema,
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                },
                                "networks": {
                                    "type": "object",
                                    "patternProperties": {
                                        ".": {
                                            "type": "object",
                                            "properties": {
                                                "vim-network-name": name_schema,
                                                "vim-network-id": name_schema,
                                                "ip-profile": ip_profile_schema,
                                                "name": name_schema,
                                            }
                                        }
                                    }
                                },
                            }
                        }
                    },
                },
                "networks": {  # mapping from scenario to datacenter
                    "type": "object",
                    "patternProperties": {
                        ".": {
                            "type": "object",
                            "properties": {
                                "interfaces": {
                                    "type": "array",
                                    "minLength": 1,
                                    "items": {
                                        "type": "object",
                                        "properties": {
                                            "ip_address": ip_schema,
                                            "datacenter": name_schema,
                                            "vim-network-name": name_schema,
                                            "vim-network-id": name_schema
                                        },
                                        "patternProperties": {
                                            ".": {"type": "string"}
                                        }
                                    }
                                },
                                "wim_account": {"oneOf": [boolean_schema, id_schema, null_schema]},
                                "ip-profile": ip_profile_schema,
                                "use-network": {
                                    "type": "object",
                                    "properties": {
                                        "instance_scenario_id": id_schema,
                                        # "member_vnf_index": name_schema,  # if not null, network inside VNF
                                        "osm_id": name_schema,  # sce_network osm_id or name
                                    },
                                    "additionalProperties": False,
                                    "required": ["instance_scenario_id", "osm_id"]
                                },
                                # if the network connects VNFs deployed at different sites, you must
                                # specify one entry per site that this network connects to
                                "sites": {
                                    "type": "array",
                                    "minLength": 1,
                                    "items": {
                                        "type": "object",
                                        "properties": {
                                            # By default for an 'external' scenario network openmano looks for
                                            # an existing VIM network to map it; other networks are created at
                                            # the VIM. Use netmap-create to force creating an external
                                            # scenario network.
                                            # datacenter network to use; null if it must be created as an internal net
                                            "netmap-create": {"oneOf": [name_schema, null_schema]},
                                            # netmap-use: an existing VIM network that must be used for this
                                            # scenario network; either the VIM network name (if unambiguous)
                                            # or the VIM net UUID. If both 'netmap-create' and 'netmap-use'
                                            # are supplied, netmap-use precedes; if it fails, openmano
                                            # follows netmap-create.
                                            "netmap-use": name_schema,
                                            "vim-network-name": name_schema,  # override network name
                                            "vim-network-id": name_schema,
                                            #"ip-profile": ip_profile_schema,
                                            "datacenter": name_schema,
                                        }
                                    }
                                },
                            }
                        }
                    },
                },
            },
            "additionalProperties": False,
            "required": ["name"]
        },
    },
    "required": ["instance"],
    "additionalProperties": False
}
+
# Schema for actions on a deployed instance scenario. At least one action key
# must be present (minProperties: 1); several may be combined since the
# one-action limit ("maxProperties") is commented out below.
instance_scenario_action_schema = {
    "title": "instance scenario action information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        # power/lifecycle verbs carry no payload
        "start": null_schema,
        "pause": null_schema,
        "resume": null_schema,
        "shutoff": null_schema,
        "shutdown": null_schema,
        "forceOff": null_schema,
        "rebuild": null_schema,
        "reboot": {
            "type": ["object", "null"],
        },
        "add_public_key": {"type" : "string"},
        "user": nameshort_schema,
        # console protocol to obtain access for; null lets the server choose
        "console": {"type": ["string", "null"], "enum": ["novnc", "xvpvnc", "rdp-html5", "spice-html5", None]},
        # scale VDUs in or out; each entry identifies the vdu and the operation
        "vdu-scaling": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "vdu-id": id_schema,
                    "osm_vdu_id": name_schema,
                    "member-vnf-index": name_schema,
                    "count": integer1_schema,
                    "type": {"enum": ["create", "delete"]},
                    "cloud_init": string_list,
                },
                "additionalProperties": False,
                "minProperties": 1,
                "required": ["type"]
            }
        },
        # optional filters restricting the action to specific vnfs/vms
        "vnfs": {"type": "array", "items": {"type": "string"}},
        "vms": {"type": "array", "items": {"type": "string"}}
    },
    "minProperties": 1,
    #"maxProperties": 1,
    "additionalProperties": False
}
+
# Property set shared by the SDN controller create (sdn_controller_schema)
# and edit (sdn_controller_edit_schema) schemas below.
sdn_controller_properties={
    "name": name_schema,
    # OpenFlow datapath id: eight colon-separated hex byte pairs
    "dpid": {"type": "string", "pattern": "^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){7}$"},
    "description": name_schema,
    "ip": ip_schema,
    "port": port_schema,
    # controller plugin type (e.g. floodlight, onos, odl) -- free short name
    "type": nameshort_schema,
    "url": name_schema,
    "version": {"type": "string", "minLength": 1, "maxLength": 12},
    "user": nameshort_schema,
    "password": passwd_schema,
    # plugin-specific extra configuration
    "config": object_schema,
}
# Create-time schema: wraps the shared property set and makes name/type mandatory.
sdn_controller_schema = {
    "title": "sdn controller information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "sdn_controller": {
            "type": "object",
            "properties": sdn_controller_properties,
            "required": ["name", "type"],
            "additionalProperties": False
        }
    },
    "required": ["sdn_controller"],
    "additionalProperties": False
}
+
# Edit-time schema: same property set as creation but nothing is mandatory,
# so any subset of fields may be updated.
sdn_controller_edit_schema = {
    "title": "sdn controller update information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "sdn_controller": {
            "type": "object",
            "properties": sdn_controller_properties,
            "additionalProperties": False
        }
    },
    "required": ["sdn_controller"],
    "additionalProperties": False
}
+
# Schema describing the mapping between compute-node PCI ports and the
# switch ports of the SDN-controlled fabric they are wired to.
sdn_port_mapping_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title":"sdn port mapping information schema",
    "type": "object",
    "properties": {
        "sdn_port_mapping": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "compute_node": nameshort_schema,
                    "ports": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                # PCI address of the NIC port; null allowed (see pci_extended_schema)
                                "pci": {"oneOf": [null_schema, pci_extended_schema]}, # pci_schema,
                                "switch_port": nameshort_schema,
                                "switch_id": nameshort_schema,
                                "switch_dpid": nameshort_schema,
                                "switch_mac": mac_schema
                            },
                            "required": ["pci"]
                        }
                    }
                },
                "required": ["compute_node", "ports"]
            }
        }
    },
    "required": ["sdn_port_mapping"]
}
+
# Schema for attaching an external (fabric-edge) port to an SDN network.
sdn_external_port_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    # BUGFIX: title contained a typo ("ingformation")
    "title": "External port information",
    "type": "object",
    "properties": {
        "port": {"type": "string", "minLength": 1, "maxLength": 60},
        "vlan": vlan_schema,
        "mac": mac_schema
    },
    "required": ["port"]
}
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+openmano python client used to interact with openmano-server
+"""
+
+import requests
+import json
+import yaml
+import logging
+import sys
+
+__author__ = "Alfonso Tierno, Pablo Montes"
+__date__ = "$09-Mar-2016 09:09:48$"
+__version__ = "0.1.0-r470"
+version_date = "Oct 2017"
+
+from urllib.parse import quote
+
class OpenmanoException(Exception):
    """Root of the openmano client exception hierarchy."""


class OpenmanoBadParamsException(OpenmanoException):
    """Raised when input parameters are missing or malformed."""


class OpenmanoResponseException(OpenmanoException):
    """Raised when the openmano server returns an unexpected response."""


class OpenmanoNotFoundException(OpenmanoException):
    """Raised when the requested item does not exist at the server."""
+
+# class vnf():
+# def __init__(self, message):
+# print "Error: %s" %message
+# print
+# self.print_usage()
+# #self.print_help()
+# print
+# print "Type 'openmano -h' for help"
+
+class openmanoclient():
    # Default HTTP headers for every request: the openmano REST API
    # exchanges YAML payloads in both directions.
    headers_req = {'Accept': 'application/yaml', 'content-type': 'application/yaml'}
+
+ def __init__(self, **kwargs):
+ self.username = kwargs.get("username")
+ self.password = kwargs.get("password")
+ self.endpoint_url = kwargs.get("endpoint_url")
+ self.tenant_id = kwargs.get("tenant_id")
+ self.tenant_name = kwargs.get("tenant_name")
+ self.tenant = None
+ self.datacenter_id = kwargs.get("datacenter_id")
+ self.datacenter_name = kwargs.get("datacenter_name")
+ self.datacenter = None
+ self.logger = logging.getLogger(kwargs.get('logger','manoclient'))
+ if kwargs.get("debug"):
+ self.logger.setLevel(logging.DEBUG)
+
+ def __getitem__(self, index):
+ if index=='tenant_name':
+ return self.tenant_name
+ elif index=='tenant_id':
+ return self.tenant_id
+ elif index=='datacenter_name':
+ return self.datacenter_name
+ elif index=='datacenter_id':
+ return self.datacenter_id
+ elif index=='username':
+ return self.username
+ elif index=='password':
+ return self.password
+ elif index=='endpoint_url':
+ return self.endpoint_url
+ else:
+ raise KeyError("Invalid key '{}'".format(index))
+
+ def __setitem__(self,index, value):
+ if index=='tenant_name':
+ self.tenant_name = value
+ elif index=='tenant_id':
+ self.tenant_id = value
+ elif index=='datacenter_name':
+ self.datacenter_name = value
+ elif index=='datacenter_id':
+ self.datacenter_id = value
+ elif index=='username':
+ self.username = value
+ elif index=='password':
+ self.password = value
+ elif index=='endpoint_url':
+ self.endpoint_url = value
+ else:
+ raise KeyError("Invalid key '{}'".format(index))
+ self.tenant = None # force to reload tenant with different credentials
+ self.datacenter = None # force to reload datacenter with different credentials
+
+ def _parse(self, descriptor, descriptor_format, response=False):
+ #try yaml
+ if descriptor_format and descriptor_format != "json" and descriptor_format != "yaml":
+ raise OpenmanoBadParamsException("'descriptor_format' must be a 'json' or 'yaml' text")
+ if descriptor_format != "json":
+ try:
+ return yaml.load(descriptor, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ error_pos = ""
+ if hasattr(exc, 'problem_mark'):
+ mark = exc.problem_mark
+ error_pos = " at line:{} column:{}s".format(mark.line+1, mark.column+1)
+ error_text = "yaml format error" + error_pos
+ elif descriptor_format != "yaml":
+ try:
+ return json.loads(descriptor)
+ except Exception as e:
+ if response:
+ error_text = "json format error" + str(e)
+
+ if response:
+ raise OpenmanoResponseException(error_text)
+ raise OpenmanoBadParamsException(error_text)
+
+ def _parse_yaml(self, descriptor, response=False):
+ try:
+ return yaml.load(descriptor, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ error_pos = ""
+ if hasattr(exc, 'problem_mark'):
+ mark = exc.problem_mark
+ error_pos = " at line:{} column:{}s".format(mark.line+1, mark.column+1)
+ error_text = "yaml format error" + error_pos
+ if response:
+ raise OpenmanoResponseException(error_text)
+ raise OpenmanoBadParamsException(error_text)
+
+
    def _get_item_uuid(self, item, item_id=None, item_name=None, all_tenants=False):
        """Resolve an item of a collection ("tenants", "datacenters", ...) to its UUID.

        GETs the whole collection and scans it: if item_id is given it is
        validated against the list and returned; otherwise item_name must
        match exactly one entry (ambiguity raises).
        all_tenants: None -> no tenant segment in the URL, False -> the
        already-resolved self.tenant, anything else -> "/any".
        Raises OpenmanoResponseException / OpenmanoNotFoundException.
        """
        if all_tenants == None:
            tenant_text = ""
        elif all_tenants == False:
            # NOTE(review): uses self.tenant directly, not _get_tenant();
            # assumes the tenant UUID is already resolved -- TODO confirm callers
            tenant_text = "/" + self.tenant
        else:
            tenant_text = "/any"
        URLrequest = "{}{}/{}".format(self.endpoint_url, tenant_text, item)
        self.logger.debug("GET %s", URLrequest )
        mano_response = requests.get(URLrequest, headers=self.headers_req)
        self.logger.debug("openmano response: %s", mano_response.text )
        content = self._parse_yaml(mano_response.text, response=True)
        #print content
        found = 0
        if not item_id and not item_name:
            # item[:-1] strips the plural 's' for the message ("tenants" -> "tenant")
            raise OpenmanoResponseException("Missing either {0}_name or {0}_id".format(item[:-1]))
        for i in content[item]:
            if item_id and i["uuid"] == item_id:
                return item_id
            elif item_name and i["name"] == item_name:
                uuid = i["uuid"]
                found += 1

        if found == 0:
            if item_id:
                raise OpenmanoNotFoundException("No {} found with id '{}'".format(item[:-1], item_id))
            else:
                #print(item, item_name)
                raise OpenmanoNotFoundException("No {} found with name '{}'".format(item[:-1], item_name) )
        elif found > 1:
            raise OpenmanoNotFoundException("{} {} found with name '{}'. uuid must be used".format(found, item, item_name))
        return uuid
+
    def _get_item(self, item, uuid=None, name=None, all_tenants=False):
        """GET one item of a collection, by uuid or (resolving it first) by name.

        all_tenants: True -> "/any", None -> no tenant segment,
        False -> the client's own tenant (resolved via _get_tenant).
        Returns the parsed response dict on HTTP 200; otherwise raises
        OpenmanoResponseException with the server content.
        """
        if all_tenants:
            tenant_text = "/any"
        elif all_tenants==None:
            tenant_text = ""
        else:
            tenant_text = "/"+self._get_tenant()
        if not uuid:
            #check that exist
            uuid = self._get_item_uuid(item, uuid, name, all_tenants)

        URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
        self.logger.debug("GET %s", URLrequest )
        mano_response = requests.get(URLrequest, headers=self.headers_req)
        self.logger.debug("openmano response: %s", mano_response.text )

        content = self._parse_yaml(mano_response.text, response=True)
        if mano_response.status_code==200:
            return content
        else:
            raise OpenmanoResponseException(str(content))
+
+ def _get_tenant(self):
+ if not self.tenant:
+ self.tenant = self._get_item_uuid("tenants", self.tenant_id, self.tenant_name, None)
+ return self.tenant
+
+ def _get_datacenter(self):
+ if not self.tenant:
+ self._get_tenant()
+ if not self.datacenter:
+ self.datacenter = self._get_item_uuid("datacenters", self.datacenter_id, self.datacenter_name, False)
+ return self.datacenter
+
    def _create_item(self, item, descriptor, all_tenants=False, api_version=None):
        """POST a descriptor to create one item of a collection.

        descriptor: dict dumped as YAML into the request body.
        all_tenants: True -> "/any", None -> no tenant segment,
            False -> the client's own tenant.
        api_version: any truthy value selects the "/v3" API prefix.
        Returns the parsed response on HTTP 200; otherwise raises
        OpenmanoResponseException with the server content.
        """
        if all_tenants:
            tenant_text = "/any"
        elif all_tenants is None:
            tenant_text = ""
        else:
            tenant_text = "/"+self._get_tenant()
        payload_req = yaml.safe_dump(descriptor)

        api_version_text = ""
        if api_version:
            api_version_text = "/v3"

        #print payload_req

        URLrequest = "{}{apiver}{tenant}/{item}".format(self.endpoint_url, apiver=api_version_text, tenant=tenant_text,
                                                        item=item)
        self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
        mano_response = requests.post(URLrequest, headers=self.headers_req, data=payload_req)
        self.logger.debug("openmano response: %s", mano_response.text)

        content = self._parse_yaml(mano_response.text, response=True)
        if mano_response.status_code == 200:
            return content
        else:
            raise OpenmanoResponseException(str(content))
+
    def _del_item(self, item, uuid=None, name=None, all_tenants=False):
        """DELETE one item of a collection, by uuid or (resolving it first) by name.

        all_tenants: True -> "/any", None -> no tenant segment,
        False -> the client's own tenant.
        Returns the parsed response on HTTP 200; otherwise raises
        OpenmanoResponseException with the server content.
        """
        if all_tenants:
            tenant_text = "/any"
        elif all_tenants==None:
            tenant_text = ""
        else:
            tenant_text = "/"+self._get_tenant()
        if not uuid:
            #check that exist
            uuid = self._get_item_uuid(item, uuid, name, all_tenants)

        URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
        self.logger.debug("DELETE %s", URLrequest )
        mano_response = requests.delete(URLrequest, headers = self.headers_req)
        self.logger.debug("openmano response: %s", mano_response.text )

        content = self._parse_yaml(mano_response.text, response=True)
        if mano_response.status_code==200:
            return content
        else:
            raise OpenmanoResponseException(str(content))
+
    def _list_item(self, item, all_tenants=False, filter_dict=None):
        """GET a whole collection, optionally filtered.

        filter_dict: key/value pairs URL-encoded into the query string.
        all_tenants: True -> "/any", None -> no tenant segment,
        False -> the client's own tenant.
        Returns the parsed response on HTTP 200; otherwise raises
        OpenmanoResponseException with the server content.
        """
        if all_tenants:
            tenant_text = "/any"
        elif all_tenants==None:
            tenant_text = ""
        else:
            tenant_text = "/"+self._get_tenant()

        URLrequest = "{}{}/{}".format(self.endpoint_url, tenant_text, item)
        separator="?"
        if filter_dict:
            for k in filter_dict:
                URLrequest += separator + quote(str(k)) + "=" + quote(str(filter_dict[k]))
                # only the first parameter uses '?'; the rest are joined with '&'
                separator = "&"
        self.logger.debug("openmano GET %s", URLrequest)
        mano_response = requests.get(URLrequest, headers=self.headers_req)
        self.logger.debug("openmano response: %s", mano_response.text )

        content = self._parse_yaml(mano_response.text, response=True)
        if mano_response.status_code==200:
            return content
        else:
            raise OpenmanoResponseException(str(content))
+
+ def _edit_item(self, item, descriptor, uuid=None, name=None, all_tenants=False):
+ if all_tenants:
+ tenant_text = "/any"
+ elif all_tenants==None:
+ tenant_text = ""
+ else:
+ tenant_text = "/"+self._get_tenant()
+
+ if not uuid:
+ #check that exist
+ uuid = self._get_item_uuid("tenants", uuid, name, all_tenants)
+
+ payload_req = yaml.safe_dump(descriptor)
+
+ #print payload_req
+
+ URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
+ self.logger.debug("openmano PUT %s %s", URLrequest, payload_req)
+ mano_response = requests.put(URLrequest, headers = self.headers_req, data=payload_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+
+ #TENANTS
    def list_tenants(self, **kwargs):
        '''Obtain a list of tenants
        Params: can be filtered by 'uuid','name','description'
        Return: Raises an exception on error
               Obtain a dictionary with format {'tenants':[{tenant1_info},{tenant2_info},...]}}
        '''
        # all_tenants=None: tenants live at the API root, outside any tenant path
        return self._list_item("tenants", all_tenants=None, filter_dict=kwargs)
+
    def get_tenant(self, uuid=None, name=None):
        '''Obtain the information of a tenant
        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
        Return: Raises an exception on error, not found, found several
               Obtain a dictionary with format {'tenant':{tenant_info}}
        '''
        # all_tenants=None: tenants live at the API root, outside any tenant path
        return self._get_item("tenants", uuid, name, all_tenants=None)
+
    def delete_tenant(self, uuid=None, name=None):
        '''Delete a tenant
        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
        Return: Raises an exception on error, not found, found several
               Obtain a dictionary with format {'result': text indicating deleted}
        '''
        # all_tenants=None: tenants live at the API root, outside any tenant path
        return self._del_item("tenants", uuid, name, all_tenants=None)
+
+ def create_tenant(self, descriptor=None, descriptor_format=None, name=None, description=None):
+ '''Creates a tenant
+ Params: must supply a descriptor or/and just a name
+ descriptor: with format {'tenant':{new_tenant_info}}
+ newtenant_info must contain 'name', and optionally 'description'
+ must be a dictionary or a json/yaml text.
+ name: the tenant name. Overwrite descriptor name if any
+ description: tenant descriptor.. Overwrite descriptor description if any
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'tenant':{new_tenant_info}}
+ '''
+ if isinstance(descriptor, str):
+ descriptor = self._parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif name:
+ descriptor={"tenant": {"name": name}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'tenant' not in descriptor or len(descriptor)!=1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
+ if name:
+ descriptor['tenant']['name'] = name
+ if description:
+ descriptor['tenant']['description'] = description
+
+ return self._create_item("tenants", descriptor, all_tenants=None)
+
+ def edit_tenant(self, uuid=None, name=None, descriptor=None, descriptor_format=None, new_name=None, new_description=None):
+ '''Edit the parameters of a tenant
+ Params: must supply a descriptor or/and a new_name or new_description
+ uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ descriptor: with format {'tenant':{params to change info}}
+ must be a dictionary or a json/yaml text.
+ name: the tenant name. Overwrite descriptor name if any
+ description: tenant descriptor.. Overwrite descriptor description if any
+ Return: Raises an exception on error, not found or found several
+ Obtain a dictionary with format {'tenant':{newtenant_info}}
+ '''
+
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif new_name or new_description:
+ descriptor={"tenant": {}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'tenant' not in descriptor or len(descriptor)!=1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
+ if new_name:
+ descriptor['tenant']['name'] = new_name
+ if new_description:
+ descriptor['tenant']['description'] = new_description
+
+ return self._edit_item("tenants", descriptor, uuid, name, all_tenants=None)
+
+ #DATACENTERS
+
    def list_datacenters(self, all_tenants=False, **kwargs):
        '''Obtain a list of datacenters, that are the VIM information at openmano
        Params: can be filtered by 'uuid','name','vim_url','type'
            all_tenants: True lists datacenters of every tenant ("/any" path)
        Return: Raises an exception on error
               Obtain a dictionary with format {'datacenters':[{datacenter1_info},{datacenter2_info},...]}}
        '''
        return self._list_item("datacenters", all_tenants, filter_dict=kwargs)
+
    def get_datacenter(self, uuid=None, name=None, all_tenants=False):
        '''Obtain the information of a datacenter
        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
            all_tenants: True looks across every tenant ("/any" path)
        Return: Raises an exception on error, not found, found several
               Obtain a dictionary with format {'datacenter':{datacenter_info}}
        '''
        return self._get_item("datacenters", uuid, name, all_tenants)
+
    def delete_datacenter(self, uuid=None, name=None):
        '''Delete a datacenter
        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
        Return: Raises an exception on error, not found, found several, not free
               Obtain a dictionary with format {'result': text indicating deleted}
        '''
        if not uuid:
            # check that exist
            # resolved across all tenants: the datacenter may not be attached to ours
            uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=True)
        return self._del_item("datacenters", uuid, name, all_tenants=None)
+
+ def create_datacenter(self, descriptor=None, descriptor_format=None, name=None, vim_url=None, **kwargs):
+#, type="openvim", public=False, description=None):
+ '''Creates a datacenter
+ Params: must supply a descriptor or/and just a name and vim_url
+ descriptor: with format {'datacenter':{new_datacenter_info}}
+ newdatacenter_info must contain 'name', 'vim_url', and optionally 'description'
+ must be a dictionary or a json/yaml text.
+ name: the datacenter name. Overwrite descriptor name if any
+ vim_url: the datacenter URL. Overwrite descriptor vim_url if any
+ vim_url_admin: the datacenter URL for administrative issues. Overwrite descriptor vim_url if any
+ vim_type: the datacenter type, can be openstack or openvim. Overwrite descriptor type if any
+ public: boolean, by default not public
+ description: datacenter description. Overwrite descriptor description if any
+ config: dictionary with extra configuration for the concrete datacenter
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'datacenter':{new_datacenter_info}}
+ '''
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif name and vim_url:
+ descriptor={"datacenter": {"name": name, "vim_url": vim_url}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor, or name and vim_url")
+
+ if 'datacenter' not in descriptor or len(descriptor)!=1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'datacenter' field")
+ if name:
+ descriptor['datacenter']['name'] = name
+ if vim_url:
+ descriptor['datacenter']['vim_url'] = vim_url
+ for param in kwargs:
+ descriptor['datacenter'][param] = kwargs[param]
+
+ return self._create_item("datacenters", descriptor, all_tenants=None)
+
+ def edit_datacenter(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
+ '''Edit the parameters of a datacenter
+ Params: must supply a descriptor or/and a parameter to change
+ uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ descriptor: with format {'datacenter':{params to change info}}
+ must be a dictionary or a json/yaml text.
+ parameters to change can be supplyied by the descriptor or as parameters:
+ new_name: the datacenter name
+ vim_url: the datacenter URL
+ vim_url_admin: the datacenter URL for administrative issues
+ vim_type: the datacenter type, can be openstack or openvim.
+ public: boolean, available to other tenants
+ description: datacenter description
+ Return: Raises an exception on error, not found or found several
+ Obtain a dictionary with format {'datacenter':{new_datacenter_info}}
+ '''
+
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif kwargs:
+ descriptor={"datacenter": {}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'datacenter' not in descriptor or len(descriptor)!=1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'datacenter' field")
+ for param in kwargs:
+ if param=='new_name':
+ descriptor['datacenter']['name'] = kwargs[param]
+ else:
+ descriptor['datacenter'][param] = kwargs[param]
+ return self._edit_item("datacenters", descriptor, uuid, name, all_tenants=None)
+
+ def attach_datacenter(self, uuid=None, name=None, descriptor=None, descriptor_format=None, vim_user=None, vim_password=None, vim_tenant_name=None, vim_tenant_id=None):
+ #check that exist
+ uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=True)
+ tenant_text = "/"+self._get_tenant()
+
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif vim_user or vim_password or vim_tenant_name or vim_tenant_id:
+ descriptor={"datacenter": {}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor or params")
+
+ if vim_user or vim_password or vim_tenant_name or vim_tenant_id:
+ #print args.name
+ try:
+ if vim_user:
+ descriptor['datacenter']['vim_user'] = vim_user
+ if vim_password:
+ descriptor['datacenter']['vim_password'] = vim_password
+ if vim_tenant_name:
+ descriptor['datacenter']['vim_tenant_name'] = vim_tenant_name
+ if vim_tenant_id:
+ descriptor['datacenter']['vim_tenant'] = vim_tenant_id
+ except (KeyError, TypeError) as e:
+ if str(e)=='datacenter': error_pos= "missing field 'datacenter'"
+ else: error_pos="wrong format"
+ raise OpenmanoBadParamsException("Wrong datacenter descriptor: " + error_pos)
+
+ payload_req = yaml.safe_dump(descriptor)
+ #print payload_req
+ URLrequest = "{}{}/datacenters/{}".format(self.endpoint_url, tenant_text, uuid)
+ self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+ mano_response = requests.post(URLrequest, headers = self.headers_req, data=payload_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+
    def detach_datacenter(self, uuid=None, name=None):
        """Detach a datacenter from the client's tenant.

        Params: uuid or/and name. If only name is supplied it is resolved
            within the client's own tenant and must be unique.
        Return: the parsed server answer on HTTP 200; otherwise raises
            OpenmanoResponseException.
        """
        if not uuid:
            #check that exist
            uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=False)
        tenant_text = "/"+self._get_tenant()
        URLrequest = "{}{}/datacenters/{}".format(self.endpoint_url, tenant_text, uuid)
        self.logger.debug("openmano DELETE %s", URLrequest)
        mano_response = requests.delete(URLrequest, headers = self.headers_req)
        self.logger.debug("openmano response: %s", mano_response.text )

        content = self._parse_yaml(mano_response.text, response=True)
        if mano_response.status_code==200:
            return content
        else:
            raise OpenmanoResponseException(str(content))
+
+ # WIMS
+
    def list_wims(self, all_tenants=False, **kwargs):
        '''Obtain a list of wims, that are the WIM information at openmano
        Params: can be filtered by 'uuid','name','wim_url','type'
            all_tenants: True lists wims of every tenant ("/any" path)
        Return: Raises an exception on error
               Obtain a dictionary with format {'wims':[{wim1_info},{wim2_info},...]}}
        '''
        return self._list_item("wims", all_tenants, filter_dict=kwargs)
+
    def get_wim(self, uuid=None, name=None, all_tenants=False):
        '''Obtain the information of a wim
        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
            all_tenants: True looks across every tenant ("/any" path)
        Return: Raises an exception on error, not found, found several
               Obtain a dictionary with format {'wim':{wim_info}}
        '''
        return self._get_item("wims", uuid, name, all_tenants)
+
    def delete_wim(self, uuid=None, name=None):
        '''Delete a wim
        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
        Return: Raises an exception on error, not found, found several, not free
               Obtain a dictionary with format {'result': text indicating deleted}
        '''
        if not uuid:
            # check that exist
            # resolved across all tenants: the wim may not be attached to ours
            uuid = self._get_item_uuid("wims", uuid, name, all_tenants=True)
        return self._del_item("wims", uuid, name, all_tenants=None)
+
+ def create_wim(self, descriptor=None, descriptor_format=None, name=None, wim_url=None, **kwargs):
+ # , type="openvim", public=False, description=None):
+ '''Creates a wim
+ Params: must supply a descriptor or/and just a name and a wim_url
+ descriptor: with format {'wim':{new_wim_info}}
+ new_wim_info must contain 'name', 'wim_url', and optionally 'description'
+ must be a dictionary or a json/yaml text.
+ name: the wim name. Overwrite descriptor name if any
+ wim_url: the wim URL. Overwrite descriptor vim_url if any
+ wim_type: the WIM type, can be ietfl2vpn, odl, onos. Overwrite descriptor type if any
+ public: boolean, by default not public
+ description: wim description. Overwrite descriptor description if any
+ config: dictionary with extra configuration for the concrete wim
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'wim:{new_wim_info}}
+ '''
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif name and wim_url:
+ descriptor = {"wim": {"name": name, "wim_url": wim_url}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor, or name and wim_url")
+
+ if 'wim' not in descriptor or len(descriptor) != 1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'wim' field")
+ if name:
+ descriptor['wim']['name'] = name
+ if wim_url:
+ descriptor['wim']['wim_url'] = wim_url
+ for param in kwargs:
+ descriptor['wim'][param] = kwargs[param]
+
+ return self._create_item("wims", descriptor, all_tenants=None)
+
+ def edit_wim(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False,
+ **kwargs):
+ '''Edit the parameters of a wim
+ Params: must supply a descriptor or/and a parameter to change
+ uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ descriptor: with format {'wim':{params to change info}}
+ must be a dictionary or a json/yaml text.
+ parameters to change can be supplied by the descriptor or as parameters:
+ new_name: the wim name
+ wim_url: the wim URL
+ wim_type: the wim type, can be ietfl2vpn, onos, odl
+ public: boolean, available to other tenants
+ description: wim description
+ Return: Raises an exception on error, not found or found several
+ Obtain a dictionary with format {'wim':{new_wim_info}}
+ '''
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif kwargs:
+ descriptor = {"wim": {}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'wim' not in descriptor or len(descriptor) != 1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'wim' field")
+ for param in kwargs:
+ if param == 'new_name':
+ descriptor['wim']['name'] = kwargs[param]
+ else:
+ descriptor['wim'][param] = kwargs[param]
+ return self._edit_item("wims", descriptor, uuid, name, all_tenants=None)
+
+ def attach_wim(self, uuid=None, name=None, descriptor=None, descriptor_format=None, wim_user=None,
+ wim_password=None, wim_tenant_name=None, wim_tenant_id=None):
+ # check that exist
+ uuid = self._get_item_uuid("wims", uuid, name, all_tenants=True)
+ tenant_text = "/" + self._get_tenant()
+
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif wim_user or wim_password or wim_tenant_name or wim_tenant_id:
+ descriptor = {"wim": {}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor or params")
+
+ if wim_user or wim_password or wim_tenant_name or wim_tenant_id:
+ # print args.name
+ try:
+ if wim_user:
+ descriptor['wim']['wim_user'] = wim_user
+ if wim_password:
+ descriptor['wim']['wim_password'] = wim_password
+ if wim_tenant_name:
+ descriptor['wim']['wim_tenant_name'] = wim_tenant_name
+ if wim_tenant_id:
+ descriptor['wim']['wim_tenant'] = wim_tenant_id
+ except (KeyError, TypeError) as e:
+ if str(e) == 'wim':
+ error_pos = "missing field 'wim'"
+ else:
+ error_pos = "wrong format"
+ raise OpenmanoBadParamsException("Wrong wim descriptor: " + error_pos)
+
+ payload_req = yaml.safe_dump(descriptor)
+ # print payload_req
+ URLrequest = "{}{}/wims/{}".format(self.endpoint_url, tenant_text, uuid)
+ self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+ mano_response = requests.post(URLrequest, headers=self.headers_req, data=payload_req)
+ self.logger.debug("openmano response: %s", mano_response.text)
+
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code == 200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+
+ def detach_wim(self, uuid=None, name=None):
+ if not uuid:
+ # check that exist
+ uuid = self._get_item_uuid("wims", uuid, name, all_tenants=False)
+ tenant_text = "/" + self._get_tenant()
+ URLrequest = "{}{}/wims/{}".format(self.endpoint_url, tenant_text, uuid)
+ self.logger.debug("openmano DELETE %s", URLrequest)
+ mano_response = requests.delete(URLrequest, headers=self.headers_req)
+ self.logger.debug("openmano response: %s", mano_response.text)
+
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code == 200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+
+ #VNFS
+ def list_vnfs(self, all_tenants=False, **kwargs):
+ '''Obtain a list of vnfs
+ Params: can be filtered by 'uuid','name','description','public', "tenant_id"
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'vnfs':[{vnf1_info},{vnf2_info},...]}}
+ '''
+ return self._list_item("vnfs", all_tenants, kwargs)
+
+ def get_vnf(self, uuid=None, name=None, all_tenants=False):
+ '''Obtain the information of a vnf
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several
+ Obtain a dictionary with format {'vnf':{vnf_info}}
+ '''
+ return self._get_item("vnfs", uuid, name, all_tenants)
+
+ def delete_vnf(self, uuid=None, name=None, all_tenants=False):
+ '''Delete a vnf
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several, not free
+ Obtain a dictionary with format {'result': text indicating deleted}
+ '''
+ return self._del_item("vnfs", uuid, name, all_tenants)
+
+ def create_vnf(self, descriptor=None, descriptor_format=None, **kwargs):
+ '''Creates a vnf
+ Params: must supply a descriptor
+ descriptor: with format {'vnf':{new_vnf_info}}
+ must be a dictionary or a json/yaml text.
+ must be a dictionary or a json/yaml text.
+ Other parameters can be:
+ #TODO, revise
+ name: the vnf name. Overwrite descriptor name if any
+ image_path: Can be a string or a string list. Overwrite the image_path at descriptor
+ description: vnf descriptor.. Overwrite descriptor description if any
+ public: boolean, available to other tenants
+ class: user text for vnf classification
+ tenant_id: Propietary tenant
+ ...
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'vnf':{new_vnf_info}}
+ '''
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ try:
+ if "vnfd:vnfd-catalog" in descriptor or "vnfd-catalog" in descriptor:
+ api_version = "v3"
+ token = "vnfd"
+ vnfd_catalog = descriptor.get("vnfd:vnfd-catalog")
+ if not vnfd_catalog:
+ vnfd_catalog = descriptor.get("vnfd-catalog")
+ vnfds = vnfd_catalog.get("vnfd:vnfd")
+ if not vnfds:
+ vnfds = vnfd_catalog.get("vnfd")
+ vnfd = vnfds[0]
+ vdu_list = vnfd["vdu"]
+ elif "vnf" in descriptor: # old API
+ api_version = None
+ token = "vnfs"
+ vnfd = descriptor['vnf']
+ vdu_list = vnfd["VNFC"]
+ else:
+ raise OpenmanoBadParamsException("Invalid VNF Descriptor must contain only one 'vnf' field or vnd-catalog")
+ except (KeyError, TypeError) as e:
+ raise OpenmanoBadParamsException("Invalid VNF Descriptor. Missing field {}".format(e))
+
+ if kwargs:
+ try:
+ if kwargs.get('name'):
+ vnfd['name'] = kwargs['name']
+ if kwargs.get('description'):
+ vnfd['description'] = kwargs['description']
+ if kwargs.get('image_path'):
+ error_param = 'image_path'
+ image_list = kwargs['image_path'].split(",")
+ image_item = image_list.pop(0)
+ # print "image-path", image_path_
+ for vdu in vdu_list:
+ if api_version == "v3":
+ if vdu.get("image"):
+ if image_item:
+ vdu['image'] = image_item
+ if "image-checksum" in vdu:
+ del vdu["image-checksum"]
+ if image_list:
+ image_item = image_list.pop(0)
+ for vol in vdu.get("volumes", ()): # image name in volumes
+ if image_item:
+ vol["image"] = image_item
+ if "image-checksum" in vol:
+ del vol["image-checksum"]
+ if image_list:
+ image_item = image_list.pop(0)
+ else:
+ if image_item:
+ vdu['VNFC image'] = image_item
+ if "image name" in vdu:
+ del vdu["image name"]
+ if "image checksum" in vdu:
+ del vdu["image checksum"]
+ if image_list:
+ image_item = image_list.pop(0)
+ for vol in vdu.get('devices', ()):
+ if vol['type'] != 'disk':
+ continue
+ if image_item:
+ vol['image'] = image_item
+ if "image name" in vol:
+ del vol["image name"]
+ if "image checksum" in vol:
+ del vol["image checksum"]
+ if image_list:
+ image_item = image_list.pop(0)
+ if kwargs.get('image_name'): # image name precedes if both are supplied
+ error_param = 'image_name'
+ image_list = kwargs['image_name'].split(",")
+ image_item = image_list.pop(0)
+ for vdu in vdu_list:
+ if api_version == "v3":
+ if vdu.get("image"):
+ if image_item:
+ vdu['image'] = image_item
+ if "image-checksum" in vdu:
+ del vdu["image-checksum"]
+ if image_list:
+ image_item = image_list.pop(0)
+ for vol in vdu.get("volumes", ()): # image name in volumes
+ if image_item:
+ vol["image"] = image_item
+ if "image-checksum" in vol:
+ del vol["image-checksum"]
+ if image_list:
+ image_item = image_list.pop(0)
+ else:
+ if image_item:
+ vdu['image name'] = image_item
+ if "VNFC image" in vdu:
+ del vdu["VNFC image"]
+ if image_list:
+ image_item = image_list.pop(0)
+ for vol in vdu.get('devices', ()):
+ if vol['type'] != 'disk':
+ continue
+ if image_item:
+ vol['image name'] = image_item
+ if "image" in vol:
+ del vol["image"]
+ if "image checksum" in vol:
+ del vol["image checksum"]
+ if image_list:
+ image_item = image_list.pop(0)
+
+ if kwargs.get('image_checksum'):
+ error_param = 'image_checksum'
+ image_list = kwargs['image_checksum'].split(",")
+ image_item = image_list.pop(0)
+ for vdu in vdu_list:
+ if api_version == "v3":
+ if vdu.get("image"):
+ if image_item:
+ vdu['image-checksum'] = image_item
+ if image_list:
+ image_item = image_list.pop(0)
+ for vol in vdu.get("volumes", ()): # image name in volumes
+ if image_item:
+ vol["mage-checksum"] = image_item
+ if image_list:
+ image_item = image_list.pop(0)
+ else:
+ if image_item:
+ vdu['image checksum'] = image_item
+ if "VNFC image" in vdu:
+ del vdu["VNFC image"]
+ if image_list:
+ image_item = image_list.pop(0)
+ for vol in vdu.get('devices', ()):
+ if vol['type'] != 'disk':
+ continue
+ if image_item:
+ vol['image checksum'] = image_item
+ if "image" in vol:
+ del vol["image"]
+ if image_list:
+ image_item = image_list.pop(0)
+ except IndexError:
+ raise OpenmanoBadParamsException("{} contains more items than {} at descriptor".format(
+ error_param, "vnfd-catalog:vnfd:vdu" if api_version else "vnf:VNFC"))
+ except (KeyError, TypeError) as e:
+ raise OpenmanoBadParamsException("Invalid VNF Descriptor. Missing field {}".format(e))
+ return self._create_item(token, descriptor, api_version=api_version)
+
+# def edit_vnf(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
+# '''Edit the parameters of a vnf
+# Params: must supply a descriptor or/and a parameters to change
+# uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+# descriptor: with format {'vnf':{params to change info}}
+# parameters to change can be supplyied by the descriptor or as parameters:
+# new_name: the vnf name
+# vim_url: the vnf URL
+# vim_url_admin: the vnf URL for administrative issues
+# vim_type: the vnf type, can be openstack or openvim.
+# public: boolean, available to other tenants
+# description: vnf description
+# Return: Raises an exception on error, not found or found several
+# Obtain a dictionary with format {'vnf':{new_vnf_info}}
+# '''
+#
+# if isinstance(descriptor, str):
+# descriptor = self.parse(descriptor, descriptor_format)
+# elif descriptor:
+# pass
+# elif kwargs:
+# descriptor={"vnf": {}}
+# else:
+# raise OpenmanoBadParamsException("Missing descriptor")
+#
+# if 'vnf' not in descriptor or len(descriptor)>2:
+# raise OpenmanoBadParamsException("Descriptor must contain only one 'vnf' field")
+# for param in kwargs:
+# if param=='new_name':
+# descriptor['vnf']['name'] = kwargs[param]
+# else:
+# descriptor['vnf'][param] = kwargs[param]
+# return self._edit_item("vnfs", descriptor, uuid, name, all_tenants=None)
+
+ #SCENARIOS
+ def list_scenarios(self, all_tenants=False, **kwargs):
+ '''Obtain a list of scenarios
+ Params: can be filtered by 'uuid','name','description','public', "tenant_id"
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'scenarios':[{scenario1_info},{scenario2_info},...]}}
+ '''
+ return self._list_item("scenarios", all_tenants, kwargs)
+
+ def get_scenario(self, uuid=None, name=None, all_tenants=False):
+ '''Obtain the information of a scenario
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several
+ Obtain a dictionary with format {'scenario':{scenario_info}}
+ '''
+ return self._get_item("scenarios", uuid, name, all_tenants)
+
+ def delete_scenario(self, uuid=None, name=None, all_tenants=False):
+ '''Delete a scenario
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several, not free
+ Obtain a dictionary with format {'result': text indicating deleted}
+ '''
+ return self._del_item("scenarios", uuid, name, all_tenants)
+
+ def create_scenario(self, descriptor=None, descriptor_format=None, **kwargs):
+ """Creates a scenario
+ Params: must supply a descriptor
+ descriptor: with format {'scenario':{new_scenario_info}}
+ must be a dictionary or a json/yaml text.
+ Other parameters can be:
+ name: the scenario name. Overwrite descriptor name if any
+ description: scenario descriptor.. Overwrite descriptor description if any
+ public: boolean, available to other tenants
+ tenant_id. Propietary tenant
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'scenario':{new_scenario_info}}
+ """
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ try:
+ if "nsd:nsd-catalog" in descriptor or "nsd-catalog" in descriptor:
+ api_version = "v3"
+ token = "nsd"
+ nsd_catalog = descriptor.get("nsd:nsd-catalog")
+ if not nsd_catalog:
+ nsd_catalog = descriptor.get("nsd-catalog")
+ nsds = nsd_catalog.get("nsd:nsd")
+ if not nsds:
+ nsds = nsd_catalog.get("nsd")
+ nsd = nsds[0]
+ elif "scenario" in descriptor: # old API
+ api_version = None
+ token = "scenarios"
+ nsd = descriptor['scenario']
+ else:
+ raise OpenmanoBadParamsException("Invalid NS Descriptor must contain only one 'scenario' field or nsd-catalog")
+ except (KeyError, TypeError) as e:
+ raise OpenmanoBadParamsException("Invalid NS Descriptor. Missing field {}".format(e))
+
+ for param in kwargs:
+ nsd[param] = kwargs[param]
+ return self._create_item(token, descriptor, api_version=api_version)
+
+ def edit_scenario(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
+ '''Edit the parameters of a scenario
+ Params: must supply a descriptor or/and a parameters to change
+ uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ descriptor: with format {'scenario':{params to change info}}
+ must be a dictionary or a json/yaml text.
+ parameters to change can be supplyied by the descriptor or as parameters:
+ new_name: the scenario name
+ public: boolean, available to other tenants
+ description: scenario description
+ tenant_id. Propietary tenant
+ Return: Raises an exception on error, not found or found several
+ Obtain a dictionary with format {'scenario':{new_scenario_info}}
+ '''
+
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif kwargs:
+ descriptor={"scenario": {}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'scenario' not in descriptor or len(descriptor)>2:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'scenario' field")
+ for param in kwargs:
+ if param=='new_name':
+ descriptor['scenario']['name'] = kwargs[param]
+ else:
+ descriptor['scenario'][param] = kwargs[param]
+ return self._edit_item("scenarios", descriptor, uuid, name, all_tenants=None)
+
+
+ #INSTANCE-SCENARIOS
+ def list_instances(self, all_tenants=False, **kwargs):
+ '''Obtain a list of instances
+ Params: can be filtered by 'uuid','name','description','scenario_id', "tenant_id"
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'instances':[{instance1_info},{instance2_info},...]}}
+ '''
+ return self._list_item("instances", all_tenants, kwargs)
+
+ def get_instance(self, uuid=None, name=None, all_tenants=False):
+ '''Obtain the information of a instance
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several
+ Obtain a dictionary with format {'instance':{instance_info}}
+ '''
+ return self._get_item("instances", uuid, name, all_tenants)
+
+ def delete_instance(self, uuid=None, name=None, all_tenants=False):
+ '''Delete a instance
+ Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+ Return: Raises an exception on error, not found, found several, not free
+ Obtain a dictionary with format {'result': text indicating deleted}
+ '''
+ return self._del_item("instances", uuid, name, all_tenants)
+
+ def create_instance(self, descriptor=None, descriptor_format=None, name=None, **kwargs):
+ '''Creates a instance
+ Params: must supply a descriptor or/and a name and scenario
+ descriptor: with format {'instance':{new_instance_info}}
+ must be a dictionary or a json/yaml text.
+ name: the instance name. Overwrite descriptor name if any
+ Other parameters can be:
+ description: instance descriptor.. Overwrite descriptor description if any
+ datacenter_name, datacenter_id: datacenter where to be deployed
+ scenario_name, scenario_id: Scenario this instance is based on
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'instance':{new_instance_info}}
+ '''
+ if isinstance(descriptor, str):
+ descriptor = self.parse(descriptor, descriptor_format)
+ elif descriptor:
+ pass
+ elif name and ("scenario_name" in kwargs or "scenario_id" in kwargs):
+ descriptor = {"instance": {"name": name}}
+ else:
+ raise OpenmanoBadParamsException("Missing descriptor")
+
+ if 'instance' not in descriptor or len(descriptor)>2:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'instance' field, and an optional version")
+ if name:
+ descriptor['instance']["name"] = name
+ if "scenario_name" in kwargs or "scenario_id" in kwargs:
+ descriptor['instance']["scenario"] = self._get_item_uuid("scenarios", kwargs.get("scenario_id"), kwargs.get("scenario_name"))
+ if "datacenter_name" in kwargs or "datacenter_id" in kwargs:
+ descriptor['instance']["datacenter"] = self._get_item_uuid("datacenters", kwargs.get("datacenter_id"), kwargs.get("datacenter_name"))
+ if "description" in kwargs:
+ descriptor['instance']["description"] = kwargs.get("description")
+ #for param in kwargs:
+ # descriptor['instance'][param] = kwargs[param]
+ if "datacenter" not in descriptor['instance']:
+ descriptor['instance']["datacenter"] = self._get_datacenter()
+ return self._create_item("instances", descriptor)
+
+ #VIM ACTIONS
+ def vim_action(self, action, item, uuid=None, all_tenants=False, **kwargs):
+ '''Perform an action over a vim
+ Params:
+ action: can be 'list', 'get'/'show', 'delete' or 'create'
+ item: can be 'tenants' or 'networks'
+ uuid: uuid of the tenant/net to show or to delete. Ignore otherwise
+ other parameters:
+ datacenter_name, datacenter_id: datacenters to act on, if missing uses classes store datacenter
+ descriptor, descriptor_format: descriptor needed on creation, can be a dict or a yaml/json str
+ must be a dictionary or a json/yaml text.
+ name: for created tenant/net Overwrite descriptor name if any
+ description: tenant descriptor. Overwrite descriptor description if any
+
+ Return: Raises an exception on error
+ Obtain a dictionary with format {'tenant':{new_tenant_info}}
+ '''
+ if item not in ("tenants", "networks", "images"):
+ raise OpenmanoBadParamsException("Unknown value for item '{}', must be 'tenants', 'nets' or "
+ "images".format(str(item)))
+
+ image_actions = ['list','get','show','delete']
+ if item == "images" and action not in image_actions:
+ raise OpenmanoBadParamsException("Only available actions for item '{}' are {}\n"
+ "Requested action was '{}'".format(item, ', '.join(image_actions), action))
+ if all_tenants:
+ tenant_text = "/any"
+ else:
+ tenant_text = "/"+self._get_tenant()
+
+ if "datacenter_id" in kwargs or "datacenter_name" in kwargs:
+ datacenter = self._get_item_uuid("datacenters", kwargs.get("datacenter_id"), kwargs.get("datacenter_name"), all_tenants=all_tenants)
+ else:
+ datacenter = self._get_datacenter()
+
+ if action=="list":
+ URLrequest = "{}{}/vim/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item)
+ self.logger.debug("GET %s", URLrequest )
+ mano_response = requests.get(URLrequest, headers=self.headers_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+ elif action=="get" or action=="show":
+ URLrequest = "{}{}/vim/{}/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item, uuid)
+ self.logger.debug("GET %s", URLrequest )
+ mano_response = requests.get(URLrequest, headers=self.headers_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+ elif action=="delete":
+ URLrequest = "{}{}/vim/{}/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item, uuid)
+ self.logger.debug("DELETE %s", URLrequest )
+ mano_response = requests.delete(URLrequest, headers=self.headers_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+ elif action=="create":
+ if "descriptor" in kwargs:
+ if isinstance(kwargs["descriptor"], str):
+ descriptor = self._parse(kwargs["descriptor"], kwargs.get("descriptor_format") )
+ else:
+ descriptor = kwargs["descriptor"]
+ elif "name" in kwargs:
+ descriptor={item[:-1]: {"name": kwargs["name"]}}
+ else:
+ raise OpenmanoResponseException("Missing descriptor")
+
+ if item[:-1] not in descriptor or len(descriptor)!=1:
+ raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
+ if "name" in kwargs:
+ descriptor[ item[:-1] ]['name'] = kwargs["name"]
+ if "description" in kwargs:
+ descriptor[ item[:-1] ]['description'] = kwargs["description"]
+ payload_req = yaml.safe_dump(descriptor)
+ #print payload_req
+ URLrequest = "{}{}/vim/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item)
+ self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+ mano_response = requests.post(URLrequest, headers = self.headers_req, data=payload_req)
+ self.logger.debug("openmano response: %s", mano_response.text )
+ content = self._parse_yaml(mano_response.text, response=True)
+ if mano_response.status_code==200:
+ return content
+ else:
+ raise OpenmanoResponseException(str(content))
+ else:
+ raise OpenmanoBadParamsException("Unknown value for action '{}".format(str(action)))
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#HTTP Server parameters (MANO API). IP address and port where openmanod listens
+# IPtables/firewalld must allow this port
+# for CentOS/RedHat, firewalld is configured at '/etc/firewalld/services/openmanod.xml'
+# edit this file and reload firewalld with 'firewall-cmd --reload' if port is changed
+http_host: 0.0.0.0 # IP address, (by default, 0.0.0.0 means that it will listen in all interfaces)
+http_port: 9090 # General port (by default, 9090)
+#http_admin_port: 9095 # Admin port where openmano is listening (when missing, no administration server is launched)
+ # Not used in current version!
+
+#Parameters for a VIM console access. Can be directly the VIM URL or a proxy to offer the openmano IP address
+http_console_proxy: False #by default True. If False proxy is not implemented and VIM URL is offered. It is
+ #assumed then that the client can access the VIMs directly
+#http_console_host: <ip> #by default the same as 'http_host'. However, if the openmano server is behind a NAT/proxy
+ #you should specify the public IP used to access the server. Also when 'http_host' is
+ #0.0.0.0 you should specify the concrete IP address (or name) the server is accessed
+# Ports to be used. Comma separated list. Can contain a {"from":<port>, "to":<port>} entry
+#e.g. from 9000 to 9005: [{"from":9000, "to":9005}], or also [9000,9001,9002,9003,9004,9005]
+#e.g. from 9000 to 9100 apart from 9050,9053: [{"from":9000, "to":9049},9051,9052,{"from":9054, "to":9099}]
+http_console_ports: [{"from":9096, "to":9110}]
+
+#Database parameters
+db_host: localhost # by default localhost
+db_user: mano # DB user
+db_passwd: manopw # DB password
+db_name: mano_db # Name of the MANO DB
+# Database ovim parameters
+db_ovim_host: localhost # by default localhost
+db_ovim_user: mano # DB user
+db_ovim_passwd: manopw # DB password
+db_ovim_name: mano_vim_db # Name of the OVIM MANO DB
+
+
+#other MANO parameters
+# Folder where the VNF descriptors will be stored
+# The folder will be created in the execution folder if it does not exist
+#vnf_repository: "./vnfrepo" # Use an absolute path to avoid misunderstandings
+
+# Indicates if at VNF onboarding, flavors and images are loaded at all related VIMs,
+# in order to speed up the later instantiation.
+auto_push_VNF_to_VIMs: False # by default True
+
+#general logging parameters
+ #choose among: DEBUG, INFO, WARNING, ERROR, CRITICAL
+log_level: INFO #general log levels for internal logging
+#standard output is used unless 'log_file' is specified
+#log_file: /var/log/openmano/openmano.log
+
+#individual logging settings
+log_level_db: ERROR #database log levels
+#log_file_db: /opt/openmano/logs/openmano_db.log
+#log_level_vim: DEBUG #VIM connection log levels
+#log_file_vim: /opt/openmano/logs/openmano_vimconn.log
+#log_level_wim: DEBUG #WIM connection log levels
+#log_file_wim: /opt/openmano/logs/openmano_wimconn.log
+#log_level_nfvo: DEBUG #Main engine log levels
+#log_file_nfvo: /opt/openmano/logs/openmano_nfvo.log
+#log_level_http: DEBUG #Main engine log levels
+#log_file_http: /opt/openmano/logs/openmano_http.log
+#log_level_console: DEBUG #proxy console log levels
+#log_file_console: /opt/openmano/logs/openmano_console.log
+#log_level_ovim: DEBUG #ovim library log levels
+#log_file_ovim: /opt/openmano/logs/openmano_ovim.log
+#log_level_sdn: DEBUG
+#log_file_sdn: /opt/openmano/logs/openmano_sdn.log
+#log_level_sdnconn: DEBUG
+#log_file_sdnconn: /opt/openmano/logs/openmano_sdnconn.log
+
+#Uncomment to send logs via IP to an external host
+#log_socket_host: localhost
+log_socket_port: 9022
+log_socket_level: DEBUG #general log levels for socket logging
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+openmano server.
+Main program that implements a reference NFVO (Network Functions Virtualisation Orchestrator).
+It interfaces with an NFV VIM through its API and offers a northbound interface, based on REST (openmano API),
+where NFV services are offered including the creation and deletion of VNF templates, VNF instances,
+network service templates and network service instances.
+
+It loads the configuration file and launches the http_server thread that will listen requests using openmano API.
+"""
+
+import time
+import sys
+import getopt
+import yaml
+from os import environ, path as os_path
+from jsonschema import validate as js_v, exceptions as js_e
+import logging
+import logging.handlers as log_handlers
+import socket
+
+from yaml import MarkedYAMLError
+
+from osm_ro import httpserver, nfvo, nfvo_db
+from osm_ro.openmano_schemas import config_schema
+from osm_ro.db_base import db_base_Exception
+from osm_ro.wim.engine import WimEngine
+from osm_ro.wim.persistence import WimPersistence
+import osm_ro
+
+__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+__date__ = "$26-aug-2014 11:09:29$"
+__version__ = "8.0.0.post1"
+version_date = "Oct 2020"
+database_version = 42 # expected database schema version
+
+global global_config
+global logger
+
+
class LoadConfigurationException(Exception):
    """Raised by load_configuration() when the openmanod configuration file
    cannot be read, parsed as YAML, or validated against config_schema."""
    pass
+
+
def load_configuration(configuration_file):
    """Read, parse and validate the openmanod YAML configuration file.

    Returns the configuration dictionary, completed with default values for
    the tokens that the file does not set. Raises LoadConfigurationException
    on any problem: unreadable file, bad YAML, or schema violation.
    """
    defaults = {
        'http_port': 9090,
        'http_host': 'localhost',
        'http_console_proxy': True,
        'http_console_host': None,
        'log_level': 'DEBUG',
        'log_socket_port': 9022,
        'auto_push_VNF_to_VIMs': True,
        'db_host': 'localhost',
        'db_ovim_host': 'localhost',
    }
    try:
        # Check config file exists and read it
        with open(configuration_file, 'r') as f:
            config_str = f.read()
        # Parse configuration file
        config = yaml.load(config_str, Loader=yaml.SafeLoader)
        # Validate configuration file with the config_schema
        js_v(config, config_schema)

        # Complete missing tokens with their default values
        for key, value in defaults.items():
            config.setdefault(key, value)
        return config

    except yaml.YAMLError as e:
        error_pos = ""
        if isinstance(e, MarkedYAMLError):
            mark = e.problem_mark
            error_pos = " at line:{} column:{}".format(mark.line + 1, mark.column + 1)
        raise LoadConfigurationException("Bad YAML format at configuration file '{file}'{pos}: {message}".format(
            file=configuration_file, pos=error_pos, message=e))
    except js_e.ValidationError as e:
        error_pos = ""
        if e.path:
            error_pos = " at '" + ":".join(map(str, e.path)) + "'"
        raise LoadConfigurationException("Invalid field at configuration file '{file}'{pos} {message}".format(
            file=configuration_file, pos=error_pos, message=e))
    except Exception as e:
        # catch-all boundary: any other failure (IOError, None config, ...) is
        # reported uniformly as a configuration-loading problem
        raise LoadConfigurationException("Cannot load configuration file '{file}' {message}".format(
            file=configuration_file, message=e))
+
+
def console_port_iterator():
    """Yield every configured console port from global_config["http_console_ports"].

    Entries are either plain ints (yielded as-is) or dicts with inclusive
    "from"/"to" bounds, which are expanded one port at a time.
    """
    for entry in global_config["http_console_ports"]:
        if type(entry) is int:
            yield entry
        else:
            # dictionary with inclusive "from"/"to" bounds
            yield from range(entry["from"], entry["to"] + 1)
+
+
def usage():
    """Print command-line help for openmanod to stdout."""
    print("Usage: ", sys.argv[0], "[options]")
    help_lines = (
        "    -v|--version: prints current version",
        "    -c|--config [configuration_file]: loads the configuration file (default: openmanod.cfg)",
        "    -h|--help: shows this help",
        "    -p|--port [port_number]: changes port number and overrides the port number in the configuration file (default: 9090)",
        "    -P|--adminport [port_number]: changes admin port number and overrides the port number in the configuration file (default: 9095)",
        "    --log-socket-host HOST: send logs to this host",
        "    --log-socket-port PORT: send logs using this port (default: 9022)",
        "    --log-file FILE: send logs to this file",
        "    --create-tenant NAME: Try to creates this tenant name before starting, ignoring any errors as e.g. conflict",
    )
    # a single joined print emits exactly the same bytes as one print per line
    print("\n".join(help_lines))
+
+
def set_logging_file(log_file):
    """Route the 'openmano' logger to a rotating file and drop the initial console handler.

    :param log_file: path of the log file to open (rotated at ~100 MB, 9 backups)
    :raises LoadConfigurationException: if the file cannot be opened
    """
    try:
        # delay=0 opens the file immediately, so permission problems surface here
        rotating_handler = log_handlers.RotatingFileHandler(log_file, maxBytes=100e6, backupCount=9, delay=0)
    except IOError as e:
        raise LoadConfigurationException(
            "Cannot open logging file '{}': {}. Check folder exist and permissions".format(log_file, e))
    rotating_handler.setFormatter(log_formatter_simple)
    logger.addHandler(rotating_handler)
    # remove the stream handler installed by the initial logging.basicConfig call
    logging.root.removeHandler(logging.root.handlers[0])
    print("logging on '{}'".format(log_file))
+
+
+def _get_version():
+ """
+ Try to get version from package using pkg_resources (available with setuptools)
+ """
+ global __version__
+ ro_version = __version__
+ try:
+ from pkg_resources import get_distribution
+ ro_version = get_distribution("osm_ro").version
+ except Exception:
+ pass
+ return ro_version
+
+
if __name__ == "__main__":
    # Modules whose log level/file can be tuned independently through
    # "log_level_<module>" / "log_file_<module>" configuration keys
    log_modules = ("nfvo", "http", "vim", "wim", "db", "console", "ovim", "sdn", "sdnconn")
    # env2config contains environ variable names and the correspondence with configuration file openmanod.cfg keys.
    # If this environ is defined, this value is taken instead of the one at the configuration file
    env2config = {
        'RO_DB_HOST': 'db_host',
        'RO_DB_NAME': 'db_name',
        'RO_DB_USER': 'db_user',
        'RO_DB_PASSWORD': 'db_passwd',
        'RO_DB_OVIM_HOST': 'db_ovim_host',
        'RO_DB_OVIM_NAME': 'db_ovim_name',
        'RO_DB_OVIM_USER': 'db_ovim_user',
        'RO_DB_OVIM_PASSWORD': 'db_ovim_passwd',
        'RO_LOG_LEVEL': 'log_level',
        'RO_LOG_FILE': 'log_file',
    }
    for log_module in log_modules:
        env2config['RO_LOG_LEVEL_' + log_module.upper()] = 'log_level_' + log_module
    ro_version = _get_version()
    # Configure logging step 1
    hostname = socket.gethostname()
    log_formatter_str = '%(asctime)s.%(msecs)03d00Z[{host}@openmanod] %(filename)s:%(lineno)s severity:%(levelname)s logger:%(name)s log:%(message)s'
    log_formatter_complete = logging.Formatter(log_formatter_str.format(host=hostname), datefmt='%Y-%m-%dT%H:%M:%S')
    log_format_simple = "%(asctime)s %(levelname)s %(name)s %(thread)d %(filename)s:%(lineno)s %(message)s"
    log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S.%03d')
    logging.basicConfig(format=log_format_simple, level=logging.DEBUG, datefmt='%Y-%m-%dT%H:%M:%S.%03d')
    logger = logging.getLogger('openmano')
    logger.setLevel(logging.DEBUG)
    socket_handler = None
    # Read parameters and configuration file
    httpthread = None
    try:
        # load parameters and configuration
        opts, args = getopt.getopt(sys.argv[1:], "hvc:V:p:P:",
                                   ["config=", "help", "version", "port=", "vnf-repository=", "adminport=",
                                    "log-socket-host=", "log-socket-port=", "log-file=", "create-tenant="])
        port = None
        port_admin = None
        config_file = 'openmanod.cfg'
        vnf_repository = None
        log_file = None
        log_socket_host = None
        log_socket_port = None
        create_tenant = None

        for o, a in opts:
            if o in ("-v", "--version"):
                print("openmanod version {} {}".format(ro_version, version_date))
                print("(c) Copyright Telefonica")
                sys.exit()
            elif o in ("-h", "--help"):
                usage()
                sys.exit()
            elif o in ("-V", "--vnf-repository"):
                vnf_repository = a
            elif o in ("-c", "--config"):
                config_file = a
            elif o in ("-p", "--port"):
                port = a
            elif o in ("-P", "--adminport"):
                port_admin = a
            elif o == "--log-socket-port":
                log_socket_port = a
            elif o == "--log-socket-host":
                log_socket_host = a
            elif o == "--log-file":
                log_file = a
            elif o == "--create-tenant":
                create_tenant = a
            else:
                assert False, "Unhandled option"
        if log_file:
            set_logging_file(log_file)
        global_config = load_configuration(config_file)
        global_config["version"] = ro_version
        global_config["version_date"] = version_date
        # Override parameters obtained by command line on ENV
        if port:
            global_config['http_port'] = port
        if port_admin:
            global_config['http_admin_port'] = port_admin
        if log_socket_host:
            global_config['log_socket_host'] = log_socket_host
        if log_socket_port:
            global_config['log_socket_port'] = log_socket_port

        # override with ENV
        for env_k, env_v in environ.items():
            try:
                if not env_k.startswith("RO_") or env_k not in env2config or not env_v:
                    continue
                global_config[env2config[env_k]] = env_v
                if env_k.endswith("PORT"):  # convert to int, skip if not possible
                    global_config[env2config[env_k]] = int(env_v)
            except Exception as e:
                logger.warning("skipping environ '{}={}' because exception '{}'".format(env_k, env_v, e))

        global_config["console_port_iterator"] = console_port_iterator
        global_config["console_thread"] = {}
        global_config["console_ports"] = {}
        if not global_config["http_console_host"]:
            global_config["http_console_host"] = global_config["http_host"]
            if global_config["http_host"] == "0.0.0.0":
                global_config["http_console_host"] = socket.gethostname()

        # Configure logging STEP 2
        # BUGFIX: the key checked was "log_host", which nothing (config defaults,
        # environment, or --log-socket-host) ever sets, so the remote socket
        # handler could never be activated. Check "log_socket_host", the key
        # that is actually set and read below.
        if "log_socket_host" in global_config:
            socket_handler = log_handlers.SocketHandler(global_config["log_socket_host"],
                                                        global_config["log_socket_port"])
            socket_handler.setFormatter(log_formatter_complete)
            if global_config.get("log_socket_level") \
                    and global_config["log_socket_level"] != global_config["log_level"]:
                socket_handler.setLevel(global_config["log_socket_level"])
            logger.addHandler(socket_handler)

        if log_file:
            global_config['log_file'] = log_file
        elif global_config.get('log_file'):
            set_logging_file(global_config['log_file'])

        logger.setLevel(getattr(logging, global_config['log_level']))
        logger.critical("Starting openmano server version: '%s %s' command: '%s'",
                        ro_version, version_date, " ".join(sys.argv))

        # Per-module log levels and files
        for log_module in log_modules:
            log_level_module = "log_level_" + log_module
            log_file_module = "log_file_" + log_module
            logger_module = logging.getLogger('openmano.' + log_module)
            if log_level_module in global_config:
                logger_module.setLevel(global_config[log_level_module])
            if log_file_module in global_config:
                try:
                    file_handler = logging.handlers.RotatingFileHandler(global_config[log_file_module],
                                                                        maxBytes=100e6, backupCount=9, delay=0)
                    file_handler.setFormatter(log_formatter_simple)
                    logger_module.addHandler(file_handler)
                except IOError as e:
                    raise LoadConfigurationException(
                        "Cannot open logging file '{}': {}. Check folder exist and permissions".format(
                            global_config[log_file_module], str(e)))
            global_config["logger_" + log_module] = logger_module

        # Initialize DB connection
        mydb = nfvo_db.nfvo_db()
        mydb.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'],
                     global_config['db_name'])
        db_path = osm_ro.__path__[0] + "/database_utils"
        if not os_path.exists(db_path + "/migrate_mano_db.sh"):
            db_path = osm_ro.__path__[0] + "/../database_utils"
        try:
            r = mydb.get_db_version()
            if r[0] != database_version:
                logger.critical("DATABASE wrong version '{current}'. Try to upgrade/downgrade to version '{target}'"
                                " with '{db_path}/migrate_mano_db.sh {target}'".format(current=r[0],
                                                                                       target=database_version,
                                                                                       db_path=db_path))
                exit(-1)
        except db_base_Exception as e:
            logger.critical("DATABASE is not valid. If you think it is corrupted, you can init it with"
                            " '{db_path}/init_mano_db.sh' script".format(db_path=db_path))
            exit(-1)

        nfvo.global_config = global_config
        if create_tenant:
            try:
                nfvo.new_tenant(mydb, {"name": create_tenant})
            except Exception as e:
                if isinstance(e, nfvo.NfvoException) and e.http_code == 409:
                    pass  # if tenant exist (NfvoException error 409), ignore
                else:  # otherwise print and error and continue
                    logger.error("Cannot create tenant '{}': {}".format(create_tenant, e))

        # WIM module
        wim_persistence = WimPersistence(mydb)
        wim_engine = WimEngine(wim_persistence, nfvo.plugins)
        # ---
        nfvo.start_service(mydb, wim_persistence, wim_engine)

        httpthread = httpserver.httpserver(
            mydb, False,
            global_config['http_host'], global_config['http_port'],
            wim_persistence, wim_engine
        )

        httpthread.start()
        if 'http_admin_port' in global_config:
            httpthreadadmin = httpserver.httpserver(mydb, True, global_config['http_host'],
                                                    global_config['http_admin_port'])
            httpthreadadmin.start()
        time.sleep(1)
        logger.info('Waiting for http clients')
        print('Waiting for http clients')
        print('openmanod ready')
        print('====================')
        time.sleep(20)
        sys.stdout.flush()

        # TODO: Interactive console must be implemented here instead of join or sleep

        # httpthread.join()
        # if 'http_admin_port' in global_config:
        #     httpthreadadmin.join()
        while True:
            time.sleep(86400)

    except KeyboardInterrupt as e:
        logger.info(str(e))
    except SystemExit:
        pass
    except getopt.GetoptError as e:
        logger.critical(str(e))  # will print something like "option -a not recognized"
        exit(-1)
    except LoadConfigurationException as e:
        logger.critical(str(e))
        exit(-1)
    except db_base_Exception as e:
        logger.critical(str(e))
        exit(-1)
    except nfvo.NfvoException as e:
        logger.critical(str(e), exc_info=True)
        exit(-1)
    nfvo.stop_service()
    if httpthread:
        httpthread.join(1)
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
[Unit]
Description=openmano server (OSM RO)
# RO needs its MySQL database, so order this unit after the local mysql service
After=mysql.service

[Service]
ExecStart=/usr/bin/openmanod -c /etc/osm/openmanod.cfg --log-file=/var/log/osm/openmano.log
# restart the daemon on any exit, crash included
Restart=always

[Install]
WantedBy=multi-user.target
+
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+
+# author: Alfonso Tierno
+
+# to get docker id that is running:
+# $ docker_ro=`docker service ps osm_ro -f desired-state=running --format "{{.Name}}.{{.ID}}" --no-trunc`
+# exec with:
+# $ docker exec -ti $docker_ro RO-of
+
# get_from_db <column> <table> <name_or_uuid>
# Print the value of <column> for the row of <table> whose name or uuid
# matches the given identifier (last line of the mysql output, skipping
# the column header).
# NOTE(review): it connects to $RO_DB_HOST although the credentials are the
# OVIM ones — confirm whether $RO_DB_OVIM_HOST was intended here.
function get_from_db()
{
    echo "select $1 from $2 where name='$3' or uuid='$3';" | mysql -h"$RO_DB_HOST" -u"$RO_DB_OVIM_USER" -p"$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME" 2>/dev/null | tail -n1
}
+
# Default the OVIM database host to the main RO database host when not set
[ -z "$RO_DB_OVIM_HOST" ] && export RO_DB_OVIM_HOST="$RO_DB_HOST"

# Without arguments, print usage and the list of available SDN controllers
if [ -z "$1" ] ; then
    echo "usage '$0 <sdn_controller> command'"
    echo
    echo "available sdn_controllers are:"
    echo "select uuid, name, type, ip, dpid, status from ofcs;" | mysql -h"$RO_DB_HOST" -u"$RO_DB_OVIM_USER" -p"$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME" 2>/dev/null
    exit
fi


# Export the controller connection details for openflow-lib.
# Fixes: "$1" is quoted so controller names containing spaces are not
# word-split, and legacy backticks are replaced by $(...) substitution.
export OF_CONTROLLER_DPID=$(get_from_db dpid ofcs "$1")
[ -z "$OF_CONTROLLER_DPID" ] && echo "Cannot find sdn_controller '$1' at database" >&2 && exit 1

export OF_CONTROLLER_IP=$(get_from_db ip ofcs "$1")
export OF_CONTROLLER_PORT=$(get_from_db port ofcs "$1")
export OF_CONTROLLER_USER=$(get_from_db user ofcs "$1")
export OF_CONTROLLER_PASSWORD=$(get_from_db password ofcs "$1")
export OF_CONTROLLER_TYPE=$(get_from_db type ofcs "$1")

# Remaining arguments are the openflow-lib command to run
shift
openflow-lib "$@"
+
+
+
--- /dev/null
+#!/bin/bash
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+# This script is intended for launching RO from a docker container.
+# It waits for mysql server ready, normally running on a separate container, ...
+# then it checks if database is present and creates it if needed.
+# Finally it launches RO server.
+
# If New Generation RO is requested, run it directly (it needs no legacy
# database init) and exit with its status
if [ -n "$OSMRO_NG" ] ; then
    python3 -m osm_ng_ro.ro_main || exit 1
    exit 0
fi

# OVIM database connection data defaults to the main RO database values
[ -z "$RO_DB_OVIM_HOST" ] && export RO_DB_OVIM_HOST="$RO_DB_HOST"
[ -z "$RO_DB_OVIM_ROOT_PASSWORD" ] && export RO_DB_OVIM_ROOT_PASSWORD="$RO_DB_ROOT_PASSWORD"

# IF OSMRO_SERVER_NG use new server that not need any database init
# NOTE(review): unlike the OSMRO_NG branch above, this line does not exit
# afterwards, so when the NG server returns the script falls through to the
# legacy database init below — confirm this is intended.
[ -n "$OSMRO_SERVER_NG" ] && python3 -m osm_ng_ro.ro_main
+
+
# is_db_created <host> <port> <user> <password> <db_name> <min_version>
# Check that database <db_name> exists AND is initialized at schema version
# <min_version> (read from its schema_version table).
# Returns 0 when the DB exists and is inited, 1 otherwise; progress messages
# go to stderr.
function is_db_created() {
    db_host=$1
    db_port=$2
    db_user=$3
    db_pswd=$4
    db_name=$5
    db_version=$6 # minimun database version

    # mysqlshow lists databases; the "Wildcard" line is header noise to skip
    if mysqlshow -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" | grep -v Wildcard | grep -q -e "$db_name" ; then
        # a version_int=0 row containing "init" marks a failed previous init
        if echo "SELECT comments FROM schema_version WHERE version_int=0;" |
            mysql -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" "$db_name" |
            grep -q -e "init" ; then
            echo " DB $db_name exists BUT failed in previous init" >&2
            return 1
        elif echo "SELECT * FROM schema_version WHERE version_int=$db_version;" |
            mysql -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" "$db_name" |
            grep -q -e "$db_version" ; then
            echo " DB $db_name exists and inited" >&2
            return 0
        else
            echo " DB $db_name exists BUT not inited" >&2
            return 1
        fi
    fi
    echo " DB $db_name does not exist" >&2
    return 1
}
+
# Rewrite the database connection settings of /etc/osm/openmanod.cfg from the
# RO_DB_* / RO_DB_OVIM_* environment variables. Returns 1 on the first failed
# substitution. Currently unused: the call at step "1/4 Apply config" is
# commented out because environment variables already override the config file.
function configure(){
    #Database parameters
    #db_host: localhost
    #db_user: mano
    #db_passwd: manopw
    #db_name: mano_db
    # Database ovim parameters
    #db_ovim_host: localhost # by default localhost
    #db_ovim_user: mano # DB user
    #db_ovim_passwd: manopw # DB password
    #db_ovim_name: mano_vim_db # Name of the OVIM MANO DB


    sed -i "s/^db_host:.*/db_host: $RO_DB_HOST/" /etc/osm/openmanod.cfg || return 1
    sed -i "s/^db_user:.*/db_user: $RO_DB_USER/" /etc/osm/openmanod.cfg || return 1
    sed -i "s/^db_passwd:.*/db_passwd: $RO_DB_PASSWORD/" /etc/osm/openmanod.cfg || return 1
    sed -i "s/^db_name:.*/db_name: $RO_DB_NAME/" /etc/osm/openmanod.cfg || return 1
    sed -i "s/^db_ovim_host:.*/db_ovim_host: $RO_DB_OVIM_HOST/" /etc/osm/openmanod.cfg || return 1
    sed -i "s/^db_ovim_user:.*/db_ovim_user: $RO_DB_OVIM_USER/" /etc/osm/openmanod.cfg || return 1
    sed -i "s/^db_ovim_passwd:.*/db_ovim_passwd: $RO_DB_OVIM_PASSWORD/" /etc/osm/openmanod.cfg || return 1
    sed -i "s/^db_ovim_name:.*/db_ovim_name: $RO_DB_OVIM_NAME/" /etc/osm/openmanod.cfg || return 1
    return 0
}
+
# Maximum number of one-second attempts when waiting for a database server
max_attempts=120

# wait_db <host> <port>
# Poll the MySQL server with "mysqladmin ping" until it answers or
# max_attempts seconds have elapsed. Returns 0 on success, 1 on timeout.
function wait_db(){
    db_host=$1
    db_port=$2
    attempt=0
    echo "Wait until $max_attempts seconds for MySQL mano Server ${db_host}:${db_port} "
    while ! mysqladmin ping -h"$db_host" -P"$db_port" --silent; do
        #wait 120 sec
        if [ $attempt -ge $max_attempts ]; then
            echo
            echo "Cannot connect to database ${db_host}:${db_port} during $max_attempts sec" >&2
            return 1
        fi
        # POSIX arithmetic expansion; the former $[...] form is deprecated in bash
        attempt=$((attempt+1))
        echo -n "."
        sleep 1
    done
    return 0
}
+
+
+echo "1/4 Apply config"
+# this is not needed anymore because envioron overwrites config file
+# configure || exit 1
+
+
+echo "2/4 Wait for db up"
+wait_db "$RO_DB_HOST" "$RO_DB_PORT" || exit 1
+[ "$RO_DB_OVIM_HOST" = "$RO_DB_HOST" ] || wait_db "$RO_DB_OVIM_HOST" "$RO_DB_OVIM_PORT" || exit 1
+
+
+echo "3/4 Init database"
+RO_PATH=`python3 -c 'import osm_ro; print(osm_ro.__path__[0])'`
+echo "RO_PATH: $RO_PATH"
+if ! is_db_created "$RO_DB_HOST" "$RO_DB_PORT" "$RO_DB_USER" "$RO_DB_PASSWORD" "$RO_DB_NAME" "27"
+then
+ if [ -n "$RO_DB_ROOT_PASSWORD" ] ; then
+ mysqladmin -h"$RO_DB_HOST" -uroot -p"$RO_DB_ROOT_PASSWORD" create "$RO_DB_NAME"
+ echo "CREATE USER '${RO_DB_USER}'@'%' IDENTIFIED BY '${RO_DB_PASSWORD}';" |
+ mysql -h"$RO_DB_HOST" -uroot -p"$RO_DB_ROOT_PASSWORD" || echo "user ${RO_DB_USER} already created?"
+ echo "GRANT ALL PRIVILEGES ON ${RO_DB_NAME}.* TO '${RO_DB_USER}'@'%';" |
+ mysql -h"$RO_DB_HOST" -uroot -p"$RO_DB_ROOT_PASSWORD" || echo "user ${RO_DB_USER} already granted?"
+ fi
+ ${RO_PATH}/database_utils/init_mano_db.sh -u "$RO_DB_USER" -p "$RO_DB_PASSWORD" -h "$RO_DB_HOST" \
+ -P "${RO_DB_PORT}" -d "${RO_DB_NAME}" || exit 1
+else
+ echo " migrate database version"
+ ${RO_PATH}/database_utils/migrate_mano_db.sh -u "$RO_DB_USER" -p "$RO_DB_PASSWORD" -h "$RO_DB_HOST" \
+ -P "$RO_DB_PORT" -d "$RO_DB_NAME" -b /var/log/osm
+fi
+
+# TODO py3 BEGIN
+#OVIM_PATH=`python3 -c 'import lib_osm_openvim; print(lib_osm_openvim.__path__[0])'`
+#echo "OVIM_PATH: $OVIM_PATH"
+#if ! is_db_created "$RO_DB_OVIM_HOST" "$RO_DB_OVIM_PORT" "$RO_DB_OVIM_USER" "$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME" \
+# "22"
+#then
+# if [ -n "$RO_DB_OVIM_ROOT_PASSWORD" ] ; then
+# mysqladmin -h"$RO_DB_OVIM_HOST" -uroot -p"$RO_DB_OVIM_ROOT_PASSWORD" create "$RO_DB_OVIM_NAME"
+# echo "CREATE USER '${RO_DB_OVIM_USER}'@'%' IDENTIFIED BY '${RO_DB_OVIM_PASSWORD}';" |
+# mysql -h"$RO_DB_OVIM_HOST" -uroot -p"$RO_DB_OVIM_ROOT_PASSWORD" ||
+# echo "user ${RO_DB_OVIM_USER} already created?"
+# echo "GRANT ALL PRIVILEGES ON ${RO_DB_OVIM_NAME}.* TO '${RO_DB_OVIM_USER}'@'%';" |
+# mysql -h"$RO_DB_OVIM_HOST" -uroot -p"$RO_DB_OVIM_ROOT_PASSWORD" ||
+# echo "user ${RO_DB_OVIM_USER} already granted?"
+# fi
+# ${OVIM_PATH}/database_utils/init_vim_db.sh -u "$RO_DB_OVIM_USER" -p "$RO_DB_OVIM_PASSWORD" -h "$RO_DB_OVIM_HOST" \
+# -P "${RO_DB_OVIM_PORT}" -d "${RO_DB_OVIM_NAME}" || exit 1
+#else
+# echo " migrate database version"
+# ${OVIM_PATH}/database_utils/migrate_vim_db.sh -u "$RO_DB_OVIM_USER" -p "$RO_DB_OVIM_PASSWORD" -h "$RO_DB_OVIM_HOST"\
+# -P "$RO_DB_OVIM_PORT" -d "$RO_DB_OVIM_NAME" -b /var/log/osm
+#fi
+# TODO py3 END
+
+echo "4/4 Try to start"
+# look for openmanod.cfg
+RO_CONFIG_FILE="/etc/osm/openmanod.cfg"
+[ -f "$RO_CONFIG_FILE" ] || RO_CONFIG_FILE=$(python3 -c 'import osm_ro; print(osm_ro.__path__[0])')/openmanod.cfg
+[ -f "$RO_CONFIG_FILE" ] || ! echo "configuration file 'openmanod.cfg' not found" || exit 1
+
+python3 -m osm_ro.openmanod -c "$RO_CONFIG_FILE" --create-tenant=osm # --log-file=/var/log/osm/openmano.log
+
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+
+#Utility for getting options, must be call with source
+#for every <option> it sets a variable 'option_<option>="-"'
+#if the option appears more than once, it concatenates a "-"
+#if the option contains an argument: 'option_<option>="argument"'
+#if the long option name contains "-" they are converted to "_"
+#params that are not options are stored in 'params'
+#the options to look for is received in the first argument,
+#a blank separator list with short and long options without the leading - or --
+#options to be stored in the same variable must appear in the same word separated by ':'
+#insert a trailing = if the option requires an argument
+#insert a trailing ? if the option may have an argument NOT IMPLEMENTED
+#option -- means get the rest of argument returned as 'option__=$*'
+
+#example: to allow options -h --help -j -k(with argument) --my-long-option(with argument)
+# and other parameters after -- provide
+# "help:h j k= my-long-option="
+#parsing "-h -karg pepe --my-long-option=otherar -- -s" will set variables
+# option_help="-"
+# option_k="arg"
+# option_my_long_option="otherarg"
+# params=" pepe"
+# option__="-s"
+
+
#detect if is called with a source to use the 'exit'/'return' command for exiting
[[ ${BASH_SOURCE[0]} != $0 ]] && ___exit="return" || ___exit="exit"

# first argument is the option specification; the rest is the command line to parse
options="$1"
shift

get_argument=""
#reset variables
params=""
# pre-declare every option_<name> variable empty, using the first alias of
# each option group as the variable name (dashes become underscores)
for option_group in $options
do
    _name=${option_group%%:*}
    _name=${_name%=}
    _name=${_name//-/_}
    eval option_${_name}='""'
done

# walk the command line, classifying each word as short option(s), long
# option, the "--" terminator, or a plain parameter
while [[ $# -gt 0 ]]
do
    argument="$1"
    shift
    # a previous option declared "=" (requires argument): this word is its value
    if [[ -n $get_argument ]]
    then
        [[ ${argument:0:1} == "-" ]] && echo "option '-$option' requires an argument" >&2 && $___exit 1
        eval ${get_argument}='"$argument"'
        #echo option $get_argument with argument
        get_argument=""
        continue
    fi


    #short options
    if [[ ${argument:0:1} == "-" ]] && [[ ${argument:1:1} != "-" ]] && [[ ${#argument} -ge 2 ]]
    then
        # scan each letter after the "-"; a letter taking an argument consumes
        # the rest of the word (e.g. -karg) or the next word
        index=0
        while index=$((index+1)) && [[ $index -lt ${#argument} ]]
        do
            option=${argument:$index:1}
            bad_option=y
            for option_group in $options
            do
                _name=""
                for o in $(echo $option_group | tr ":=" " ")
                do
                    [[ -z "$_name" ]] && _name=${o//-/_}
                    #echo option $option versus $o
                    if [[ "$option" == "${o}" ]]
                    then
                        # flag occurrence: append a "-" to the option variable
                        eval option_${_name}='${option_'${_name}'}-'
                        bad_option=n
                        if [[ ${option_group:${#option_group}-1} != "=" ]]
                        then
                            continue
                        fi
                        if [[ ${#argument} -gt $((index+1)) ]]
                        then
                            eval option_${_name}='"${argument:$((index+1))}"'
                            index=${#argument}
                        else
                            get_argument=option_${_name}
                            #echo next should be argument $argument
                        fi

                        break
                    fi
                done
            done
            [[ $bad_option == y ]] && echo "invalid argument '-$option'? Type -h for help" >&2 && $___exit 1
        done
    elif [[ ${argument:0:2} == "--" ]] && [[ ${#argument} -ge 3 ]]
    then
        # long option, possibly in the form --name=value
        option=${argument:2}
        option_argument=${option#*=}
        option_name=${option%%=*}
        [[ "$option_name" == "$option" ]] && option_argument=""
        bad_option=y
        for option_group in $options
        do
            _name=""
            for o in $(echo $option_group | tr ":=" " ")
            do
                [[ -z "$_name" ]] && _name=${o//-/_}
                #echo option $option versus $o
                if [[ "$option_name" == "${o}" ]]
                then
                    bad_option=n
                    if [[ ${option_group:${#option_group}-1} != "=" ]]
                    then #not an argument
                        [[ -n "${option_argument}" ]] && echo "option '--${option%%=*}' do not accept an argument " >&2 && $___exit 1
                        eval option_${_name}='"${option_'${_name}'}-"'
                    elif [[ -n "${option_argument}" ]]
                    then
                        eval option_${_name}='"${option_argument}"'
                    else
                        get_argument=option_${_name}
                        #echo next should be argument $argument
                    fi
                    break
                fi
            done
        done
        [[ $bad_option == y ]] && echo "invalid argument '-$option'? Type -h for help" >&2 && $___exit 1
    elif [[ ${argument:0:2} == "--" ]]
    then
        # bare "--": everything that follows is stored in option__ (if declared)
        option__="$*"
        bad_option=y
        for o in $options
        do
            if [[ "$o" == "--" ]]
            then
                bad_option=n
                option__=" $*"
                break
            fi
        done
        [[ $bad_option == y ]] && echo "invalid argument '--'? Type -h for help" >&2 && $___exit 1
        break
    else
        params="$params ${argument}"
    fi

done

# a trailing option that required an argument never got one
[[ -n "$get_argument" ]] && echo "option '-$option' requires an argument" >&2 && $___exit 1
$___exit 0
#echo params $params
+
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+
+# author: Alfonso Tierno
+
# TODO py3: the openvim library has not been ported yet, so this installer is
# deliberately disabled; everything below this line is currently dead code.
exit 0 # TODO py3 for the moment no openvim library is installed

# It uses following env, if not provided filling by default
[ -z "$GIT_OVIM_URL" ] && GIT_OVIM_URL=https://osm.etsi.org/gerrit/osm/openvim.git
[ -z "$DEVELOP" ] && DEVELOP=""
# folder where RO is installed
[ -z "$BASEFOLDER" ] && HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]})) && BASEFOLDER=$(dirname $HERE)
[ -z "$SUDO_USER" ] && SUDO_USER="$USER"
[ -z "$NO_PACKAGES" ] && NO_PACKAGES=""
[ -z "$_DISTRO" ] && _DISTRO="Ubuntu"
+
# Print the installer help text to stdout.
function usage(){
    echo -e "usage: sudo -E $0 [OPTIONS]"
    echo -e "Install last stable source code of lib-osm-openvim and the needed packages"
    echo -e " OPTIONS"
    echo -e " -h --help: show this help"
    echo -e " -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " --develop: install last master version for developers"
    echo -e " --no-install-packages: use this option to skip updating and installing the requires packages. This" \
        "avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
}
# Parse short options with getopts; long options arrive through the "-" case
# as "--name" with the name in $OPTARG.
while getopts ":b:h-:" o; do
    case "${o}" in
        b)
            export COMMIT_ID=${OPTARG}
            ;;
        h)
            usage && exit 0
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
            exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
            exit 1
            ;;
        :)
            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
            exit 1
            ;;
        *)
            usage >&2
            exit 1
            ;;
    esac
done

# Clone openvim as the invoking (pre-sudo) user so file ownership stays sane.
# The "|| ! echo ... || exit 1" idiom prints the error and exits only on failure.
su $SUDO_USER -c "git -C '${BASEFOLDER}' clone ${GIT_OVIM_URL} lib-openvim" ||
    ! echo "Error cannot clone from '${GIT_OVIM_URL}'" >&2 || exit 1
# Select the refspec to install: explicit -b, latest stable tag, or master (--develop)
if [[ -n $COMMIT_ID ]] ; then
    echo -e "Installing lib-osm-openvim from refspec: $COMMIT_ID"
    su $SUDO_USER -c "git -C '${BASEFOLDER}/lib-openvim' checkout $COMMIT_ID" ||
        ! echo "Error cannot checkout '$COMMIT_ID' from '${GIT_OVIM_URL}'" >&2 || exit 1
elif [[ -z $DEVELOP ]]; then
    # newest version-sorted v* tag is the latest stable release
    LATEST_STABLE_TAG=`git -C "${BASEFOLDER}/lib-openvim" tag -l "v[0-9]*" | sort -V | tail -n1`
    echo -e "Installing lib-osm-openvim from refspec: tags/${LATEST_STABLE_TAG}"
    su $SUDO_USER -c "git -C '${BASEFOLDER}/lib-openvim' checkout tags/${LATEST_STABLE_TAG}" ||
        ! echo "Error cannot checkout 'tags/${LATEST_STABLE_TAG}' from '${GIT_OVIM_URL}'" >&2 || exit 1
else
    echo -e "Installing lib-osm-openvim from refspec: master"
fi

# Build and install the library in editable mode (legacy python2 tooling)
make -C "${BASEFOLDER}/lib-openvim" prepare_lite
export LANG="en_US.UTF-8"
pip2 install -e "${BASEFOLDER}/lib-openvim/build" || ! echo "ERROR installing lib-osm-openvim library!!!" >&2 ||
    exit 1
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#ONLY TESTED for Ubuntu 16.04
+#it configures openmano to run as a service
+
# Print help for the service-configuration script.
function usage(){
    echo -e "usage: sudo -E $0 [OPTIONS]"
    echo -e "Configures openmano to run as a service at /opt"
    echo -e " OPTIONS"
    echo -e " -u USER_OWNER user owner of the service, 'root' by default"
    echo -e " -f PATH path where openmano source is located. If missing it is downloaded from git"
    echo -e " -d --delete: if -f is provided, delete this path after copying to /opt"
    echo -e " -h: show this help"
    echo -e " --uninstall: remove created service and files"
}
+
# Stop and disable the openmano/osm-ro services and remove every file this
# installer created. Best-effort: each step reports "Already done" when the
# corresponding unit/file is absent. Exits with 1 if a file cannot be removed.
function uninstall(){
    echo "systemctl disable openmano.service " && systemctl disable openmano.service 2>/dev/null ||
        echo " Already done"
    echo "systemctl disable osm-ro.service " && systemctl disable osm-ro.service 2>/dev/null ||
        echo " Already done"
    echo "service openmano stop " && service openmano stop 2>/dev/null || echo " Already done"
    echo "service osm-ro stop " && service osm-ro stop 2>/dev/null || echo " Already done"
    for file in /opt/openmano /etc/default/openmanod.cfg /etc/osm/openmanod.cfg /var/log/openmano /var/log/osm/openmano* \
        /etc/systemd/system/openmano.service /etc/systemd/system/osm-ro.service /usr/bin/openmano /usr/sbin/service-openmano \
        /usr/bin/openmano-report
    do
        echo rm $file
        rm -rf $file || ! echo "Can not delete '$file'. Needed root privileges?" >&2 || exit 1
    done
    echo "Done"
}
+
+GIT_URL=https://osm.etsi.org/gerrit/osm/RO.git
+USER_OWNER="root"
+QUIET_MODE=""
+FILE=""
+DELETE=""
+while getopts ":u:f:hdq-:" o; do
+ case "${o}" in
+ u)
+ export USER_OWNER="$OPTARG"
+ ;;
+ f)
+ export FILE="$OPTARG"
+ ;;
+ q)
+ export QUIET_MODE=yes
+ ;;
+ h)
+ usage && exit 0
+ ;;
+ d)
+ DELETE=y
+ ;;
+ -)
+ [ "${OPTARG}" == "help" ] && usage && exit 0
+ [ "${OPTARG}" == "uninstall" ] && uninstall && exit 0
+ [ "${OPTARG}" == "delete" ] && DELETE=y && continue
+ echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
+ exit 1
+ ;;
+ \?)
+ echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
+ exit 1
+ ;;
+ :)
+ echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
+ exit 1
+ ;;
+ *)
+ usage >&2
+ exit -1
+ ;;
+ esac
+done
+# Error message reused below whenever $FILE does not look like a source tree
+BAD_PATH_ERROR="Path '$FILE' does not contain a valid openmano distribution"
+
+#check root privileges
+[ "$USER" != "root" ] && echo "Needed root privileges" >&2 && exit 1
+
+#Discover Linux distribution
+#try redhat type
+if [[ -f /etc/redhat-release ]]
+then
+    _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut -d" " -f1)
+else
+    #if not assuming ubuntu type
+    _DISTRO=$(lsb_release -is 2>/dev/null)
+fi
+# Only Ubuntu 16.xx is accepted: the systemd unit layout below is only tested there
+if [[ "$_DISTRO" == "Ubuntu" ]]
+then
+    _RELEASE=$(lsb_release -rs)
+    if [[ ${_RELEASE%%.*} != 16 ]]
+    then
+        echo "Only tested in Ubuntu Server 16.04" >&2 && exit 1
+    fi
+else
+    echo "Only tested in Ubuntu Server 16.04" >&2 && exit 1
+fi
+
+
+# Obtain the source tree: when -f was not provided, clone into a random
+# temporary folder and mark it for deletion; otherwise verify the path exists
+if [[ -z "$FILE" ]]
+then
+    FILE=__temp__${RANDOM}
+    git clone $GIT_URL $FILE || ! echo "Cannot get openmano source code from $GIT_URL" >&2 || exit 1
+    DELETE=y
+else
+    [[ -d "$FILE" ]] || ! echo $BAD_PATH_ERROR >&2 || exit 1
+fi
+
+#make idempotent
+uninstall
+#copy files
+cp -r "$FILE" /opt/openmano || ! echo $BAD_PATH_ERROR >&2 || exit 1
+mkdir -p /etc/osm || echo "warning cannot create config folder '/etc/osm'"
+cp /opt/openmano/osm_ro/openmanod.cfg /etc/osm/openmanod.cfg ||
+    echo "warning cannot create file '/etc/osm/openmanod.cfg'"
+mkdir -p /var/log/osm || echo "warning cannot create log folder '/var/log/osm'"
+#makes links
+ln -s -v /opt/openmano/openmano /usr/bin/openmano
+ln -s -v /opt/openmano/scripts/service-openmano /usr/sbin/service-openmano
+ln -s -v /opt/openmano/scripts/openmano-report /usr/bin/openmano-report
+
+# hand the copied tree to the invoking (sudo) user instead of root
+chown -R $SUDO_USER /opt/openmano
+
+# systemd unit: runs openmanod as USER_OWNER and restarts it on any exit
+mkdir -p /etc/systemd/system/
+cat > /etc/systemd/system/osm-ro.service << EOF
+[Unit]
+Description=openmano server
+
+[Service]
+User=${USER_OWNER}
+ExecStart=/opt/openmano/openmanod -c /etc/osm/openmanod.cfg --log-file=/var/log/osm/openmano.log
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+# remove the source folder when it was a temporary clone or -d was given
+[[ -n $DELETE ]] && rm -rf "${FILE}"
+
+service osm-ro start
+systemctl enable osm-ro.service
+
+echo Done
+exit
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#ONLY TESTED in Ubuntu 16.04; partially tested in Ubuntu 14.10, 14.04, CentOS7 and RHEL7
+#Gets the needed packages and source code, and configures openmano to run
+#Asks for the database user and password if not provided
+
+# Print command-line help for the full openmano installer.
+# Fixed user-facing typos: "idenpotent" -> "idempotent",
+# "requires packages" -> "required packages", "This avoid" -> "This avoids".
+function usage(){
+    echo -e "usage: sudo -E $0 [OPTIONS]"
+    echo -e "Install last stable source code in ./openmano and the needed packages"
+    echo -e "On a Ubuntu 16.04 it configures openmano as a service"
+    echo -e "  OPTIONS"
+    echo -e "     -u USER:    database admin user. 'root' by default. Prompts if needed"
+    echo -e "     -p PASS:    database admin password to be used or installed. Prompts if needed"
+    echo -e "     -q --quiet: install in unattended mode"
+    echo -e "     -h --help:  show this help"
+    echo -e "     -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
+    echo -e "                    -b master          (main RO branch)"
+    echo -e "                    -b v2.0            (v2.0 branch)"
+    echo -e "                    -b tags/v1.1.0     (a specific tag)"
+    echo -e "                    ..."
+    echo -e "     --develop: install last version for developers, and do not configure as a service"
+    echo -e "     --forcedb: reinstall mano_db DB, deleting previous database if exists and creating a new one"
+    echo -e "     --updatedb: do not reinstall mano_db DB if it exists, just update database"
+    echo -e "     --force:   makes idempotent, delete previous installations folders if needed. It assumes --updatedb if --forcedb option is not provided"
+    echo -e "     --noclone: assumes that openmano was cloned previously and that this script is run from the local repo"
+    echo -e "     --no-install-packages: use this option to skip updating and installing the required packages. This avoids wasting time if you are sure required packages are present e.g. because of a previous installation"
+    echo -e "     --no-db: do not install mysql server"
+}
+
+# Install the given packages with apt-get or yum (whichever exists), then
+# verify each one really ended up installed and abort the script otherwise.
+# Params: $* list of package names
+function install_packages(){
+    [ -x /usr/bin/apt-get ] && apt-get install -y $*
+    [ -x /usr/bin/yum ] && yum install -y $*
+
+    #check properly installed
+    for PACKAGE in $*
+    do
+        PACKAGE_INSTALLED="no"
+        # fixed: 'dpkg -l PKG' alone exits 0 even for packages in the
+        # removed-but-config-files-remain ("rc") state, so require an
+        # installed ("ii") status line. Also '&>>' (append to /dev/null)
+        # replaced by a plain stderr discard.
+        [ -x /usr/bin/apt-get ] && dpkg -l $PACKAGE 2>/dev/null | grep -q "^ii" && PACKAGE_INSTALLED="yes"
+        [ -x /usr/bin/yum ] && yum list installed $PACKAGE &> /dev/null && PACKAGE_INSTALLED="yes"
+        if [ "$PACKAGE_INSTALLED" = "no" ]
+        then
+            echo "failed to install package '$PACKAGE'. Revise network connectivity and try again" >&2
+            exit 1
+        fi
+    done
+}
+
+function ask_user(){
+    # Prompt the user with $1 and interpret the reply as yes/no, case
+    # insensitive. $2 selects the behaviour for an empty reply: 'y' means
+    # default yes, 'n' means default no, anything else re-prompts.
+    # Return: 0 (true) when the answer is yes, 1 (false) when it is no.
+    read -e -p "$1" USER_CONFIRMATION
+    while true ; do
+        if [ -z "$USER_CONFIRMATION" ] ; then
+            [ "$2" == 'y' ] && return 0
+            [ "$2" == 'n' ] && return 1
+        else
+            case "${USER_CONFIRMATION,,}" in
+                y|yes) return 0 ;;
+                n|no)  return 1 ;;
+            esac
+        fi
+        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
+    done
+}
+
+# ---- defaults (overridable by options below) ------------------------------
+GIT_URL=https://osm.etsi.org/gerrit/osm/RO.git
+export GIT_OVIM_URL=https://osm.etsi.org/gerrit/osm/openvim.git
+export GIT_OSMIM_URL=https://osm.etsi.org/gerrit/osm/IM.git
+DBUSER="root"
+DBPASSWD=""
+DBPASSWD_PARAM=""
+QUIET_MODE=""
+DEVELOP=""
+DB_FORCE_UPDATE=""
+UPDATEDB=""
+FORCE=""
+NOCLONE=""
+NO_PACKAGES=""
+NO_DB=""
+COMMIT_ID=""
+
+# NOTE(review): the optstring accepts 'i' but no case below handles it — confirm intent
+while getopts ":u:p:b:hiq-:" o; do
+    case "${o}" in
+        u)
+            export DBUSER="$OPTARG"
+            ;;
+        p)
+            export DBPASSWD="$OPTARG"
+            export DBPASSWD_PARAM="-p$OPTARG"
+            ;;
+        b)
+            export COMMIT_ID=${OPTARG}
+            ;;
+        q)
+            export QUIET_MODE=yes
+            export DEBIAN_FRONTEND=noninteractive
+            ;;
+        h)
+            usage && exit 0
+            ;;
+        -)
+            # long options arrive through '-:' with the name left in OPTARG
+            [ "${OPTARG}" == "help" ] && usage && exit 0
+            [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
+            [ "${OPTARG}" == "forcedb" ] && DB_FORCE_UPDATE="${DB_FORCE_UPDATE}--forcedb" && continue
+            [ "${OPTARG}" == "updatedb" ] && DB_FORCE_UPDATE="${DB_FORCE_UPDATE}--updatedb" && continue
+            [ "${OPTARG}" == "force" ] && FORCE="y" && continue
+            [ "${OPTARG}" == "noclone" ] && NOCLONE="y" && continue
+            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
+            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
+            [ "${OPTARG}" == "no-db" ] && NO_DB="y" && continue
+            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        \?)
+            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        :)
+            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        *)
+            usage >&2
+            exit 1
+            ;;
+    esac
+done
+
+# --forcedb and --updatedb concatenate into DB_FORCE_UPDATE, so passing both
+# (in either order) is detectable here and rejected
+if [ "$DB_FORCE_UPDATE" == "--forcedb--updatedb" ] || [ "$DB_FORCE_UPDATE" == "--updatedb--forcedb" ] ; then
+    echo "Error: options --forcedb and --updatedb are mutually exclusive" >&2
+    exit 1
+elif [ -n "$FORCE" ] && [ -z "$DB_FORCE_UPDATE" ] ; then
+    DB_FORCE_UPDATE="--updatedb"
+fi
+
+#check root privileges and non a root user behind
+[ "$USER" != "root" ] && echo "Needed root privileges" >&2 && exit 1
+if [[ -z "$SUDO_USER" ]] || [[ "$SUDO_USER" = "root" ]]
+then
+    # running as plain root (no sudo): confirm unless in quiet mode
+    [[ -z $QUIET_MODE ]] && ! ask_user "Install in the root user (y/N)? " n && echo "Cancelled" && exit 1
+    export SUDO_USER=root
+fi
+
+# Discover Linux distribution
+# try redhat type
+[ -f /etc/redhat-release ] && _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut -d" " -f1)
+# if not assuming ubuntu type
+[ -f /etc/redhat-release ] || _DISTRO=$(lsb_release -is 2>/dev/null)
+if [ "$_DISTRO" == "Ubuntu" ]
+then
+    _RELEASE=$(lsb_release -rs)
+    if [[ ${_RELEASE%%.*} != 14 ]] && [[ ${_RELEASE%%.*} != 16 ]]
+    then
+        [[ -z $QUIET_MODE ]] &&
+            ! ask_user "WARNING! Not tested Ubuntu version. Continue assuming a trusty (14.XX)' (y/N)? " n &&
+            echo "Cancelled" && exit 1
+        # fixed: was '_RELEASE = 14' — with spaces bash runs a command named
+        # '_RELEASE' instead of assigning the variable
+        _RELEASE=14
+    fi
+elif [ "$_DISTRO" == "CentOS" ]
+then
+    _RELEASE="7"
+    if ! cat /etc/redhat-release | grep -q "7."
+    then
+        [[ -z $QUIET_MODE ]] &&
+            ! ask_user "WARNING! Not tested CentOS version. Continue assuming a '$_RELEASE' type (y/N)? " n &&
+            echo "Cancelled" && exit 1
+    fi
+elif [ "$_DISTRO" == "Red" ]
+then
+    _RELEASE="7"
+    if ! cat /etc/redhat-release | grep -q "7."
+    then
+        [[ -z $QUIET_MODE ]] &&
+            ! ask_user "WARNING! Not tested Red Hat OS version. Continue assuming a '$_RELEASE' type (y/N)? " n &&
+            echo "Cancelled" && exit 1
+    fi
+else #[ "$_DISTRO" != "Ubuntu" -a "$_DISTRO" != "CentOS" -a "$_DISTRO" != "Red" ]
+    # unknown distro: guess from the available package manager, then confirm
+    _DISTRO_DISCOVER=$_DISTRO
+    [ -x /usr/bin/apt-get ] && _DISTRO="Ubuntu" && _RELEASE="14"
+    [ -x /usr/bin/yum ] && _DISTRO="CentOS" && _RELEASE="7"
+    [[ -z $QUIET_MODE ]] &&
+        ! ask_user "WARNING! Not tested Linux distribution '$_DISTRO_DISCOVER '. Continue assuming a '$_DISTRO $_RELEASE' type (y/N)? " n &&
+        echo "Cancelled" && exit 1
+fi
+
+export _DISTRO="$_DISTRO"
+#check if installed as a service
+INSTALL_AS_A_SERVICE=""
+[[ "$_DISTRO" == "Ubuntu" ]] && [[ ${_RELEASE%%.*} == 16 ]] && [[ -z $DEVELOP ]] && INSTALL_AS_A_SERVICE="y"
+
+# Next operations require knowing BASEFOLDER
+if [[ -z "$NOCLONE" ]]; then
+    if [[ -n "$INSTALL_AS_A_SERVICE" ]] ; then
+        # throw-away clone folder: copied to /opt by the service installer and deleted
+        export BASEFOLDER=__openmano__${RANDOM}
+    else
+        export BASEFOLDER="${PWD}/openmano"
+    fi
+    [[ -n "$FORCE" ]] && rm -rf $BASEFOLDER #make idempotent
+else
+    HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+    export BASEFOLDER=$(dirname $HERE)
+fi
+
+# Install OS-level and pip dependencies unless --no-install-packages was given
+if [[ -z "$NO_PACKAGES" ]]
+then
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        UPDATE REPOSITORIES                            #####\n"\
+        "#################################################################"
+    # Ubuntu additionally enables the OpenStack Queens cloud archive
+    [ "$_DISTRO" == "Ubuntu" ] && apt-get update -y &&
+        add-apt-repository -y cloud-archive:queens && apt-get update -y
+
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && yum check-update -y
+    [ "$_DISTRO" == "CentOS" ] && yum install -y epel-release
+    # RHEL has no epel-release package in its repos; fetch the rpm directly
+    [ "$_DISTRO" == "Red" ] && wget http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm \
+        && rpm -ivh epel-release-7-5.noarch.rpm && yum install -y epel-release && rm -f epel-release-7-5.noarch.rpm
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && yum repolist
+
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        INSTALL REQUIRED PACKAGES                      #####\n"\
+        "#################################################################"
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "git make screen wget mysql-client"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "git make screen wget mariadb-client"
+
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        INSTALL PYTHON PACKAGES                        #####\n"\
+        "#################################################################"
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-yaml python-bottle python-mysqldb python-jsonschema "\
+        "python-paramiko python-argcomplete python-requests python-logutils libxml2-dev libxslt-dev python-dev "\
+        "python-pip python-crypto python-networkx"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "PyYAML MySQL-python python-jsonschema "\
+        "python-paramiko python-argcomplete python-requests python-logutils libxslt-devel libxml2-devel python-devel "\
+        "python-pip python-crypto python-networkx"
+    # The only way to install python-bottle on Centos7 is with easy_install or pip
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && easy_install -U bottle
+
+    # required for vmware connector TODO move that to separate opt in install script
+    pip2 install pip==9.0.3 || exit 1 # --upgrade pip install pip 10 that does not work
+    pip2 install pyvcloud==19.1.1 || exit 1
+    pip2 install progressbar || exit 1
+    pip2 install prettytable || exit 1
+    pip2 install pyvmomi || exit 1
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "genisoimage"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "genisoimage"
+
+    # required for fog connector
+    pip2 install fog05rest || exit 1
+
+    # required for OpenNebula connector
+    pip2 install untangle || exit 1
+    pip2 install pyone || exit 1
+    pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca || exit 1
+
+    # required for AWS connector
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-boto"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "python-boto" #TODO check if at Centos it exists with this name, or PIP should be used
+
+    # install openstack client needed for using openstack as a VIM
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-novaclient python-keystoneclient python-glanceclient "\
+        "python-neutronclient python-cinderclient python-openstackclient "\
+        "python-networking-l2gw"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "python-devel" && easy_install \
+        python-novaclient python-keystoneclient python-glanceclient python-neutronclient python-cinderclient \
+        python-openstackclient python-networking-l2gw #TODO revise if gcc python-pip is needed
+
+    # required for Azure
+    pip2 install azure
+
+fi  # [[ -z "$NO_PACKAGES" ]]
+
+# Clone RO (unless --noclone) and check out the requested refspec: -b value,
+# or the latest stable v* tag, or master when --develop was given
+if [[ -z $NOCLONE ]]; then
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        DOWNLOAD SOURCE                                #####\n"\
+        "#################################################################"
+    if [[ -d "${BASEFOLDER}" ]] ; then
+        if [[ -n "$FORCE" ]] ; then
+            echo "deleting '${BASEFOLDER}' folder"
+            rm -rf "$BASEFOLDER" #make idempotent
+        elif [[ -z "$QUIET_MODE" ]] ; then
+            ! ask_user "folder '${BASEFOLDER}' exists, overwrite (y/N)? " n && echo "Cancelled!" && exit 1
+            rm -rf "$BASEFOLDER"
+        else
+            echo "'${BASEFOLDER}' folder exists. Use "--force" to overwrite" >&2 && exit 1
+        fi
+    fi
+    # clone as the invoking user so the tree is not owned by root
+    su $SUDO_USER -c "git clone ${GIT_URL} ${BASEFOLDER}" || ! echo "Error cannot clone from '$GIT_URL'" >&2 || exit 1
+    if [[ -n $COMMIT_ID ]] ; then
+        echo -e "Installing osm-RO from refspec: $COMMIT_ID"
+        su $SUDO_USER -c "git -C ${BASEFOLDER} checkout $COMMIT_ID" ||
+            ! echo "Error cannot checkout '$COMMIT_ID' from '$GIT_URL'" >&2 || exit 1
+    elif [[ -z $DEVELOP ]]; then
+        # newest version-sorted v* tag is taken as the latest stable release
+        LATEST_STABLE_TAG=`git -C "${BASEFOLDER}" tag -l "v[0-9]*" | sort -V | tail -n1`
+        echo -e "Installing osm-RO from refspec: tags/${LATEST_STABLE_TAG}"
+        su $SUDO_USER -c "git -C ${BASEFOLDER} checkout tags/${LATEST_STABLE_TAG}" ||
+            ! echo "Error cannot checkout 'tags/${LATEST_STABLE_TAG}' from '$GIT_URL'" >&2 || exit 1
+    else
+        echo -e "Installing osm-RO from refspec: master"
+    fi
+    su $SUDO_USER -c "cp ${BASEFOLDER}/.gitignore-common ${BASEFOLDER}/.gitignore"
+fi
+
+echo -e "\n"\
+    "#################################################################\n"\
+    "#####        INSTALLING OSM-IM LIBRARY                      #####\n"\
+    "#################################################################"
+    ${BASEFOLDER}/scripts/install-osm-im.sh
+    # 'print x' is Python 2 syntax, so 'python' must resolve to a Python 2 interpreter here
+    OSM_IM_PATH=`python -c 'import osm_im; print osm_im.__path__[0]'` ||
+        ! echo "ERROR installing python-osm-im library!!!" >&2 || exit 1
+
+echo -e "\n"\
+    "#################################################################\n"\
+    "#####        INSTALLING OVIM LIBRARY                        #####\n"\
+    "#################################################################"
+    ${BASEFOLDER}/scripts/install-lib-osm-openvim.sh
+    OSMLIBOVIM_PATH=`python -c 'import lib_osm_openvim; print lib_osm_openvim.__path__[0]'` ||
+        ! echo "ERROR installing python-lib-osm-openvim library!!!" >&2 || exit 1
+
+if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ]
+then
+ echo -e "\n"\
+ "#################################################################\n"\
+ "##### CONFIGURE firewalld #####\n"\
+ "#################################################################"
+ if [[ -z $QUIET_MODE ]] || ask_user "Configure firewalld for openmanod port 9090 (Y/n)? " y
+ then
+ #Creates a service file for openmano
+ echo '<?xml version="1.0" encoding="utf-8"?>
+<service>
+ <short>openmanod</short>
+ <description>openmanod service</description>
+ <port protocol="tcp" port="9090"/>
+</service>' > /etc/firewalld/services/openmanod.xml
+ #put proper permissions
+ pushd /etc/firewalld/services > /dev/null
+ restorecon openmanod.xml
+ chmod 640 openmanod.xml
+ popd > /dev/null
+ #Add the openmanod service to the default zone permanently and reload the firewall configuration
+ firewall-cmd --permanent --add-service=openmanod > /dev/null
+ firewall-cmd --reload > /dev/null
+ echo "done."
+ else
+ echo "skipping."
+ fi
+fi
+
+echo -e "\n"\
+    "#################################################################\n"\
+    "#####        CONFIGURE OPENMANO CLIENT                      #####\n"\
+    "#################################################################"
+#creates a link at ~/bin if not configured as a service
+if [[ -z "$INSTALL_AS_A_SERVICE" ]]
+then
+    su $SUDO_USER -c 'mkdir -p ${HOME}/bin'
+    su $SUDO_USER -c 'rm -f ${HOME}/bin/openmano'
+    su $SUDO_USER -c 'rm -f ${HOME}/bin/openmano-report'
+    su $SUDO_USER -c 'rm -f ${HOME}/bin/service-openmano'
+    # quoting is deliberate: ${BASEFOLDER} (double quotes) expands now, while
+    # ${HOME} (single quotes) expands inside the target user's shell
+    su $SUDO_USER -c "ln -s '${BASEFOLDER}/openmano' "'${HOME}/bin/openmano'
+    su $SUDO_USER -c "ln -s '${BASEFOLDER}/scripts/openmano-report.sh' "'${HOME}/bin/openmano-report'
+    su $SUDO_USER -c "ln -s '${BASEFOLDER}/scripts/service-openmano' "'${HOME}/bin/service-openmano'
+
+    #insert /home/<user>/bin in the PATH
+    #skipped because normally this is done automatically when ~/bin exists
+    #if ! su $SUDO_USER -c 'echo $PATH' | grep -q "${HOME}/bin"
+    #then
+    #    echo "    inserting /home/$SUDO_USER/bin in the PATH at .bashrc"
+    #    su $SUDO_USER -c 'echo "PATH=\$PATH:\${HOME}/bin" >> ~/.bashrc'
+    #fi
+
+    # root's shell does not add ~/bin automatically, so do it explicitly
+    if [[ $SUDO_USER == root ]]
+    then
+        if ! echo $PATH | grep -q "${HOME}/bin"
+        then
+            echo "PATH=\$PATH:\${HOME}/bin" >> ${HOME}/.bashrc
+        fi
+    fi
+fi
+
+#configure arg-autocomplete for this user
+#in case of minimal installation this package is not installed by default
+[[ "$_DISTRO" == "CentOS" || "$_DISTRO" == "Red" ]] && yum install -y bash-completion
+#su $SUDO_USER -c 'mkdir -p ~/.bash_completion.d'
+su $SUDO_USER -c 'activate-global-python-argcomplete --user'
+if ! su $SUDO_USER -c 'grep -q bash_completion.d/python-argcomplete.sh ${HOME}/.bashrc'
+then
+    echo "    inserting .bash_completion.d/python-argcomplete.sh execution at .bashrc"
+    su $SUDO_USER -c 'echo ". ${HOME}/.bash_completion.d/python-argcomplete.sh" >> ~/.bashrc'
+fi
+
+# Install mysql/mariadb and create/update the two RO databases unless --no-db
+if [ -z "$NO_DB" ]; then
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        INSTALL DATABASE SERVER                        #####\n"\
+        "#################################################################"
+
+    if [ -n "$QUIET_MODE" ]; then
+        DB_QUIET='-q'
+    fi
+    # ${DBPASSWD_PARAM/p/P} turns the '-pPASS' client option into the
+    # installer script's '-PPASS' admin-password option
+    ${BASEFOLDER}/database_utils/install-db-server.sh -U $DBUSER ${DBPASSWD_PARAM/p/P} $DB_QUIET $DB_FORCE_UPDATE || exit 1
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        CREATE AND INIT MANO_VIM DATABASE              #####\n"\
+        "#################################################################"
+    # Install mano_vim_db after setup
+    ${OSMLIBOVIM_PATH}/database_utils/install-db-server.sh -U $DBUSER ${DBPASSWD_PARAM/p/P} -u mano -p manopw -d mano_vim_db --no-install-packages $DB_QUIET $DB_FORCE_UPDATE || exit 1
+fi  # [ -z "$NO_DB" ]
+
+# On Ubuntu 16 (non-develop) hand over to the service installer, which copies
+# the tree to /opt and registers the osm-ro systemd unit; '-d' deletes the
+# temporary clone afterwards
+if [[ -n "$INSTALL_AS_A_SERVICE" ]]
+then
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        CONFIGURE OPENMANO SERVICE                     #####\n"\
+        "#################################################################"
+
+    ${BASEFOLDER}/scripts/install-openmano-service.sh -f ${BASEFOLDER} `[[ -z "$NOCLONE" ]] && echo "-d"`
+    # rm -rf ${BASEFOLDER}
+    # alias service-openmano="service openmano"
+    # echo 'alias service-openmano="service openmano"' >> ${HOME}/.bashrc
+    echo
+    echo "Done!  installed at /opt/openmano"
+    echo " Manage server with 'sudo -E service osm-ro start|stop|status|...' "
+else
+    echo
+    echo "Done!  you may need to logout and login again for loading client configuration"
+    echo " Run './${BASEFOLDER}/scripts/service-openmano start' for starting openmano in a screen"
+fi
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+
+# author: Alfonso Tierno
+
+# It uses following env, if not provided filling by default
+# (the parent installer exports these; standalone runs fall back to defaults)
+[ -z "$GIT_OSMIM_URL" ] && GIT_OSMIM_URL=https://osm.etsi.org/gerrit/osm/IM.git
+[ -z "$DEVELOP" ] && DEVELOP=""
+# folder where RO is installed (two levels above this script's location)
+[ -z "$BASEFOLDER" ] && HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]})) && BASEFOLDER=$(dirname $(dirname $HERE))
+[ -z "$SUDO_USER" ] && SUDO_USER="$USER"
+[ -z "$NO_PACKAGES" ] && NO_PACKAGES=""
+[ -z "$_DISTRO" ] && _DISTRO="Ubuntu"
+
+# Print command-line help for the osm-im installer.
+# Fixed user-facing typos: "requires packages" -> "required packages",
+# "This avoid" -> "This avoids".
+function usage(){
+    echo -e "usage: sudo -E $0 [OPTIONS]"
+    echo -e "Install last stable source code of osm-im and the needed packages"
+    echo -e "  OPTIONS"
+    echo -e "     -h --help:  show this help"
+    echo -e "     -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
+    echo -e "                    -b master          (main branch)"
+    echo -e "                    -b v2.0            (v2.0 branch)"
+    echo -e "                    -b tags/v1.1.0     (a specific tag)"
+    echo -e "                    ..."
+    echo -e "     --develop: install last master version for developers"
+    echo -e "     --no-install-packages: use this option to skip updating and installing the required packages. This" \
+            "avoids wasting time if you are sure required packages are present e.g. because of a previous installation"
+}
+# Parse command-line options; long options come through the '-:' trick
+while getopts ":b:h-:" o; do
+    case "${o}" in
+        b)
+            export COMMIT_ID=${OPTARG}
+            ;;
+        h)
+            usage && exit 0
+            ;;
+        -)
+            [ "${OPTARG}" == "help" ] && usage && exit 0
+            [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
+            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
+            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
+            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        \?)
+            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        :)
+            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        *)
+            usage >&2
+            exit 1
+            ;;
+    esac
+done
+
+# Clone IM inside the RO tree (as the invoking user, not root) and check out
+# the requested refspec: -b value, latest stable v* tag, or master (--develop)
+su $SUDO_USER -c "git -C ${BASEFOLDER} clone ${GIT_OSMIM_URL} IM" ||
+    ! echo "Error cannot clone from '${GIT_OSMIM_URL}'" >&2 || exit 1
+if [[ -n $COMMIT_ID ]] ; then
+    echo -e "Installing osm-IM from refspec: $COMMIT_ID"
+    su $SUDO_USER -c "git -C ${BASEFOLDER}/IM checkout $COMMIT_ID" ||
+        ! echo "Error cannot checkout '$COMMIT_ID' from '${GIT_OSMIM_URL}'" >&2 || exit 1
+elif [[ -z $DEVELOP ]]; then
+    LATEST_STABLE_TAG=`git -C "${BASEFOLDER}/IM" tag -l "v[0-9]*" | sort -V | tail -n1`
+    echo -e "Installing osm-IM from refspec: tags/${LATEST_STABLE_TAG}"
+    su $SUDO_USER -c "git -C ${BASEFOLDER}/IM checkout tags/${LATEST_STABLE_TAG}" ||
+        ! echo "Error cannot checkout 'tags/${LATEST_STABLE_TAG}' from '${GIT_OSMIM_URL}'" >&2 || exit 1
+else
+    echo -e "Installing osm-IM from refspec: master"
+fi
+
+# Install debian dependencies before setup.py
+if [[ -z "$NO_PACKAGES" ]]
+then
+    # apt-get update
+    # apt-get install -y git python-pip
+    # pip2 install pip==9.0.3
+    python3 -m pip install pyangbind || exit 1
+fi
+
+# Generate the osm_im Python package from the YANG models using pyang with
+# the pyangbind plugin (one generated module per model)
+PYBINDPLUGIN=$(python3 -c 'import pyangbind; import os; print(os.path.dirname(pyangbind.__file__)+"/plugin")')
+su $SUDO_USER -c 'mkdir -p "'${BASEFOLDER}/IM/osm_im'"'
+su $SUDO_USER -c 'touch "'${BASEFOLDER}/IM/osm_im/__init__.py'"'
+# wget -q https://raw.githubusercontent.com/RIFTIO/RIFT.ware/RIFT.ware-4.4.1/modules/core/util/yangtools/yang/rw-pb-ext.yang -O "${BASEFOLDER}/IM/models/yang/rw-pb-ext.yang"
+for target in vnfd nsd ; do
+    pyang -Werror --path "${BASEFOLDER}/IM/models/yang" --plugindir "${PYBINDPLUGIN}" -f pybind \
+        -o "${BASEFOLDER}/IM/osm_im/${target}.py" "${BASEFOLDER}/IM/models/yang/${target}.yang"
+done
+
+# editable (-e) install so the generated modules are importable in place
+python3 -m pip install -e "${BASEFOLDER}/IM" || ! echo "ERROR installing python-osm-im library!!!" >&2 || exit 1
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#It generates a report for debugging
+# Dumps logs, version, configuration and the openmano object inventory to
+# stdout; intended to be redirected to a file and attached to bug reports.
+
+DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
+DIRNAME=$(dirname $DIRNAME )
+OMCLIENT=openmano
+
+#get screen log files at the beginning
+echo
+echo "-------------------------------"
+echo "log files"
+echo "-------------------------------"
+echo "-------------------------------"
+echo "OPENMANO"
+echo "-------------------------------"
+echo "cat /var/log/osm/openmano.log*"
+cat /var/log/osm/openmano.log*
+echo
+echo "-------------------------------"
+echo
+
+#get version
+echo
+echo "-------------------------------"
+echo "version"
+echo "-------------------------------"
+echo "-------------------------------"
+echo "OPENMANO"
+echo "-------------------------------"
+echo "openmanod --version"
+openmanod --version
+echo
+echo "-------------------------------"
+echo
+
+#get configuration files
+echo "-------------------------------"
+echo "Configuration files"
+echo "-------------------------------"
+echo "-------------------------------"
+echo "OPENMANO"
+echo "-------------------------------"
+echo "cat /etc/osm/openmanod.cfg"
+cat /etc/osm/openmanod.cfg
+echo "-------------------------------"
+echo
+
+#get list of items
+# Two passes: first with default verbosity, then with -vvv for full detail.
+# Each list command is echoed before it runs so the report is self-describing.
+for verbose in "" "-vvv"
+do
+    echo "-------------------------------"
+    echo "OPENMANO$verbose"
+    echo "-------------------------------"
+    # NOTE(review): the label prints $verbose but 'config' is run without it — confirm intent
+    echo "$OMCLIENT config $verbose"
+    $OMCLIENT config
+    echo "-------------------------------"
+    echo "$OMCLIENT tenant-list $verbose"
+    $OMCLIENT tenant-list $verbose
+    echo "-------------------------------"
+    echo "$OMCLIENT datacenter-list --all"
+    $OMCLIENT datacenter-list --all
+    echo "-------------------------------"
+    echo "$OMCLIENT datacenter-list $verbose"
+    $OMCLIENT datacenter-list $verbose
+    echo "-------------------------------"
+    # first column of each list is the item id; iterate to dump each item.
+    # The awk filter '$1!="No"' skips the "No <items> found" message lines.
+    dclist=`$OMCLIENT datacenter-list |awk '{print $1}'`
+    for dc in $dclist; do
+        echo "$OMCLIENT datacenter-net-list $dc $verbose"
+        $OMCLIENT datacenter-net-list $dc $verbose
+        echo "-------------------------------"
+    done
+    echo "$OMCLIENT vnf-list $verbose"
+    $OMCLIENT vnf-list $verbose
+    echo "-------------------------------"
+    vnflist=`$OMCLIENT vnf-list |awk '$1!="No" {print $1}'`
+    for vnf in $vnflist; do
+        echo "$OMCLIENT vnf-list $vnf $verbose"
+        $OMCLIENT vnf-list $vnf $verbose
+        echo "-------------------------------"
+    done
+    echo "$OMCLIENT scenario-list $verbose"
+    $OMCLIENT scenario-list $verbose
+    echo "-------------------------------"
+    scenariolist=`$OMCLIENT scenario-list |awk '$1!="No" {print $1}'`
+    for sce in $scenariolist; do
+        echo "$OMCLIENT scenario-list $sce $verbose"
+        $OMCLIENT scenario-list $sce $verbose
+        echo "-------------------------------"
+    done
+    echo "$OMCLIENT instance-scenario-list $verbose"
+    $OMCLIENT instance-scenario-list $verbose
+    echo "-------------------------------"
+    instancelist=`$OMCLIENT instance-scenario-list |awk '$1!="No" {print $1}'`
+    for i in $instancelist; do
+        echo "$OMCLIENT instance-scenario-list $i $verbose"
+        $OMCLIENT instance-scenario-list $i $verbose
+        echo "-------------------------------"
+    done
+    echo
+
+done
+echo
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+
+#launch openmano inside a screen.
+#or call service if it is installed on systemd
+
+
+DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
+DIRNAME=$(dirname $DIRNAME )
+DIR_OM=$(dirname $DIRNAME )
+
+# Print command-line help for the service-openmano launcher.
+function usage(){
+    echo -e "Usage: $0 [openmano/mano] start|stop|restart|status"
+    echo -e "  Launch|Removes|Restart|Getstatus openmano on a screen/service"
+    echo -e "     -n --screen-name NAME : name of screen to launch openmano (default mano or service)"
+    echo -e "     -h --help: shows this help"
+    echo -e "     -- PARAMS use to separate PARAMS that will be send to the service. e.g. -pPORT -PADMINPORT --dbname=DDBB"
+}
+
+
+function kill_pid(){
+    #send TERM signal and wait 5 seconds and send KILL signal if still running
+    #PARAMS: $1: PID of process to terminate
+    kill $1 #send TERM signal
+    WAIT=5
+    # fixed: 'grep -q $1' matched substrings, so e.g. PID 12 also matched a
+    # live PID 123 and triggered a bogus SIGKILL; -w requires a whole-word match
+    while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -qw "$1"
+    do
+        sleep 1
+        WAIT=$((WAIT-1))
+        [ $WAIT -eq 0 ] && echo -n "sending SIGKILL... " && kill -9 $1 #kill when count reach 0
+    done
+    echo "done"
+}
+
#process options; "$@" (instead of the original $*) preserves argument
#word-splitting/quoting when forwarding to the option parser
source ${DIRNAME}/get-options.sh "screen-name:n= help:h --" "$@" || exit 1

#help
[ -n "$option_help" ] && usage && exit 0


#obtain parameters
om_list=""
#om_action="start"   #uncomment to get a default action
action_list=""
om_params="$option__"

for param in $params
do
    [ "$param" == "start" -o "$param" == "stop" -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param && continue
    [ "$param" == "openmano" -o "$param" == "mano" ] && om_list="$om_list mano" && continue
    #anything else is an error
    echo "invalid argument '$param'? Type -h for help" >&2 && exit 1
done

[[ -n $option_screen_name ]] && option_screen_name=${option_screen_name#*.}   #allow the 'pid.name' format and keep only the name
#check that an action is provided ('exit -1' is not portable; use 1)
[ -z "$om_action" ] && usage >&2 && exit 1

#if no components supplied assume all
[ -z "$om_list" ] && om_list="mano"
+
function find_process_id(){ #PARAMS: command screen-name
    # Print (space separated) the PIDs of this user's processes whose command
    # line matches $1; when $2 is given, only processes attached to the screen
    # session named $2 are listed. Always ends with a newline.
    for process_id in `ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep "${1}" | awk '{print $1}'`
    do
        # The STY variable in the process environment holds its screen
        # session identifier in 'pid.name' form (empty if not in a screen)
        scname=$(ps wwep $process_id | grep -o 'STY=\S*')
        scname=${scname#STY=}
        # skip processes running in a different screen than requested
        [[ -n "$2" ]] && [[ "${scname#*.}" != "$2" ]] && continue
        echo -n "${process_id} "
    done
    echo
}
+
# Main loop: apply the requested action (status/stop/start/restart) to every
# selected component (currently only 'mano').
for om_component in $om_list
do
    screen_name="${om_component}"
    [[ -n "$option_screen_name" ]] && screen_name=$option_screen_name
    [ "${om_component}" == "mano" ] && om_cmd="./openmanod" && om_name="openmano " && om_dir=$(readlink -f ${DIR_OM})
    #obtain PID of program (restricted to the given screen, if any)
    component_id=`find_process_id "${om_cmd}" $option_screen_name`
    processes=$(echo $component_id | wc -w)

    #status: report pid/screen/cmd for every matching process
    if [ "$om_action" == "status" ]
    then
        running=""
        for process_id in $component_id
        do
            #STY in the process environment holds its screen 'pid.name'
            scname=$(ps wwep $process_id | grep -o 'STY=\S*')
            scname=${scname#STY=}
            [[ -n "$option_screen_name" ]] && [[ "${scname#*.}" != "$option_screen_name" ]] && continue
            printf "%-15s" "pid: ${process_id},"
            [[ -n "$scname" ]] && printf "%-25s" "screen: ${scname},"
            echo cmd: $(ps -o cmd p $process_id | tail -n1 )
            running=y
        done
        #if installed as a systemd service and no screen name was provided,
        #consider it running and let systemd own the status
        [[ -f /etc/systemd/system/osm-ro.service ]] && [[ -z $option_screen_name ]] && running=y #&& service osm-ro status
        if [ -z "$running" ]
        then
            echo -n " $om_name not running" && [[ -n "$option_screen_name" ]] && echo " on screen '$option_screen_name'" || echo
        fi
    fi

    #if installed as a systemd service and no screen name was provided,
    #delegate the whole action to the service and exit with its result
    [[ -f /etc/systemd/system/osm-ro.service ]] && [[ -z $option_screen_name ]] && service osm-ro $om_action && ( [[ $om_action == status ]] || sleep 5 ) && exit $?


    #stop
    if [ "$om_action" == "stop" -o "$om_action" == "restart" ]
    then
        #terminate the program (only when the target is unambiguous)
        [ $processes -gt 1 ] && echo "$processes processes are running, specify with --screen-name" && continue
        [ $processes -eq 1 ] && echo -n " stopping $om_name ... " && kill_pid $component_id
        component_id=""
        #terminate the screen session as well
        if screen -wipe | grep -q -e "\.${screen_name}\b"
        then
            screen -S $screen_name -p 0 -X stuff "exit\n" || echo
            sleep 1
        fi
    fi

    #start
    if [ "$om_action" == "start" -o "$om_action" == "restart" ]
    then
        #compute log file name; left empty when the logs dir cannot be created
        logfile=""
        mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/open${screen_name}.log || echo "can not create logs directory $DIR_OM/logs"
        #skip when already running
        [ -n "$component_id" ] && echo " $om_name is already running. Skipping" && continue
        #create the screen session if not created yet
        echo -n " starting $om_name ... "
        if ! screen -wipe | grep -q -e "\.${screen_name}\b"
        then
            pushd ${om_dir} > /dev/null
            screen -dmS ${screen_name} bash
            sleep 1
            popd > /dev/null
        else
            echo -n " using existing screen '${screen_name}' ... "
            screen -S ${screen_name} -p 0 -X log off
            screen -S ${screen_name} -p 0 -X stuff "cd ${om_dir}\n"
            sleep 1
        fi
        #rotate old log files one index up (…,2->3, 1->2, current->1) and log
        #the new run at index 0
        if [[ -n $logfile ]]
        then
            for index in 8 7 6 5 4 3 2 1
            do
                [[ -f ${logfile}.${index} ]] && mv ${logfile}.${index} ${logfile}.$((index+1))
            done
            [[ -f ${logfile} ]] && mv ${logfile} ${logfile}.1
            screen -S ${screen_name} -p 0 -X logfile ${logfile}
            screen -S ${screen_name} -p 0 -X log on
        fi
        #send the launch command to the screen session
        screen -S ${screen_name} -p 0 -X stuff "${om_cmd}${om_params}\n"
        #wait until the program is ready (longer when a log file can be watched)
        [[ -n $logfile ]] && timeout=120 #2 minutes
        [[ -z $logfile ]] && timeout=20
        while [[ $timeout -gt 0 ]]
        do
            #check if it is running
            log_lines=0
            [[ -n $logfile ]] && log_lines=`head ${logfile} | wc -l`
            component_id=`find_process_id "${om_cmd}${om_params}" $screen_name`
            if [[ -z $component_id ]]
            then #process not started or finished
                #if it already wrote several log lines it did start and then
                #died, so report the error instead of waiting further
                [[ $log_lines -ge 2 ]] && echo -n "ERROR, it has exited." && break
            fi
            [[ -n $logfile ]] && grep -q "open${om_component}d ready" ${logfile} && break
            sleep 1
            timeout=$((timeout -1))
        done
        if [[ -n $logfile ]] && [[ $timeout == 0 ]]
        then
            echo -n "timeout!"
        else
            echo -n "running on 'screen -x ${screen_name}'."
        fi
        [[ -n $logfile ]] && echo " Logging at '${logfile}'" || echo
    fi
done
+
+
+
+
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+"""
+This is the thread for the http server North API.
+Two thread will be launched, with normal and administrative permissions.
+"""
+import yaml
+from uuid import uuid4
+from http import HTTPStatus
+
+__author__ = "Alfonso Tierno"
+__date__ = "2019-10-22"
+__version__ = "0.1"
+version_date = "Oct 2019"
+
+
class SdnException(Exception):
    """Exception raised by the SDN layer, carrying the HTTP status code that
    the northbound API should return for it (defaults to 400 BAD_REQUEST)."""

    def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST.value):
        # remember the HTTP code so callers can map the error to a response
        self.http_code = http_code
        super().__init__(message)
+
+
class Sdn:
    """Access layer for the SDN/openflow controllers that the RO stores as
    'wims'/'wim_accounts' rows in the database.

    NOTE(review): several methods use ``self.config['ofcs_thread']`` and
    ``self.net_update_ofc_thread`` which are never initialized/defined here --
    presumably pending the py3 plugin migration (see ``start_service``);
    confirm before relying on those code paths.
    """

    def __init__(self, db, plugins):
        """
        :param db: database access object (get_rows/new_rows/update_rows/...)
        :param plugins: dict of loaded sdn plugins
        """
        self.db = db
        self.plugins = plugins

    def start_service(self):
        """Start the service; currently a no-op."""
        pass  # TODO py3 needed to load wims and plugins

    def stop_service(self):
        """Stop the service; nothing needed."""
        pass

    def show_network(self, uuid):
        """Not implemented."""
        pass

    def delete_network(self, uuid):
        """Not implemented."""
        pass

    def new_network(self, network):
        """Not implemented."""
        pass

    def get_openflow_rules(self, network_id=None):
        """
        Get openflow rules from DB.
        :param network_id: Network id; if None, rules of all networks are retrieved
        :return: list of openflow rules (one dict per rule)
        :raises SdnException: on database errors
        """
        if not network_id:
            where_ = {}
        else:
            where_ = {"net_id": network_id}
        result, content = self.db.get_table(
            SELECT=("name", "net_id", "ofc_id", "priority", "vlan_id", "ingress_port", "src_mac", "dst_mac",
                    "actions"),
            WHERE=where_, FROM='of_flows')

        if result < 0:
            raise SdnException(str(content), -result)
        return content

    def edit_openflow_rules(self, network_id=None):
        """
        Reinstall the openflow rules of a network (or of all networks).
        :param network_id: Network id; if None all 'ptp'/'data' networks are updated
        :return: number of nets updated
        :raises SdnException: on database or update errors
        """
        if not network_id:
            where_ = {}
        else:
            where_ = {"uuid": network_id}
        result, content = self.db.get_table(SELECT=("uuid", "type"), WHERE=where_, FROM='nets')

        if result < 0:
            raise SdnException(str(content), -result)

        for net in content:
            # only 'ptp' and 'data' networks carry openflow rules
            if net["type"] != "ptp" and net["type"] != "data":
                result -= 1
                continue

            try:
                # NOTE(review): net_update_ofc_thread is not defined in this
                # class -- confirm where it is expected to come from
                self.net_update_ofc_thread(net['uuid'])
            except SdnException as e:
                raise SdnException("Error updating network '{}' {}".format(net['uuid'], e),
                                   HTTPStatus.INTERNAL_SERVER_ERROR.value)
            except Exception as e:
                raise SdnException("Error updating network '{}' {}".format(net['uuid'], e),
                                   HTTPStatus.INTERNAL_SERVER_ERROR.value)

        return result

    def delete_openflow_rules(self, ofc_id=None):
        """
        Delete ALL openflow rules of an openflow controller.
        :param ofc_id: openflow controller id; the 'Default' one is used when omitted
        :return: operation result
        :raises SdnException: when the controller is not found/running or the task fails
        """
        # Fixed control flow: the 'controller not found' branch was previously
        # attached to the 'if r < 0' check, so an unknown ofc_id raised
        # NameError (r undefined) and the NOT_FOUND error was unreachable.
        if not ofc_id:
            if 'Default' in self.config['ofcs_thread']:
                r, c = self.config['ofcs_thread']['Default'].insert_task("clear-all")
            else:
                raise SdnException("Default Openflow controller not running", HTTPStatus.NOT_FOUND.value)
        elif ofc_id in self.config['ofcs_thread']:
            r, c = self.config['ofcs_thread'][ofc_id].insert_task("clear-all")
        else:
            raise SdnException("Openflow controller not found with ofc_id={}".format(ofc_id),
                               HTTPStatus.NOT_FOUND.value)
        if r < 0:
            raise SdnException(str(c), -r)
        return r

    def get_openflow_ports(self, ofc_id=None):
        """
        Obtain switch port names known by an openflow controller.
        :param ofc_id: openflow controller id; the 'Default' one is used when omitted
        :return: dict of switch ports (physical port name -> openflow index)
        :raises SdnException: when the controller is not found/running
        """
        if not ofc_id:
            if 'Default' in self.config['ofcs_thread']:
                conn = self.config['ofcs_thread']['Default'].OF_connector
            else:
                raise SdnException("Default Openflow controller not running", HTTPStatus.NOT_FOUND.value)

        elif ofc_id in self.config['ofcs_thread']:
            conn = self.config['ofcs_thread'][ofc_id].OF_connector
        else:
            raise SdnException("Openflow controller not found with ofc_id={}".format(ofc_id),
                               HTTPStatus.NOT_FOUND.value)
        return conn.pp2ofi

    def new_of_controller(self, ofc_data):
        """
        Create a new openflow controller into DB.
        :param ofc_data: dict with the controller data; requires 'name' and
            'type', and either 'url' or both 'ip' and 'port'
        :return: openflow controller uuid (the wim_account uuid)
        :raises SdnException: when neither 'url' nor 'ip'+'port' are provided
        """
        db_wim = {
            "uuid": str(uuid4()),
            "name": ofc_data["name"],
            "description": ofc_data.get("description"),
            "type": ofc_data["type"],
            "wim_url": ofc_data.get("url"),
        }
        if not db_wim["wim_url"]:
            if not ofc_data.get("ip") or not ofc_data.get("port"):
                raise SdnException("Provide either 'url' or both 'ip' and 'port'")
            db_wim["wim_url"] = "{}:{}".format(ofc_data["ip"], ofc_data["port"])

        db_wim_account = {
            "uuid": str(uuid4()),
            "name": ofc_data["name"],
            "wim_id": db_wim["uuid"],
            "sdn": "true",
            "user": ofc_data.get("user"),
            "password": ofc_data.get("password"),
        }
        # dpid/version travel inside the serialized 'config' column
        db_wim_account_config = ofc_data.get("config", {})
        if ofc_data.get("dpid"):
            db_wim_account_config["dpid"] = ofc_data["dpid"]
        if ofc_data.get("version"):
            db_wim_account_config["version"] = ofc_data["version"]

        db_wim_account["config"] = yaml.safe_dump(db_wim_account_config, default_flow_style=True, width=256)

        db_tables = [
            {"wims": db_wim},
            {"wim_accounts": db_wim_account},
        ]
        uuid_list = [db_wim["uuid"], db_wim_account["uuid"]]
        self.db.new_rows(db_tables, uuid_list)
        return db_wim_account["uuid"]

    def edit_of_controller(self, of_id, ofc_data):
        """
        Edit an openflow controller entry in the DB.
        :param of_id: uuid of the controller (wim_account uuid)
        :param ofc_data: dict with the fields to change
        :raises SdnException: when no data is provided or 'ip'/'port' come incomplete
        """
        if not ofc_data:
            raise SdnException("No data received during update of OF controller",
                               http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value)

        # get database wim_accounts
        wim_account = self._get_of_controller(of_id)

        db_wim_update = {x: ofc_data[x] for x in ("name", "description", "type", "wim_url") if x in ofc_data}
        db_wim_account_update = {x: ofc_data[x] for x in ("name", "user", "password") if x in ofc_data}
        db_wim_account_config = ofc_data.get("config", {})

        if ofc_data.get("ip") or ofc_data.get("port"):
            if not ofc_data.get("ip") or not ofc_data.get("port"):
                raise SdnException("Provide both 'ip' and 'port'")
            db_wim_update["wim_url"] = "{}:{}".format(ofc_data["ip"], ofc_data["port"])

        if ofc_data.get("dpid"):
            db_wim_account_config["dpid"] = ofc_data["dpid"]
        if ofc_data.get("version"):
            db_wim_account_config["version"] = ofc_data["version"]

        if db_wim_account_config:
            # merge new entries into the stored serialized config.
            # NOTE(review): yaml.load with yaml.Loader executes custom tags;
            # the data comes from our own DB, but consider safe_load
            db_wim_account_update["config"] = yaml.load(wim_account["config"], Loader=yaml.Loader) or {}
            db_wim_account_update["config"].update(db_wim_account_config)
            db_wim_account_update["config"] = yaml.safe_dump(db_wim_account_update["config"],
                                                             default_flow_style=True, width=256)

        if db_wim_account_update:
            self.db.update_rows('wim_accounts', db_wim_account_update, WHERE={'uuid': of_id})
        if db_wim_update:
            self.db.update_rows('wims', db_wim_update, WHERE={'uuid': wim_account["wim_id"]})

    def _get_of_controller(self, of_id):
        """Return the single sdn wim_account row with this uuid, or raise."""
        wim_accounts = self.db.get_rows(FROM='wim_accounts', WHERE={"uuid": of_id, "sdn": "true"})

        if not wim_accounts:
            raise SdnException("Cannot find sdn controller with id='{}'".format(of_id),
                               http_code=HTTPStatus.NOT_FOUND.value)
        elif len(wim_accounts) > 1:
            raise SdnException("Found more than one sdn controller with id='{}'".format(of_id),
                               http_code=HTTPStatus.CONFLICT.value)
        return wim_accounts[0]

    def delete_of_controller(self, of_id):
        """
        Delete an openflow controller from DB.
        :param of_id: openflow controller uuid
        :return: the deleted controller uuid
        :raises SdnException: when the controller is not found
        """
        wim_account = self._get_of_controller(of_id)
        self.db.delete_row(FROM='wim_accounts', WHERE={"uuid": of_id})
        self.db.delete_row(FROM='wims', WHERE={"uuid": wim_account["wim_id"]})
        return of_id

    @staticmethod
    def _format_of_controller(wim_account, wim=None):
        """Convert DB rows into the API representation of a controller."""
        of_data = {x: wim_account[x] for x in ("uuid", "name", "user")}
        if isinstance(wim_account["config"], str):
            config = yaml.load(wim_account["config"], Loader=yaml.Loader)
            of_data["dpid"] = config.get("switch_id") or config.get("dpid")
            of_data["version"] = config.get("version")
        if wim:
            of_data["url"] = wim["wim_url"]
            of_data["type"] = wim["type"]
        return of_data

    def show_of_controller(self, of_id):
        """
        Show an openflow controller from DB.
        :param of_id: openflow controller uuid
        :return: dict with the controller data
        :raises SdnException: when the controller is not found
        """
        wim_account = self._get_of_controller(of_id)
        wims = self.db.get_rows(FROM='wims', WHERE={"uuid": wim_account["wim_id"]})
        return self._format_of_controller(wim_account, wims[0])

    def get_of_controllers(self, filter=None):
        """
        List the openflow controllers stored in the DB.
        :param filter: optional dict with extra WHERE conditions
        :return: list of controller dicts
        """
        filter = filter or {}
        filter["sdn"] = "true"
        wim_accounts = self.db.get_rows(FROM='wim_accounts', WHERE=filter)
        return [self._format_of_controller(w) for w in wim_accounts]

    def set_of_port_mapping(self, maps, sdn_id, switch_dpid, vim_id):
        """
        Create new port mapping entries.
        :param maps: list of port mapping dicts:
            [{"ofc_id": <ofc_id>, "region": datacenter region, "compute_node": compute uuid,
              "pci": pci address, "switch_dpid": switch dpid, "switch_port": port name,
              "switch_mac": mac}]
        :param sdn_id: ofc id
        :param switch_dpid: default switch dpid (used when the map has none)
        :param vim_id: datacenter id
        :return: list of inserted wim_port_mappings rows
        """
        # get wim from wim_account
        wim_account = self._get_of_controller(sdn_id)
        wim_id = wim_account["wim_id"]
        db_wim_port_mappings = []
        for port_map in maps:   # renamed from 'map' to avoid shadowing the builtin
            _switch_dpid = port_map.get("switch_id") or port_map.get("switch_dpid") or switch_dpid
            new_map = {
                'wim_id': wim_id,
                'switch_dpid': _switch_dpid,
                "switch_port": port_map.get("switch_port"),
                'datacenter_id': vim_id,
                "device_id": port_map.get("compute_node"),
                "service_endpoint_id": _switch_dpid + "-" + str(uuid4())
            }
            if port_map.get("pci"):
                new_map["device_interface_id"] = port_map["pci"].lower()
            config = {}
            if port_map.get("switch_mac"):
                config["switch_mac"] = port_map["switch_mac"]
            if config:
                new_map["service_mapping_info"] = yaml.safe_dump(config, default_flow_style=True, width=256)
            db_wim_port_mappings.append(new_map)

        db_tables = [
            {"wim_port_mappings": db_wim_port_mappings},
        ]
        self.db.new_rows(db_tables, [])
        return db_wim_port_mappings

    def clear_of_port_mapping(self, db_filter=None):
        """
        Clear port mappings matching db_filter.
        :param db_filter: dict with the WHERE conditions of the delete
        :return: number of deleted rows
        """
        return self.db.delete_row(FROM='wim_port_mappings', WHERE=db_filter)

    def get_of_port_mappings(self, db_filter=None):
        """
        Retrieve port mappings from DB.
        :param db_filter: dict with the WHERE conditions of the query
        :return: list of mappings, with 'service_mapping_info' deserialized to a dict
        """
        mappings = self.db.get_rows(WHERE=db_filter, FROM='wim_port_mappings')
        for mapping in mappings:
            if mapping.get("service_mapping_info"):
                mapping["service_mapping_info"] = yaml.load(mapping["service_mapping_info"], Loader=yaml.Loader)
            else:
                mapping["service_mapping_info"] = {}
        return mappings

    def get_ports(self, instance_wim_net_id):
        """Return the unique switch ports (dpid + port) used by the interfaces
        of an instance wim net, resolved through the port mappings."""
        # get wim_id
        instance_wim_net = self.db.get_rows(FROM='instance_wim_nets', WHERE={"uuid": instance_wim_net_id})
        wim_id = instance_wim_net[0]["wim_id"]
        switch_ports = []
        ports = self.db.get_rows(FROM='instance_interfaces', WHERE={"instance_wim_net_id": instance_wim_net_id})
        maps = self.get_of_port_mappings(db_filter={"wim_id": wim_id})
        for port in ports:
            # match each interface to its mapping by compute node + pci address
            map_ = next((x for x in maps if x.get("device_id") == port["compute_node"] and
                         x.get("device_interface_id") == port["pci"]), None)
            if map_:
                switch_port = {'switch_dpid': map_.get('switch_dpid') or map_.get('switch_id'),
                               'switch_port': map_.get('switch_port')}
                if switch_port not in switch_ports:
                    switch_ports.append(switch_port)
        return switch_ports
+
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+import hashlib
+import shlex
+import unittest
+from contextlib import contextmanager
+from functools import wraps
+from hashlib import md5
+from os import environ, pathsep
+from subprocess import STDOUT, check_output
+from uuid import UUID
+
+from MySQLdb import connect
+
+from ..nfvo_db import nfvo_db
+
+HOST = environ.get('TEST_DB_HOST', 'localhost')
+USER = environ.get('TEST_DB_USER', 'mano')
+PASSWORD = environ.get('TEST_DB_PASSWORD', 'manopw')
+DATABASE = environ.get('TEST_DB_DATABASE', 'mano_db')
+
+
def uuid(seed):
    """Generate a string with UUID format in a repeatable way.

    The same *seed* always produces the same UUID string.

    :param seed: any value; converted to ``str`` before hashing
    :return: a 36-character UUID-formatted string
    """
    # hashlib.md5 requires bytes in Python 3, so encode the seed text first
    # (the original passed a str, raising TypeError)
    return str(UUID(md5(str(seed).encode("utf-8")).hexdigest()))
+
+
def sha1(text):
    """Generate the SHA1 hex digest of a text (or bytes) string.

    :param text: ``str`` (encoded as UTF-8 before hashing) or ``bytes``
    :return: 40-character hexadecimal digest
    """
    # hashlib.sha1 requires bytes in Python 3 (the original passed a str,
    # raising TypeError); keep accepting bytes unchanged
    if isinstance(text, str):
        text = text.encode("utf-8")
    return hashlib.sha1(text).hexdigest()
+
+
def run(*args, **kwargs):
    """Run a command in a subprocess, raising an exception when it fails.

    Arguments:
        *args: either the separate words of the command, or one single
            string with the whole command line (it will be tokenized)
        **kwargs: forwarded to ``subprocess.check_output``; by default
            ``stderr=STDOUT`` and ``universal_newlines=True``
    """
    # a lone string is split into shell-like tokens
    if len(args) == 1 and isinstance(args[0], str):
        args = shlex.split(args[0])

    options = {"stderr": STDOUT, "universal_newlines": True}
    options.update(kwargs)
    return check_output(args, **options)
+
+
# In order to not mess around, enforce the user to explicitly select the
# test database through an environment variable
@unittest.skipUnless(
    environ.get('TEST_DB_HOST'),
    'Test database not available. Please set TEST_DB_HOST env var')
class TestCaseWithDatabase(unittest.TestCase):
    """Connect to the database and provide methods to facilitate isolating
    its stored state between tests.

    Connections, table creation and destruction are managed through
    class-level fixtures: that avoids paying the connect/create/destroy cost
    on every test, at the price of not guaranteeing DB isolation between
    them.  To enforce isolation, call ``setup_tables`` and ``empty_database``
    directly, or write one single test per class.
    """

    host = HOST
    user = USER
    password = PASSWORD
    database = DATABASE

    @classmethod
    def setup_tables(cls):
        """Ensure the database schema exists, with all required tables at the
        right version.
        """
        dbutils = environ.get('DBUTILS')
        if dbutils:
            # make the db helper scripts reachable through PATH
            environ["PATH"] += pathsep + dbutils

        return run('init_mano_db.sh',
                   '-u', cls.user,
                   '-p', cls.password,
                   '-h', cls.host,
                   '-d', cls.database)

    @classmethod
    def empty_database(cls):
        """Drop and re-create the database so one test cannot interfere with
        another through leftover rows."""
        # Use a custom connection not attached to the database, so the
        # database itself can be destroyed and recreated
        connection = connect(cls.host, cls.user, cls.password)
        cursor = connection.cursor()
        safe_name = connection.escape_string(cls.database)
        for statement in ("DROP DATABASE {};", "CREATE DATABASE {};"):
            cursor.execute(statement.format(safe_name))
        cursor.close()
        connection.close()
+
+
class TestCaseWithDatabasePerTest(TestCaseWithDatabase):
    """Ensure a connection to the database before each test and drop the
    whole database afterwards, so every test starts from a clean schema.
    """

    def setUp(self):
        """Create the schema, register the cleanup and connect to the DB."""
        self.setup_tables()
        self.addCleanup(self.empty_database)

        self.maxDiff = None  # show full diffs on assertion failures

        self.db = nfvo_db(self.host, self.user, self.password, self.database)
        self.db.connect()

    def populate(self, seeds=None, **kwargs):
        """Seed the database with initial values.

        :param seeds: dict (or list of dicts) with the rows to insert
        :param kwargs: one extra row, appended to ``seeds``
        """
        if not seeds:
            seeds = []
        if not isinstance(seeds, (list, tuple)):
            seeds = [seeds]
        if kwargs:
            seeds.append(kwargs)
        self.db.new_rows(seeds)

    def count(self, table):
        """Return the number of rows in ``table``."""
        return self.db.get_rows(
            SELECT='COUNT(*) as count', FROM=table)[0]['count']

    @contextmanager
    def disable_foreign_keys(self):
        """Run the enclosed block with foreign key checks disabled.

        The cursor is created *before* the ``try`` block: in the original
        code a failure while creating it made the ``finally`` clause raise
        ``NameError`` on the unbound name, masking the real error.  The
        cursor is also closed now, instead of leaking.
        """
        cursor = self.db.con.cursor()
        try:
            cursor.execute('SET FOREIGN_KEY_CHECKS=0;')
            yield
        finally:
            # always restore the checks and release the cursor
            cursor.execute('SET FOREIGN_KEY_CHECKS=1;')
            cursor.close()
+
+
def disable_foreign_keys(test):
    """Decorator that runs *test* without foreign key checking.

    To be used together with subclasses of ``TestCaseWithDatabasePerTest``:
    the wrapped test method is executed inside the instance's
    ``disable_foreign_keys`` context manager.
    """
    @wraps(test)
    def _wrapper(self, *args, **kwargs):
        with self.disable_foreign_keys():
            return test(self, *args, **kwargs)

    return _wrapper
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import logging
+import unittest
+from collections import defaultdict
+
+from io import StringIO
+
+from unittest.mock import MagicMock, patch
+
+logger = logging.getLogger()
+
+
class TestCaseWithLogging(unittest.TestCase):
    """Capture everything sent to the root logger in an internal buffer.

    The captured text is available through the ``caplog`` attribute::

        self.caplog.getvalue()
    """
    def setUp(self):
        super(TestCaseWithLogging, self).setUp()
        buffer = StringIO()
        handler = logging.StreamHandler(buffer)
        root_logger = logging.getLogger()
        root_logger.addHandler(handler)
        root_logger.setLevel(logging.NOTSET)
        # expose the pieces tests rely on
        self.logger = root_logger
        self.caplog = buffer
        self.log_handler = handler

    def tearDown(self):
        super(TestCaseWithLogging, self).tearDown()
        self.log_handler.close()
        self.logger.removeHandler(self.log_handler)
+
+
def mock_imports(modules, preserve=()):
    """Mock every module in ``modules`` (and their parent packages), unless
    listed in ``preserve``.

    Returns a ``patch.dict`` over ``sys.modules`` usable as context manager
    or decorator.
    """
    # accept single module names as well as iterables
    if isinstance(modules, str):
        modules = (modules,)
    if isinstance(preserve, str):
        preserve = (preserve,)

    # Expand each dotted name into the chain of its parents, since mocking a
    # child usually requires its parents to be mocked too.
    # Example: ['Crypto.PublicKey'] => ['Crypto', 'Crypto.PublicKey']
    expanded = []
    for name in modules:
        prefix = ""
        for part in name.split('.'):
            prefix = part if not prefix else prefix + '.' + part
            expanded.append(prefix)

    to_mock = set(expanded) - set(preserve)
    for module in to_mock:
        logger.info('Mocking module `%s`', module)

    mocks = {module: MagicMock() for module in to_mock}

    return patch.dict('sys.modules', **mocks)
+
+
def mock_dict(**kwargs):
    """Create a dict-like object that always responds something.

    Unknown keys produce (and remember) a fresh ``MagicMock``; keys passed
    in ``kwargs`` return the given value.

    Arguments:
        **kwargs: items that should be preset in the created object
    """
    result = defaultdict(MagicMock)
    result.update(kwargs)
    return result
+
+
def mock_object(**kwargs):
    """Create an object that always responds something.

    Unknown attributes resolve as ``MagicMock`` children; attributes passed
    in ``kwargs`` are preset to the given values.

    Arguments:
        **kwargs: attributes that should be set on the created object
    """
    stub = MagicMock()
    stub.configure_mock(**kwargs)
    return stub
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+# pylint: disable=E1101
+import unittest
+
+from MySQLdb import connect, cursors, DatabaseError, IntegrityError
+
+from ..db_base import retry, with_transaction
+from ..nfvo_db import nfvo_db
+from .db_helpers import TestCaseWithDatabase
+
+
class TestDbDecorators(TestCaseWithDatabase):
    """Integration tests for the ``retry`` and transaction helpers of
    ``db_base``, exercised against a real (test) MySQL database."""

    @classmethod
    def setUpClass(cls):
        # Create the test database (if missing) and the small table used by
        # the transaction tests; 'id' is PRIMARY KEY so duplicate inserts fail
        connection = connect(cls.host, cls.user, cls.password)
        cursor = connection.cursor()
        cursor.execute(
            "CREATE DATABASE IF NOT EXISTS {};".format(
                connection.escape_string(cls.database)))
        cursor.execute("use {};".format(cls.database))
        cursor.execute("""\
            CREATE TABLE IF NOT EXISTS `test_table` (\
                `id` int(11) NOT NULL,
                PRIMARY KEY (`id`)\
            );\
            """)
        cursor.close()
        connection.close()

    @classmethod
    def tearDownClass(cls):
        # drop and recreate the whole database (see TestCaseWithDatabase)
        cls.empty_database()

    def setUp(self):
        # fresh connection per test, disconnected on cleanup
        self.maxDiff = None
        self.db = nfvo_db(self.host, self.user, self.password, self.database)
        self.db.connect()
        self.addCleanup(lambda: self.db.disconnect())

    def db_run(self, query, cursor=None):
        # Execute ``query`` and return its first row; a new cursor on the
        # default connection is created when none is given
        cursor = cursor or self.db.con.cursor()
        cursor.execute(query)
        return cursor.fetchone()

    def test_retry_inject_attempt(self):
        # ``retry`` must pass an ``attempt`` object to the wrapped function
        @retry
        def _fn(db, attempt=None):
            self.assertIsNotNone(attempt)
            self.assertEqual(attempt.number, 1)

        _fn(self.db)

    def test_retry_accept_max_attempts(self):
        # a DatabaseError is retried until max_attempts is reached
        success = []
        failures = []

        @retry(max_attempts=5)
        def _fn(db, attempt=None):
            if attempt.count < 4:
                failures.append(attempt.count)
                raise DatabaseError("Emulate DB error", "msg")
            success.append(attempt.count)

        _fn(self.db)
        self.assertEqual(failures, [0, 1, 2, 3])
        self.assertEqual(success, [4])

    def test_retry_reconnect_auctomatically(self):
        # closing the connection emulates a connection loss; ``retry`` is
        # expected to reconnect and eventually succeed
        success = []
        failures = []

        @retry(max_attempts=3)
        def _fn(db, attempt=None):
            if attempt.count < 2:
                failures.append(attempt.count)
                db.con.close()  # Simulate connection failure
            result = self.db_run('select 1+1, 2+2;')
            success.append(attempt.count)
            return result

        result = _fn(self.db)
        self.assertEqual(failures, [0, 1])
        self.assertEqual(success, [2])
        self.assertEqual(result, (2, 4))

    def test_retry_reraise_non_db_errors(self):
        # errors unrelated to the database must not be retried
        failures = []

        @retry
        def _fn(db, attempt=None):
            failures.append(attempt.count)
            raise SystemError("Non Correlated Error")

        with self.assertRaises(SystemError):
            _fn(self.db)

        self.assertEqual(failures, [0])

    def test_transaction_rollback(self):
        with self.assertRaises(IntegrityError), \
                self.db.transaction() as cursor:
            # The first row is created normally
            self.db_run('insert into test_table (id) values (1)', cursor)
            # The second row fails due to repeated id
            self.db_run('insert into test_table (id) values (1)', cursor)
            # The entire transaction will rollback then, and therefore the
            # first operation will be undone

        count = self.db_run('select count(*) FROM test_table')
        self.assertEqual(count, (0,))

    def test_transaction_cursor(self):
        # a custom cursor class can be supplied to ``transaction``
        with self.db.transaction(cursors.DictCursor) as cursor:
            count = self.db_run('select count(*) as counter FROM test_table',
                                cursor)

        self.assertEqual(count, {'counter': 0})
+
+
# Allow running this test module directly with `python <file>`
if __name__ == '__main__':
    unittest.main()
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+# pylint: disable=E1101
+
+import unittest
+
+from ..utils import (
+ get_arg,
+ inject_args,
+ remove_extra_items,
+)
+
+
class TestUtils(unittest.TestCase):
    """Unit tests for the ``get_arg``, ``inject_args`` and
    ``remove_extra_items`` helpers of ``utils``."""

    def test_inject_args_curries_arguments(self):
        curried = inject_args(lambda a=None, b=None: a + b, a=3, b=5)
        self.assertEqual(curried(), 8)

    def test_inject_args_doesnt_add_arg_if_not_needed(self):
        no_args = inject_args(lambda: 7, a=1, b=2)
        self.assertEqual(no_args(), 7)
        one_arg = inject_args(lambda a=None: a, b=2)
        self.assertEqual(one_arg(1), 1)

    def test_inject_args_knows_how_to_handle_arg_order(self):
        diff = inject_args(lambda a=None, b=None: b - a, a=3)
        self.assertEqual(diff(b=4), 1)
        diff = inject_args(lambda b=None, a=None: b - a, a=3)
        self.assertEqual(diff(b=4), 1)

    def test_inject_args_works_as_decorator(self):
        identity = inject_args(x=1)(lambda x=None: x)
        self.assertEqual(identity(), 1)

    def test_get_arg__positional(self):
        def _fn(x, y, z):
            return x + y + z

        call_args, call_kwargs = (1, 3, 4), {}
        self.assertEqual(get_arg("x", _fn, call_args, call_kwargs), 1)
        self.assertEqual(get_arg("y", _fn, call_args, call_kwargs), 3)
        self.assertEqual(get_arg("z", _fn, call_args, call_kwargs), 4)

    def test_get_arg__keyword(self):
        def _fn(x, y, z=5):
            return x + y + z

        self.assertEqual(get_arg("z", _fn, (1, 2), {"z": 3}), 3)

    def test_remove_extra_items__keep_aditional_properties(self):
        # keys outside the schema are removed, except where the schema sets
        # additionalProperties (note: method name typo kept so the test id
        # stays stable)
        schema = {
            "type": "object",
            "properties": {
                "a": {
                    "type": "object",
                    "properties": {
                        "type": "object",
                        "properties": {"b": "string"},
                    },
                    "additionalProperties": True,
                }
            },
        }

        example = {"a": {"b": 1, "c": 2}, "d": 3}
        deleted = remove_extra_items(example, schema)
        self.assertIn("d", deleted)
        self.assertIs(example.get("d"), None)
        self.assertEqual(example["a"]["c"], 2)
+
+
# Allow running this test module directly
if __name__ == "__main__":
    unittest.main()
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+utils is a module that implements functions that are used by all openmano modules,
+dealing with aspects such as reading/writing files, formatting inputs/outputs for quick translation
+from dictionaries to appropriate database dictionaries, etc.
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$08-sep-2014 12:21:22$"
+
# Standard library
import datetime
import re
import time
import warnings

from functools import reduce, partial, wraps
from inspect import getfullargspec as getspec
from itertools import tee
from itertools import filterfalse

# Third-party
from jsonschema import exceptions as js_e
from jsonschema import validate as js_v
+
+#from bs4 import BeautifulSoup
+
def read_file(file_to_read):
    """Read the text file 'file_to_read'.

    Returns:
        (True, <content as str>) on success, or (False, <error message>)
        on failure (missing file, permission denied, decode error, ...).
    """
    try:
        # 'with' guarantees the descriptor is closed even when read() fails
        with open(file_to_read, 'r') as f:
            read_data = f.read()
    except Exception as e:
        return False, str(e)

    return True, read_data
+
def write_file(file_to_write, text):
    """Write 'text' into the file 'file_to_write'.

    Returns:
        (True, None) on success, or (False, <error message>) on failure.
    """
    try:
        # 'with' guarantees the descriptor is closed (and data flushed)
        # even if the write fails half-way
        with open(file_to_write, 'w') as f:
            f.write(text)
    except Exception as e:
        return False, str(e)

    return True, None
+
def format_in(http_response, schema):
    """Parse the JSON body of 'http_response' and validate it against
    'schema'.

    Returns (True, <parsed data>) when the payload is valid, or
    (False, <error description tuple>) when the validation fails.
    """
    try:
        payload = http_response.json()
        js_v(payload, schema)
        #print "Input data: ", str(payload)
    except js_e.ValidationError as exc:
        print("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
        return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
    return True, payload
+
def remove_extra_items(data, schema):
    """Recursively delete from 'data' every key not allowed by 'schema'.

    Keys are removed in place unless the (sub)schema declares
    'additionalProperties'. Returns a description of what was removed:
    None when nothing was, the single item when exactly one was, or a
    list of items otherwise.
    """
    deleted = []
    if isinstance(data, (tuple, list)):
        # apply the 'items' sub-schema to every element of the sequence
        for element in data:
            removed = remove_extra_items(element, schema['items'])
            if removed:
                deleted.append(removed)
    elif isinstance(data, dict):
        # TODO deal with patternProperties
        if 'properties' not in schema:
            return None
        allowed = schema['properties']
        extra_keys = []
        for key in data.keys():
            if key in allowed:
                removed = remove_extra_items(data[key], allowed[key])
                if removed:
                    deleted.append({key: removed})
            elif not schema.get('additionalProperties'):
                extra_keys.append(key)
                deleted.append(key)
        for key in extra_keys:
            del data[key]

    if not deleted:
        return None
    if len(deleted) == 1:
        return deleted[0]
    return deleted
+
+#def format_html2text(http_content):
+# soup=BeautifulSoup(http_content)
+# text = soup.p.get_text() + " " + soup.pre.get_text()
+# return text
+
+
def delete_nulls(var):
    """Recursively delete dict entries whose value is None, or that become
    empty after cleaning. Containers are modified in place.

    Returns:
        True when 'var' ends up empty, False otherwise.
    """
    if isinstance(var, dict):
        to_delete = []
        for k in var.keys():
            if var[k] is None:
                # BUGFIX: record the key itself; the previous code appended
                # '[k]' (a list), which made 'del var[k]' below raise
                # TypeError (unhashable type) for any None value
                to_delete.append(k)
            elif isinstance(var[k], (dict, list, tuple)):
                if delete_nulls(var[k]):
                    # nested container became empty: drop it as well
                    to_delete.append(k)
        for k in to_delete:
            del var[k]
        if len(var) == 0:
            return True
    elif isinstance(var, (list, tuple)):
        for item in var:
            if isinstance(item, dict):
                delete_nulls(item)
        if len(var) == 0:
            return True
    return False
+
+
def convert_bandwidth(data, reverse=False):
    '''Check the field bandwidth recursively and, when found, remove units and convert to a number.
    It assumes that bandwidth is well formed
    Attributes:
        'data': dictionary bottle.FormsDict variable to be checked. None or empty is considered valid
        'reverse': by default convert from str to int (Mbps); if True convert from number to units
    Return:
        None
    '''
    if isinstance(data, dict):
        for k in data.keys():
            if isinstance(data[k], (dict, tuple, list)):
                convert_bandwidth(data[k], reverse)
        if "bandwidth" in data:
            try:
                value = str(data["bandwidth"])
                if not reverse:
                    pos = value.find("bps")
                    if pos > 0:
                        if value[pos-1] == "G":
                            data["bandwidth"] = int(data["bandwidth"][:pos-1]) * 1000
                        elif value[pos-1] == "M":
                            # BUGFIX: "<n> Mbps" previously fell through to
                            # int("<n> M"), which raised and left the value
                            # unconverted (so reverse-generated strings could
                            # not be converted back)
                            data["bandwidth"] = int(data["bandwidth"][:pos-1])
                        elif value[pos-1] == "k":
                            data["bandwidth"] = int(data["bandwidth"][:pos-1]) // 1000
                        else:
                            data["bandwidth"] = int(data["bandwidth"][:pos])
                else:
                    value = int(data["bandwidth"])
                    # multiples of 1000 Mbps are rendered in Gbps
                    if value % 1000 == 0 and value > 1000:
                        data["bandwidth"] = str(value // 1000) + " Gbps"
                    else:
                        data["bandwidth"] = str(value) + " Mbps"
            except Exception:
                # best effort: keep the original value, just report it
                print("convert_bandwidth exception for type", type(data["bandwidth"]), " data", data["bandwidth"])
                return
    if isinstance(data, (tuple, list)):
        for k in data:
            if isinstance(k, (dict, tuple, list)):
                convert_bandwidth(k, reverse)
def convert_float_timestamp2str(var):
    '''Recursively replace float 'created_at'/'modified_at' values inside
    'var' with local-time strings formatted as '%Y-%m-%dT%H:%M:%S'.
    Returns True when a dict input ends up empty (kept for compatibility).
    '''
    if type(var) is dict:
        for key, value in var.items():
            if type(value) is float and key in ("created_at", "modified_at"):
                var[key] = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(value))
            elif type(value) in (dict, list, tuple):
                convert_float_timestamp2str(value)
        if not var:
            return True
    elif type(var) in (list, tuple):
        for value in var:
            convert_float_timestamp2str(value)
+
def convert_datetime2str(var):
    '''Recursively replace every datetime.datetime value found inside 'var'
    with a string formatted as '%Y-%m-%dT%H:%M:%S'.
    Returns True when a dict input ends up empty (kept for compatibility).
    '''
    if type(var) is dict:
        for key, value in var.items():
            if type(value) is datetime.datetime:
                var[key] = value.strftime('%Y-%m-%dT%H:%M:%S')
            elif type(value) in (dict, list, tuple):
                convert_datetime2str(value)
        if not var:
            return True
    elif type(var) in (list, tuple):
        for value in var:
            convert_datetime2str(value)
+
def convert_str2boolean(data, items):
    '''Recursively convert to boolean every value of 'data' whose key is in
    'items' and whose value is the string "true"/"True"/"false"/"False".
    Attributes:
        'data': dictionary variable to be checked. None or empty is considered valid
        'items': tuple of keys to convert
    Return:
        None
    '''
    if type(data) is dict:
        for key in data.keys():
            # descend first, then convert this level's value if requested
            if type(data[key]) in (dict, tuple, list):
                convert_str2boolean(data[key], items)
            if key in items and type(data[key]) is str:
                if data[key] in ("false", "False"):
                    data[key] = False
                elif data[key] in ("true", "True"):
                    data[key] = True
    if type(data) in (tuple, list):
        for element in data:
            if type(element) in (dict, tuple, list):
                convert_str2boolean(element, items)
+
def check_valid_uuid(uuid):
    """Return True when 'uuid' is a string in canonical UUID format
    (8-4-4-4-12 hexadecimal digits) or a plain run of 32 hexadecimal
    digits; False otherwise (including non-string inputs).

    Replaces the previous jsonschema-based check: a plain stdlib regex
    match avoids the exception-driven control flow and the heavyweight
    validator for what is a simple pattern test.
    """
    if not isinstance(uuid, str):
        # jsonschema rejected non-strings via ValidationError; keep that
        # behavior by returning False
        return False
    canonical = r"^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"
    compact = r"^[a-fA-F0-9]{32}$"
    return bool(re.match(canonical, uuid) or re.match(compact, uuid))
+
+
def expand_brackets(text):
    """
    Expand a text like TEXT[ABC] into the list [TEXTA, TEXTB, TEXTC].
    Without any bracket a single-element list is returned. Several bracket
    groups are expanded recursively.
    :param text: the string to expand (may be None)
    :return: list of expanded strings, or the tuple (None,) when text is None
    """
    if text is None:
        return (None, )
    start = text.find("[")
    end = text.find("]")
    if start < 0 or end < 0:
        return [text]
    prefix = text[:start]
    suffix = text[end + 1:]
    expanded = []
    for char in text[start + 1:end]:
        expanded.extend(expand_brackets(prefix + char + suffix))
    return expanded
+
def deprecated(message):
    """Decorator factory that marks a function as deprecated.

    Every call to the decorated function emits a DeprecationWarning that
    includes 'message'. The wrapped function's metadata (__name__,
    __doc__, ...) is preserved via functools.wraps.
    """
    def deprecated_decorator(func):
        @wraps(func)  # BUGFIX: without wraps() the decorated function lost its metadata
        def deprecated_func(*args, **kwargs):
            warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
                          category=DeprecationWarning,
                          stacklevel=2)
            warnings.simplefilter('default', DeprecationWarning)
            return func(*args, **kwargs)
        return deprecated_func
    return deprecated_decorator
+
+
def truncate(text, max_length=1024):
    """Limit huge texts to roughly max_length characters, keeping the head
    and the tail of the text with an ellipsis in between."""
    text = str(text)
    if not text or len(text) < max_length:
        return text
    head = text[:max_length // 2 - 3]
    tail = text[-max_length // 2 + 3:]
    return head + " ... " + tail
+
+
def merge_dicts(*dicts, **kwargs):
    """Create a new dict merging N others and keyword arguments.
    Right-most dicts take precedence; keyword args take precedence over all.
    """
    merged = {}
    for source in dicts:
        merged.update(source)
    merged.update(kwargs)
    return merged
+
+
def remove_none_items(adict):
    """Return a shallow copy of 'adict' without the keys whose value is None"""
    cleaned = {}
    for key, value in adict.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
+
+
def filter_dict_keys(adict, allow):
    """Return a similar dict containing only the explicitly allowed keys

    Arguments:
        adict (dict): Simple python dict data struct
        allow (list): Explicit allowed keys
    """
    return dict((k, v) for k, v in adict.items() if k in allow)
+
+
def filter_out_dict_keys(adict, deny):
    """Return a similar dict without the explicitly denied keys

    Arguments:
        adict (dict): Simple python dict data struct
        deny (list): Explicit denied keys
    """
    return dict((k, v) for k, v in adict.items() if k not in deny)
+
+
def expand_joined_fields(record):
    """Given a db query result, explode the fields that contain `.` (join
    operations).

    Example
    >> expand_joined_fields({'wim.id': 2})
    # {'wim': {'id': 2}}
    """
    result = {}
    for field, value in record.items():
        keys = field.split('.')
        # walk (creating as needed) the nested dicts down to the
        # last-but-one key; the dead 'target = result' pre-assignment of
        # the original was removed
        target = reduce(lambda target, key: target.setdefault(key, {}),
                        keys[:-1], result)
        target[keys[-1]] = value

    return result
+
+
def ensure(condition, exception):
    """Raise 'exception' unless 'condition' holds"""
    if condition:
        return
    raise exception
+
+
def partition(predicate, iterable):
    """Split a single iterable into two derived iterators.
    The first one yields the values for which 'predicate' is true, the
    second one yields the values for which it is false.
    """
    branch_a, branch_b = tee(iterable)
    matching = filter(predicate, branch_b)
    rest = filterfalse(predicate, branch_a)
    return matching, rest
+
+
def pipe(*functions):
    """Compose functions of one argument in the opposite order,
    So pipe(f, g)(x) = g(f(x))
    """
    def _pipeline(value):
        for fn in functions:
            value = fn(value)
        return value
    return _pipeline
+
+
def compose(*functions):
    """Compose functions of one argument,
    So compose(f, g)(x) = f(g(x))
    """
    def _composed(value):
        for fn in reversed(functions):
            value = fn(value)
        return value
    return _composed
+
+
def safe_get(target, key_path, default=None):
    """Given a path of keys (eg.: "key1.key2.key3"), return the nested value
    inside a nested dict when present, or 'default' otherwise.
    """
    *parents, last = key_path.split('.')
    for key in parents:
        # a missing/None intermediate level degrades to an empty dict
        target = target.get(key) or {}
    return target.get(last, default)
+
+
class Attempt(object):
    """Helper holding the state of one attempt when retrying a failing
    procedure.

    Attributes:
        count (int): 0-based "retries" counter
        max (int): maximum number of "retries" allowed
        info (dict): extra information about the specific attempt
            (can be used to produce more meaningful error messages)
    """
    __slots__ = ('count', 'max', 'info')

    # default maximum number of attempts
    MAX = 3

    def __init__(self, count=0, max_attempts=MAX, info=None):
        self.count = count
        self.max = max_attempts
        self.info = info or {}

    @property
    def countdown(self):
        """Number of attempts still available (count in the opposite direction)"""
        return self.max - self.count

    @property
    def number(self):
        """1-based counter"""
        return self.count + 1
+
+
def inject_args(fn=None, **args):
    """Partially apply keyword arguments to a function, but only those the
    function declares in its own signature.
    """
    if fn is None:
        # called with parameters only: act as a decorator factory
        return partial(inject_args, **args)

    accepted = getspec(fn).args
    injectable = {name: value for name, value in args.items() if name in accepted}
    return wraps(fn)(partial(fn, **injectable))
+
+
def get_arg(name, fn, args, kwargs):
    """Find the value that argument 'name' takes in a call to 'fn' made
    with positional tuple 'args' and keyword dict 'kwargs'.

    Returns None when the value cannot be determined. This function can be
    used to display more meaningful errors for debugging.
    """
    try:
        return kwargs[name]
    except KeyError:
        pass

    declared = getspec(fn).args
    if name not in declared:
        return None
    position = declared.index(name)
    if position < len(args):
        return args[position]
    return None
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+""""
+This is thread that interacts with a VIM. It processes TASKs sequentially against a single VIM.
+The tasks are stored at database in table vim_wim_actions
Several vim_wim_actions can refer to the same element at VIM (flavor, network, ...). This is something to avoid if RO
is migrated to a non-relational database such as MongoDB. Each vim_wim_actions references a different instance_Xxxxx.
In this case the "related" column contains the same value, to indicate they refer to the same VIM element. On deletion, if
there are related tasks using this element, it is not deleted; the vim_info needed to delete it is transferred to another task
+
+The task content is (M: stored at memory, D: stored at database):
+ MD instance_action_id: reference a global action over an instance-scenario: database instance_actions
+ MD task_index: index number of the task. This together with the previous forms a unique key identifier
+ MD datacenter_vim_id: should contain the uuid of the VIM managed by this thread
+ MD vim_id: id of the vm,net,etc at VIM
+ MD item: database table name, can be instance_vms, instance_nets, TODO: datacenter_flavors, datacenter_images
+ MD item_id: uuid of the referenced entry in the previous table
+ MD action: CREATE, DELETE, FIND
+ MD status: SCHEDULED: action need to be done
+ BUILD: not used
+ DONE: Done and it must be polled to VIM periodically to see status. ONLY for action=CREATE or FIND
+ FAILED: It cannot be created/found/deleted
+ FINISHED: similar to DONE, but no refresh is needed anymore. Task is maintained at database but
+ it is never processed by any thread
            SUPERSEDED: similar to FINISHED, but nothing has been done to complete the task.
+ MD extra: text with yaml format at database, dict at memory with:
+ params: list with the params to be sent to the VIM for CREATE or FIND. For DELETE the vim_id is taken
+ from other related tasks
+ find: (only for CREATE tasks) if present it should FIND before creating and use if existing. Contains
+ the FIND params
+ depends_on: list with the 'task_index'es of tasks that must be completed before. e.g. a vm creation depends
+ on a net creation
+ can contain an int (single index on the same instance-action) or str (compete action ID)
+ sdn_net_id: used for net.
+ interfaces: used for VMs. Each key is the uuid of the instance_interfaces entry at database
+ iface_id: uuid of intance_interfaces
+ sdn_port_id:
+ sdn_net_id:
+ vim_info
+ created_items: dictionary with extra elements created that need to be deleted. e.g. ports, volumes,...
+ created: False if the VIM element is not created by other actions, and it should not be deleted
+ vim_status: VIM status of the element. Stored also at database in the instance_XXX
+ vim_info: Detailed information of a vm/net from the VIM. Stored at database in the instance_XXX but not at
+ vim_wim_actions
+ M depends: dict with task_index(from depends_on) to dependency task
+ M params: same as extra[params]
+ MD error_msg: descriptive text upon an error.Stored also at database instance_XXX
+ MD created_at: task creation time. The task of creation must be the oldest
+ MD modified_at: next time task need to be processed. For example, for a refresh, it contain next time refresh must
+ be done
    MD related: All the tasks over the same VIM element have the same "related". Note that other VIMs can contain the
                same value of related, but this thread only processes the tasks of one VIM. Also, related can be the
                same among several NS or instance-scenarios
+ MD worker: Used to lock in case of several thread workers.
+
+"""
+
+import threading
+import time
+import queue
+import logging
+from osm_ro_plugin import vimconn
+from osm_ro_plugin.sdnconn import SdnConnectorError
+import yaml
+from osm_ro.db_base import db_base_Exception
+from http import HTTPStatus
+from copy import deepcopy
+
+__author__ = "Alfonso Tierno, Pablo Montes"
+__date__ = "$28-Sep-2017 12:07:15$"
+
+
def is_task_id(task_id):
    """Return True when the given identifier is a task id ("TASK-" prefix)
    rather than a plain VIM element id."""
    return task_id.startswith("TASK-")
+
+
class VimThreadException(Exception):
    """Base exception for errors raised while processing vim/wim tasks"""
    pass
+
+
class VimThreadExceptionNotFound(VimThreadException):
    """Raised when a required element cannot be found"""
    pass
+
+
+class vim_thread(threading.Thread):
+ REFRESH_BUILD = 5 # 5 seconds
+ REFRESH_ACTIVE = 60 # 1 minute
+ REFRESH_ERROR = 600
+ REFRESH_DELETE = 3600 * 10
+
    def __init__(self, task_lock, plugins, name=None, wim_account_id=None, datacenter_tenant_id=None, db=None):
        """Init a thread that processes tasks against a single VIM or SDN controller.
        Arguments:
            'task_lock': lock shared among threads to serialize task access
            'plugins': dict of available vim/sdn connector plugin classes, indexed by plugin name
            'name': name of thread
            'wim_account_id': uuid of the WIM account managed by this thread (SDN case)
            'datacenter_tenant_id': uuid of the datacenter tenant (VIM case)
            'db': database class to use
        """
        threading.Thread.__init__(self)
        self.plugins = plugins
        self.plugin_name = "unknown"
        self.vim = None
        self.sdnconnector = None
        self.sdnconn_config = None
        self.error_status = None
        self.wim_account_id = wim_account_id
        self.datacenter_tenant_id = datacenter_tenant_id
        self.port_mappings = None
        # A thread manages either a WIM account (SDN) or a datacenter tenant (VIM);
        # target_k/target_v select the matching column/value of vim_wim_actions
        if self.wim_account_id:
            self.target_k = "wim_account_id"
            self.target_v = self.wim_account_id
        else:
            self.target_k = "datacenter_vim_id"
            self.target_v = self.datacenter_tenant_id
        if not name:
            self.name = wim_account_id or str(datacenter_tenant_id)
        else:
            self.name = name
        self.vim_persistent_info = {}
        # worker identifier used to lock tasks at database ('worker' column, 64 chars max)
        self.my_id = self.name[:64]

        self.logger = logging.getLogger('openmano.{}.{}'.format("vim" if self.datacenter_tenant_id else "sdn",
                                                                self.name))
        self.db = db

        self.task_lock = task_lock
        self.task_queue = queue.Queue(2000)
+
+ def _proccess_sdn_exception(self, exc):
+ if isinstance(exc, SdnConnectorError):
+ raise
+ else:
+ self.logger.error("plugin={} throws a non SdnConnectorError exception {}".format(self.plugin_name, exc),
+ exc_info=True)
+ raise SdnConnectorError(str(exc), http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value) from exc
+
+ def _proccess_vim_exception(self, exc):
+ if isinstance(exc, vimconn.VimConnException):
+ raise
+ else:
+ self.logger.error("plugin={} throws a non vimconnException exception {}".format(self.plugin_name, exc),
+ exc_info=True)
+ raise vimconn.VimConnException(str(exc), http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value) from exc
+
    def get_vim_sdn_connector(self):
        """Instantiate the VIM connector (datacenter case) or the SDN connector
        (wim account case) from the configuration stored at database.

        On success the connector is kept at self.vim / self.sdnconnector; on
        failure it is set to None and self.error_status holds a descriptive
        message. Returns nothing.
        """
        if self.datacenter_tenant_id:
            try:
                from_ = "datacenter_tenants as dt join datacenters as d on dt.datacenter_id=d.uuid"
                select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
                           'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
                           'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
                           'user', 'passwd', 'dt.config as dt_config')
                where_ = {"dt.uuid": self.datacenter_tenant_id}
                vims = self.db.get_rows(FROM=from_, SELECT=select_, WHERE=where_)
                vim = vims[0]
                vim_config = {}
                # tenant-level config (dt_config) overrides the datacenter-level one
                if vim["config"]:
                    vim_config.update(yaml.load(vim["config"], Loader=yaml.Loader))
                if vim["dt_config"]:
                    vim_config.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
                vim_config['datacenter_tenant_id'] = vim.get('datacenter_tenant_id')
                vim_config['datacenter_id'] = vim.get('datacenter_id')

                # get port_mapping
                # vim_port_mappings = self.ovim.get_of_port_mappings(
                #     db_filter={"datacenter_id": vim_config['datacenter_id']})
                # vim_config["wim_external_ports"] = [x for x in vim_port_mappings
                #                                     if x["service_mapping_info"].get("wim")]
                # plugin classes are registered under a "rovim_<type>" key
                self.plugin_name = "rovim_" + vim["type"]
                self.vim = self.plugins[self.plugin_name](
                    uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                    tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
                    url=vim['vim_url'], url_admin=vim['vim_url_admin'],
                    user=vim['user'], passwd=vim['passwd'],
                    config=vim_config, persistent_info=self.vim_persistent_info
                )
                self.error_status = None
                self.logger.info("Vim Connector loaded for vim_account={}, plugin={}".format(
                    self.datacenter_tenant_id, self.plugin_name))
            except Exception as e:
                self.logger.error("Cannot load vimconnector for vim_account={} plugin={}: {}".format(
                    self.datacenter_tenant_id, self.plugin_name, e))
                self.vim = None
                self.error_status = "Error loading vimconnector: {}".format(e)
        else:
            try:
                wim_account = self.db.get_rows(FROM="wim_accounts", WHERE={"uuid": self.wim_account_id})[0]
                wim = self.db.get_rows(FROM="wims", WHERE={"uuid": wim_account["wim_id"]})[0]
                if wim["config"]:
                    self.sdnconn_config = yaml.load(wim["config"], Loader=yaml.Loader)
                else:
                    self.sdnconn_config = {}
                # account-level config overrides the generic wim config
                if wim_account["config"]:
                    self.sdnconn_config.update(yaml.load(wim_account["config"], Loader=yaml.Loader))
                self.port_mappings = self.db.get_rows(FROM="wim_port_mappings", WHERE={"wim_id": wim_account["wim_id"]})
                if self.port_mappings:
                    self.sdnconn_config["service_endpoint_mapping"] = self.port_mappings
                # plugin classes are registered under a "rosdn_<type>" key
                self.plugin_name = "rosdn_" + wim["type"]
                self.sdnconnector = self.plugins[self.plugin_name](
                    wim, wim_account, config=self.sdnconn_config)
                self.error_status = None
                self.logger.info("Sdn Connector loaded for wim_account={}, plugin={}".format(
                    self.wim_account_id, self.plugin_name))
            except Exception as e:
                self.logger.error("Cannot load sdn connector for wim_account={}, plugin={}: {}".format(
                    self.wim_account_id, self.plugin_name, e), exc_info=True)
                self.sdnconnector = None
                self.error_status = self._format_vim_error_msg("Error loading sdn connector: {}".format(e))
+
    def _get_db_task(self):
        """
        Read and lock the next group of related pending actions from database.
        :return: (task, related_tasks) on success; (None, None) when there is
            nothing to process or on unexpected errors (logged).
        """
        now = time.time()
        try:
            database_limit = 20
            task_related = None
            while True:
                # get 20 (database_limit) entries each time
                vim_actions = self.db.get_rows(FROM="vim_wim_actions",
                                               WHERE={self.target_k: self.target_v,
                                                      "status": ['SCHEDULED', 'BUILD', 'DONE'],
                                                      "worker": [None, self.my_id], "modified_at<=": now
                                                      },
                                               ORDER_BY=("modified_at", "created_at",),
                                               LIMIT=database_limit)
                if not vim_actions:
                    return None, None
                # if vim_actions[0]["modified_at"] > now:
                #     return int(vim_actions[0] - now)
                for task in vim_actions:
                    # block related task
                    if task_related == task["related"]:
                        continue  # ignore if a locking has already tried for these task set
                    task_related = task["related"]
                    # lock: tag all tasks of this 'related' group with our worker id ...
                    self.db.update_rows("vim_wim_actions", UPDATE={"worker": self.my_id}, modified_time=0,
                                        WHERE={self.target_k: self.target_v,
                                               "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
                                               "worker": [None, self.my_id],
                                               "related": task_related,
                                               "item": task["item"],
                                               })
                    # ... and read all related back to check whether the lock succeeded
                    related_tasks = self.db.get_rows(FROM="vim_wim_actions",
                                                     WHERE={self.target_k: self.target_v,
                                                            "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
                                                            "related": task_related,
                                                            "item": task["item"],
                                                            },
                                                     ORDER_BY=("created_at",))
                    # check that all related tasks have been locked. If not, release and try again. It can happen
                    # for race conditions if a new related task has been inserted by nfvo in the process
                    some_tasks_locked = False
                    some_tasks_not_locked = False
                    creation_task = None
                    for relate_task in related_tasks:
                        if relate_task["worker"] != self.my_id:
                            some_tasks_not_locked = True
                        else:
                            some_tasks_locked = True
                        if not creation_task and relate_task["action"] in ("CREATE", "FIND"):
                            creation_task = relate_task
                    if some_tasks_not_locked:
                        if some_tasks_locked:  # unlock the partially acquired group
                            self.db.update_rows("vim_wim_actions", UPDATE={"worker": None}, modified_time=0,
                                                WHERE={self.target_k: self.target_v,
                                                       "worker": self.my_id,
                                                       "related": task_related,
                                                       "item": task["item"],
                                                       })
                        continue

                    # the group is ours: deserialize the yaml 'extra' column into memory fields
                    task["params"] = None
                    if task["extra"]:
                        extra = yaml.load(task["extra"], Loader=yaml.Loader)
                    else:
                        extra = {}
                    task["extra"] = extra
                    if extra.get("depends_on"):
                        task["depends"] = {}
                    if extra.get("params"):
                        task["params"] = deepcopy(extra["params"])
                    return task, related_tasks
        except Exception as e:
            self.logger.critical("Unexpected exception at _get_db_task: " + str(e), exc_info=True)
            return None, None
+
    def _delete_task(self, task):
        """
        Determine whether this DELETE task must actually be performed at the
        VIM or can be superseded (e.g. another task still uses the element).
        :return: True when the VIM element must be deleted, False when it
            must not; None for FAILED tasks or on unexpected exceptions
            (which are logged).
        """

        def copy_extra_created(copy_to, copy_from):
            # transfer the creation bookkeeping (created flag, sdn ids,
            # interfaces, created_items) from one task 'extra' to another
            copy_to["created"] = copy_from["created"]
            if copy_from.get("sdn_net_id"):
                copy_to["sdn_net_id"] = copy_from["sdn_net_id"]
            if copy_from.get("interfaces"):
                copy_to["interfaces"] = copy_from["interfaces"]
            if copy_from.get("sdn-ports"):
                copy_to["sdn-ports"] = copy_from["sdn-ports"]
            if copy_from.get("created_items"):
                if not copy_to.get("created_items"):
                    copy_to["created_items"] = {}
                copy_to["created_items"].update(copy_from["created_items"])

        task_create = None
        dependency_task = None
        deletion_needed = task["extra"].get("created", False)
        if task["status"] == "FAILED":
            return  # TODO need to be retry??
        try:
            # get all related tasks. The task of creation must be the first of related_tasks,
            # unless the deletion failed and is still pending
            # TODO this should be removed, passing related_tasks
            related_tasks = self.db.get_rows(FROM="vim_wim_actions",
                                             WHERE={self.target_k: self.target_v,
                                                    "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
                                                    "action": ["FIND", "CREATE"],
                                                    "related": task["related"],
                                                    },
                                             ORDER_BY=("created_at",),
                                             )
            for related_task in related_tasks:
                if related_task["item"] == task["item"] and related_task["item_id"] == task["item_id"]:
                    task_create = related_task
                    # TASK_CREATE
                    if related_task["extra"]:
                        extra_created = yaml.load(related_task["extra"], Loader=yaml.Loader)
                        if extra_created.get("created"):
                            deletion_needed = True
                        related_task["extra"] = extra_created
                elif not dependency_task:
                    # first other task over the same VIM element
                    dependency_task = related_task
                if task_create and dependency_task:
                    break

            # mark task_create as FINISHED
            if task_create:
                self.db.update_rows("vim_wim_actions", UPDATE={"status": "FINISHED"},
                                    WHERE={self.target_k: self.target_v,
                                           "instance_action_id": task_create["instance_action_id"],
                                           "task_index": task_create["task_index"]
                                           })
            if not deletion_needed:
                # the VIM element was never created by us: nothing to delete
                return False
            elif dependency_task:
                # another task still references the VIM element: do not delete;
                # move create information from task_create to the dependency task
                # NOTE(review): 'extra_created' is only bound when task_create had
                # a non-empty 'extra' column; confirm it cannot be unbound here
                extra_new_created = yaml.load(dependency_task["extra"], Loader=yaml.Loader) or {}
                extra_new_created["created"] = extra_created["created"]
                copy_extra_created(copy_to=extra_new_created, copy_from=extra_created)

                self.db.update_rows("vim_wim_actions",
                                    UPDATE={"extra": yaml.safe_dump(extra_new_created, default_flow_style=True,
                                                                    width=256),
                                            "vim_id": task_create.get("vim_id")},
                                    WHERE={self.target_k: self.target_v,
                                           "instance_action_id": dependency_task["instance_action_id"],
                                           "task_index": dependency_task["task_index"]
                                           })
                return False
            elif task_create:
                # no other user: this task performs the deletion; inherit the
                # vim_id and creation info from the creation task
                task["vim_id"] = task_create["vim_id"]
                copy_extra_created(copy_to=task["extra"], copy_from=task_create["extra"])
                # Ensure this task extra information is stored at database
                self.db.update_rows("vim_wim_actions",
                                    UPDATE={"extra": yaml.safe_dump(task["extra"], default_flow_style=True,
                                                                    width=256)},
                                    WHERE={self.target_k: self.target_v,
                                           "instance_action_id": task["instance_action_id"],
                                           "task_index": task["task_index"],
                                           })
                return True
            return deletion_needed

        except Exception as e:
            self.logger.critical("Unexpected exception at _delete_task: " + str(e), exc_info=True)
+
    def _refres_vm(self, task):
        """Call VIM to get VMs status.

        Refreshes the VM referenced by task["vim_id"], updates the
        'instance_interfaces' database rows for every interface whose VIM
        information changed, and records the new status inside task["extra"].

        :param task: vim_wim_actions task dictionary (FIND/CREATE action over
            an 'instance_vms' item)
        :return: dict with the columns to update at the 'instance_vms' table,
            or None when nothing changed since the last refresh
        """
        database_update = None  # None means "no change to persist"

        vim_id = task["vim_id"]
        vm_to_refresh_list = [vim_id]
        try:
            vim_dict = self.vim.refresh_vms_status(vm_to_refresh_list)
            vim_info = vim_dict[vim_id]
        except vimconn.VimConnException as e:
            # Mark all tasks at VIM_ERROR status
            self.logger.error("task=several get-VM: vimconnException when trying to refresh vms " + str(e))
            vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}

        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        self.logger.debug("task={} get-VM: vim_vm_id={} result={}".format(task_id, task["vim_id"], vim_info))

        # check and update interfaces
        task_warning_msg = ""
        for interface in vim_info.get("interfaces", ()):
            vim_interface_id = interface["vim_interface_id"]
            if vim_interface_id not in task["extra"]["interfaces"]:
                # VIM reported an interface this task does not know about
                self.logger.critical("task={} get-VM: Interface not found {} on task info {}".format(
                    task_id, vim_interface_id, task["extra"]["interfaces"]), exc_info=True)
                continue
            task_interface = task["extra"]["interfaces"][vim_interface_id]
            task_vim_interface = task_interface.get("vim_info")
            if task_vim_interface != interface:
                # delete old port
                # if task_interface.get("sdn_port_id"):
                #     try:
                #         self.ovim.delete_port(task_interface["sdn_port_id"], idempotent=True)
                #         task_interface["sdn_port_id"] = None
                #     except ovimException as e:
                #         error_text = "ovimException deleting external_port={}: {}".format(
                #             task_interface["sdn_port_id"], e)
                #         self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
                #         task_warning_msg += error_text
                #         # TODO Set error_msg at instance_nets instead of instance VMs

                # Create SDN port
                # sdn_net_id = task_interface.get("sdn_net_id")
                # if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
                #     sdn_port_name = sdn_net_id + "." + task["vim_id"]
                #     sdn_port_name = sdn_port_name[:63]
                #     try:
                #         sdn_port_id = self.ovim.new_external_port(
                #             {"compute_node": interface["compute_node"],
                #              "pci": interface["pci"],
                #              "vlan": interface.get("vlan"),
                #              "net_id": sdn_net_id,
                #              "region": self.vim["config"]["datacenter_id"],
                #              "name": sdn_port_name,
                #              "mac": interface.get("mac_address")})
                #         task_interface["sdn_port_id"] = sdn_port_id
                #     except (ovimException, Exception) as e:
                #         error_text = "ovimException creating new_external_port compute_node={} pci={} vlan={} {}".\
                #             format(interface["compute_node"], interface["pci"], interface.get("vlan"), e)
                #         self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
                #         task_warning_msg += error_text
                #         # TODO Set error_msg at instance_nets instead of instance VMs

                # persist the refreshed interface information
                self.db.update_rows('instance_interfaces',
                                    UPDATE={"mac_address": interface.get("mac_address"),
                                            "ip_address": interface.get("ip_address"),
                                            "vim_interface_id": interface.get("vim_interface_id"),
                                            "vim_info": interface.get("vim_info"),
                                            "sdn_port_id": task_interface.get("sdn_port_id"),
                                            "compute_node": interface.get("compute_node"),
                                            "pci": interface.get("pci"),
                                            "vlan": interface.get("vlan")},
                                    WHERE={'uuid': task_interface["iface_id"]})
                task_interface["vim_info"] = interface
                # if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
                #     # TODO Send message to task SDN to update

        # check and update task and instance_vms database
        vim_info_error_msg = None
        if vim_info.get("error_msg"):
            vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"] + task_warning_msg)
        elif task_warning_msg:
            vim_info_error_msg = self._format_vim_error_msg(task_warning_msg)
        task_vim_info = task["extra"].get("vim_info")
        task_error_msg = task.get("error_msg")
        task_vim_status = task["extra"].get("vim_status")
        # only produce a database update when status/error/vim_info actually changed
        if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
                (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
            database_update = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
            if vim_info.get("vim_info"):
                database_update["vim_info"] = vim_info["vim_info"]

            task["extra"]["vim_status"] = vim_info["status"]
            task["error_msg"] = vim_info_error_msg
            if vim_info.get("vim_info"):
                task["extra"]["vim_info"] = vim_info["vim_info"]

        return database_update
+
    def _refres_net(self, task):
        """Call VIM to get network status.

        :param task: vim_wim_actions task dictionary (FIND/CREATE action over
            an 'instance_nets' item)
        :return: dict with the columns to update at the 'instance_nets' table,
            or None when nothing changed since the last refresh
        """
        database_update = None  # None means "no change to persist"

        vim_id = task["vim_id"]
        net_to_refresh_list = [vim_id]
        try:
            vim_dict = self.vim.refresh_nets_status(net_to_refresh_list)
            vim_info = vim_dict[vim_id]
        except vimconn.VimConnException as e:
            # Mark all tasks at VIM_ERROR status
            self.logger.error("task=several get-net: vimconnException when trying to refresh nets " + str(e))
            vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}

        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        self.logger.debug("task={} get-net: vim_net_id={} result={}".format(task_id, task["vim_id"], vim_info))

        task_vim_info = task["extra"].get("vim_info")
        task_vim_status = task["extra"].get("vim_status")
        task_error_msg = task.get("error_msg")
        # task_sdn_net_id = task["extra"].get("sdn_net_id")

        vim_info_status = vim_info["status"]
        vim_info_error_msg = vim_info.get("error_msg")
        # get ovim status
        # if task_sdn_net_id:
        #     try:
        #         sdn_net = self.ovim.show_network(task_sdn_net_id)
        #     except (ovimException, Exception) as e:
        #         text_error = "ovimException getting network snd_net_id={}: {}".format(task_sdn_net_id, e)
        #         self.logger.error("task={} get-net: {}".format(task_id, text_error), exc_info=True)
        #         sdn_net = {"status": "ERROR", "last_error": text_error}
        #     if sdn_net["status"] == "ERROR":
        #         if not vim_info_error_msg:
        #             vim_info_error_msg = str(sdn_net.get("last_error"))
        #         else:
        #             vim_info_error_msg = "VIM_ERROR: {} && SDN_ERROR: {}".format(
        #                 self._format_vim_error_msg(vim_info_error_msg, 1024 // 2 - 14),
        #                 self._format_vim_error_msg(sdn_net["last_error"], 1024 // 2 - 14))
        #         vim_info_status = "ERROR"
        #     elif sdn_net["status"] == "BUILD":
        #         if vim_info_status == "ACTIVE":
        #             vim_info_status = "BUILD"

        # update database
        if vim_info_error_msg:
            vim_info_error_msg = self._format_vim_error_msg(vim_info_error_msg)
        # only write when status/error/vim_info changed since the last refresh
        if task_vim_status != vim_info_status or task_error_msg != vim_info_error_msg or \
                (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
            task["extra"]["vim_status"] = vim_info_status
            task["error_msg"] = vim_info_error_msg
            if vim_info.get("vim_info"):
                task["extra"]["vim_info"] = vim_info["vim_info"]
            database_update = {"status": vim_info_status, "error_msg": vim_info_error_msg}
            if vim_info.get("vim_info"):
                database_update["vim_info"] = vim_info["vim_info"]
        return database_update
+
    def _proccess_pending_tasks(self, task, related_tasks):
        """Process one locked task and persist the outcome at the database.

        Dispatches the task to the proper new_*/del_*/get_*/_refres_* handler
        according to task["item"] (instance_vms, instance_nets,
        instance_wim_nets, instance_sfis, instance_sfs,
        instance_classifications, instance_sfps) and task["action"]
        (CREATE/FIND/DELETE), then updates vim_wim_actions, instance_actions
        and the item table, schedules the next refresh time and unlocks the
        related tasks.

        :param task: task dictionary to process; must be locked by this worker
        :param related_tasks: other tasks sharing the same 'related' value
        :return: None
        """
        old_task_status = task["status"]
        create_or_find = False  # if as result of processing this task something is created or found
        next_refresh = 0
        task_id = task["instance_action_id"] + "." + str(task["task_index"])

        try:
            if task["status"] == "SCHEDULED":
                # check if tasks that this depends on have been completed
                dependency_not_completed = False
                dependency_modified_at = 0
                for task_index in task["extra"].get("depends_on", ()):
                    task_dependency = self._look_for_task(task["instance_action_id"], task_index)
                    if not task_dependency:
                        raise VimThreadException(
                            "Cannot get depending net task trying to get depending task {}.{}".format(
                                task["instance_action_id"], task_index))
                    # task["depends"]["TASK-" + str(task_index)] = task_dependency #it references another object,so
                    # database must be look again
                    if task_dependency["status"] == "SCHEDULED":
                        dependency_not_completed = True
                        dependency_modified_at = task_dependency["modified_at"]
                        break
                    elif task_dependency["status"] == "FAILED":
                        raise VimThreadException(
                            "Cannot {} {}, (task {}.{}) because depends on failed {}.{}, (task{}.{}): {}".format(
                                task["action"], task["item"],
                                task["instance_action_id"], task["task_index"],
                                task_dependency["instance_action_id"], task_dependency["task_index"],
                                task_dependency["action"], task_dependency["item"], task_dependency.get("error_msg")))

                    # register the dependency under both reference formats accepted by _look_for_task
                    task["depends"]["TASK-"+str(task_index)] = task_dependency
                    task["depends"]["TASK-{}.{}".format(task["instance_action_id"], task_index)] = task_dependency
                if dependency_not_completed:
                    # Move this task to the time dependency is going to be modified plus 10 seconds.
                    self.db.update_rows("vim_wim_actions", modified_time=dependency_modified_at + 10,
                                        UPDATE={"worker": None},
                                        WHERE={self.target_k: self.target_v, "worker": self.my_id,
                                               "related": task["related"],
                                               })
                    # task["extra"]["tries"] = task["extra"].get("tries", 0) + 1
                    # if task["extra"]["tries"] > 3:
                    #     raise VimThreadException(
                    #         "Cannot {} {}, (task {}.{}) because timeout waiting to complete {} {}, "
                    #         "(task {}.{})".format(task["action"], task["item"],
                    #                               task["instance_action_id"], task["task_index"],
                    #                               task_dependency["instance_action_id"], task_dependency["task_index"]
                    #                               task_dependency["action"], task_dependency["item"]))
                    return

            database_update = None
            if task["action"] == "DELETE":
                deleted_needed = self._delete_task(task)
                if not deleted_needed:
                    task["status"] = "SUPERSEDED"  # with FINISHED instead of DONE it will not be refreshing
                    task["error_msg"] = None

            if task["status"] == "SUPERSEDED":
                # not needed to do anything but update database with the new status
                database_update = None
            elif not self.vim and not self.sdnconnector:
                # no connector available (e.g. bad credentials): fail with the stored connector error
                task["status"] = "FAILED"
                task["error_msg"] = self.error_status
                database_update = {"status": "VIM_ERROR" if self.datacenter_tenant_id else "WIM_ERROR",
                                   "error_msg": task["error_msg"]}
            elif task["item_id"] != related_tasks[0]["item_id"] and task["action"] in ("FIND", "CREATE"):
                # Do nothing, just copy values from one to another and update database
                task["status"] = related_tasks[0]["status"]
                task["error_msg"] = related_tasks[0]["error_msg"]
                task["vim_id"] = related_tasks[0]["vim_id"]
                # NOTE(review): yaml.Loader (full loader) used on data read back from our own
                # database, i.e. a trusted source — confirm this data is never user-supplied
                extra = yaml.load(related_tasks[0]["extra"], Loader=yaml.Loader)
                task["extra"]["vim_status"] = extra.get("vim_status")
                next_refresh = related_tasks[0]["modified_at"] + 0.001
                database_update = {"status": task["extra"].get("vim_status", "VIM_ERROR"),
                                   "error_msg": task["error_msg"]}
                if task["item"] == 'instance_vms':
                    database_update["vim_vm_id"] = task["vim_id"]
                elif task["item"] == 'instance_nets':
                    database_update["vim_net_id"] = task["vim_id"]
            elif task["item"] == 'instance_vms':
                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
                    database_update = self._refres_vm(task)
                    create_or_find = True
                elif task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_vm(task)
                elif task["action"] == "DELETE":
                    self.del_vm(task)
                else:
                    raise vimconn.VimConnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_nets':
                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
                    database_update = self._refres_net(task)
                    create_or_find = True
                elif task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_net(task)
                elif task["action"] == "DELETE":
                    self.del_net(task)
                elif task["action"] == "FIND":
                    database_update = self.get_net(task)
                else:
                    raise vimconn.VimConnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_wim_nets':
                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
                    database_update = self.new_or_update_sdn_net(task)
                    create_or_find = True
                elif task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_or_update_sdn_net(task)
                elif task["action"] == "DELETE":
                    self.del_sdn_net(task)
                elif task["action"] == "FIND":
                    database_update = self.get_sdn_net(task)
                else:
                    raise vimconn.VimConnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_sfis':
                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
                    database_update = self._refres_sfis(task)
                    create_or_find = True
                elif task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_sfi(task)
                elif task["action"] == "DELETE":
                    self.del_sfi(task)
                else:
                    raise vimconn.VimConnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_sfs':
                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
                    database_update = self._refres_sfs(task)
                    create_or_find = True
                elif task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_sf(task)
                elif task["action"] == "DELETE":
                    self.del_sf(task)
                else:
                    raise vimconn.VimConnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_classifications':
                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
                    database_update = self._refres_classifications(task)
                    create_or_find = True
                elif task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_classification(task)
                elif task["action"] == "DELETE":
                    self.del_classification(task)
                else:
                    raise vimconn.VimConnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_sfps':
                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
                    database_update = self._refres_sfps(task)
                    create_or_find = True
                elif task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_sfp(task)
                elif task["action"] == "DELETE":
                    self.del_sfp(task)
                else:
                    raise vimconn.VimConnException(self.name + "unknown task action {}".format(task["action"]))
            else:
                raise vimconn.VimConnException(self.name + "unknown task item {}".format(task["item"]))
            # TODO
        except Exception as e:
            if not isinstance(e, VimThreadException):
                self.logger.error("Error executing task={}: {}".format(task_id, e), exc_info=True)
            task["error_msg"] = str(e)
            task["status"] = "FAILED"
            database_update = {"status": "VIM_ERROR" if task["item"] != "instance_wim_nets" else "WIM_ERROR",
                               "error_msg": task["error_msg"]}
            # if task["item"] == 'instance_vms':
            #     database_update["vim_vm_id"] = None
            # elif task["item"] == 'instance_nets':
            #     database_update["vim_net_id"] = None

        self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
            task_id, task["item"], task["action"], task["status"],
            task["vim_id"] if task["status"] == "DONE" else task.get("error_msg"), task["params"]))
        try:
            # schedule the next refresh according to the resulting VIM status
            if not next_refresh:
                if task["status"] == "DONE":
                    next_refresh = time.time()
                    if task["extra"].get("vim_status") == "BUILD":
                        next_refresh += self.REFRESH_BUILD
                    elif task["extra"].get("vim_status") in ("ERROR", "VIM_ERROR", "WIM_ERROR"):
                        next_refresh += self.REFRESH_ERROR
                    elif task["extra"].get("vim_status") == "DELETED":
                        next_refresh += self.REFRESH_DELETE
                    else:
                        next_refresh += self.REFRESH_ACTIVE
                elif task["status"] == "FAILED":
                    next_refresh = time.time() + self.REFRESH_DELETE

            if create_or_find:
                # modify all related task with action FIND/CREATED non SCHEDULED
                self.db.update_rows(
                    table="vim_wim_actions", modified_time=next_refresh + 0.001,
                    UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
                            "error_msg": task["error_msg"],
                            },

                    WHERE={self.target_k: self.target_v,
                           "worker": self.my_id,
                           "action": ["FIND", "CREATE"],
                           "related": task["related"],
                           "status<>": "SCHEDULED",
                           })
            # modify own task
            self.db.update_rows(
                table="vim_wim_actions", modified_time=next_refresh,
                UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
                        "error_msg": task["error_msg"],
                        "extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256)},
                WHERE={"instance_action_id": task["instance_action_id"], "task_index": task["task_index"]})
            # Unlock tasks
            self.db.update_rows(
                table="vim_wim_actions", modified_time=0,
                UPDATE={"worker": None},
                WHERE={self.target_k: self.target_v,
                       "worker": self.my_id,
                       "related": task["related"],
                       })

            # Update table instance_actions
            if old_task_status == "SCHEDULED" and task["status"] != old_task_status:
                self.db.update_rows(
                    table="instance_actions",
                    UPDATE={("number_failed" if task["status"] == "FAILED" else "number_done"): {"INCREMENT": 1}},
                    WHERE={"uuid": task["instance_action_id"]})
            if database_update:
                where_filter = {"related": task["related"]}
                if task["item"] == "instance_nets" and task["datacenter_vim_id"]:
                    where_filter["datacenter_tenant_id"] = task["datacenter_vim_id"]
                self.db.update_rows(table=task["item"],
                                    UPDATE=database_update,
                                    WHERE=where_filter)
        except db_base_Exception as e:
            self.logger.error("task={} Error updating database {}".format(task_id, e), exc_info=True)
+
+ def insert_task(self, task):
+ try:
+ self.task_queue.put(task, False)
+ return None
+ except queue.Full:
+ raise vimconn.VimConnException(self.name + ": timeout inserting a task")
+
+ def del_task(self, task):
+ with self.task_lock:
+ if task["status"] == "SCHEDULED":
+ task["status"] = "SUPERSEDED"
+ return True
+ else: # task["status"] == "processing"
+ self.task_lock.release()
+ return False
+
    def run(self):
        """Thread main loop.

        Drains control messages from the internal queue ('exit' terminates the
        thread, 'reload' re-creates the VIM/SDN connectors) and, while idle,
        takes pending tasks from the database and processes them. Sleeps 5
        seconds when there is no pending work.
        """
        self.logger.info("Starting")
        while True:
            # (re)create the VIM/SDN connectors; re-entered after a 'reload'
            self.get_vim_sdn_connector()
            reload_thread = False

            while True:
                try:
                    while not self.task_queue.empty():
                        task = self.task_queue.get()
                        if isinstance(task, list):
                            pass  # list-type messages: currently ignored
                        elif isinstance(task, str):
                            if task == 'exit':
                                return 0
                            elif task == 'reload':
                                reload_thread = True
                                break
                        # NOTE(review): task_done() is skipped for the message that triggers
                        # break/return ('exit'/'reload') — confirm nothing join()s this queue
                        self.task_queue.task_done()
                    if reload_thread:
                        break

                    task, related_tasks = self._get_db_task()
                    if task:
                        self._proccess_pending_tasks(task, related_tasks)
                    else:
                        time.sleep(5)  # no pending task: poll the database later

                except Exception as e:
                    # keep the thread alive on any unexpected error
                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)

        self.logger.debug("Finishing")
+
+ def _look_for_task(self, instance_action_id, task_id):
+ """
+ Look for a concrete task at vim_actions database table
+ :param instance_action_id: The instance_action_id
+ :param task_id: Can have several formats:
+ <task index>: integer
+ TASK-<task index> :backward compatibility,
+ [TASK-]<instance_action_id>.<task index>: this instance_action_id overrides the one in the parameter
+ :return: Task dictionary or None if not found
+ """
+ if isinstance(task_id, int):
+ task_index = task_id
+ else:
+ if task_id.startswith("TASK-"):
+ task_id = task_id[5:]
+ ins_action_id, _, task_index = task_id.rpartition(".")
+ if ins_action_id:
+ instance_action_id = ins_action_id
+
+ tasks = self.db.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": instance_action_id,
+ "task_index": task_index})
+ if not tasks:
+ return None
+ task = tasks[0]
+ task["params"] = None
+ task["depends"] = {}
+ if task["extra"]:
+ extra = yaml.load(task["extra"], Loader=yaml.Loader)
+ task["extra"] = extra
+ task["params"] = extra.get("params")
+ else:
+ task["extra"] = {}
+ return task
+
+ @staticmethod
+ def _format_vim_error_msg(error_text, max_length=1024):
+ if error_text and len(error_text) >= max_length:
+ return error_text[:max_length // 2 - 3] + " ... " + error_text[-max_length // 2 + 3:]
+ return error_text
+
    def new_vm(self, task):
        """Create a new VM at the VIM using the parameters stored in the task.

        Network dependencies expressed as task ids are first resolved into
        real VIM network ids; then the vimconnector is called and the task
        interface info is filled from the database.

        :param task: vim_wim_actions task dictionary; task["params"] holds the
            arguments for vimconnector new_vminstance (params[5] is the
            network/interface list)
        :return: dict with the columns to update at the 'instance_vms' table
        """
        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        try:
            params = task["params"]
            depends = task.get("depends")
            net_list = params[5]
            for net in net_list:
                if "net_id" in net and is_task_id(net["net_id"]):  # change task_id into network_id
                    network_id = task["depends"][net["net_id"]].get("vim_id")
                    if not network_id:
                        raise VimThreadException(
                            "Cannot create VM because depends on a network not created or found: " +
                            str(depends[net["net_id"]]["error_msg"]))
                    net["net_id"] = network_id
            # deepcopy: new_vminstance may modify the parameter structures in place
            params_copy = deepcopy(params)
            vim_vm_id, created_items = self.vim.new_vminstance(*params_copy)

            # fill task_interfaces. Look for snd_net_id at database for each interface
            task_interfaces = {}
            for iface in params_copy[5]:
                task_interfaces[iface["vim_id"]] = {"iface_id": iface["uuid"]}
                result = self.db.get_rows(
                    SELECT=('sdn_net_id', 'interface_id'),
                    FROM='instance_nets as ine join instance_interfaces as ii on ii.instance_net_id=ine.uuid',
                    WHERE={'ii.uuid': iface["uuid"]})
                if result:
                    task_interfaces[iface["vim_id"]]["sdn_net_id"] = result[0]['sdn_net_id']
                    task_interfaces[iface["vim_id"]]["interface_id"] = result[0]['interface_id']
                else:
                    self.logger.critical("task={} new-VM: instance_nets uuid={} not found at DB".format(task_id,
                                                                                                        iface["uuid"]),
                                         exc_info=True)

            # NOTE(review): sibling handlers store this under task["extra"]["vim_info"];
            # verify whether task["vim_info"] here is intentional
            task["vim_info"] = {}
            task["extra"]["interfaces"] = task_interfaces
            task["extra"]["created"] = True
            task["extra"]["created_items"] = created_items
            task["extra"]["vim_status"] = "BUILD"
            task["error_msg"] = None
            task["status"] = "DONE"
            task["vim_id"] = vim_vm_id
            instance_element_update = {"status": "BUILD", "vim_vm_id": vim_vm_id, "error_msg": None}
            return instance_element_update

        except (vimconn.VimConnException, VimThreadException) as e:
            self.logger.error("task={} new-VM: {}".format(task_id, e))
            error_text = self._format_vim_error_msg(str(e))
            task["error_msg"] = error_text
            task["status"] = "FAILED"
            task["vim_id"] = None
            instance_element_update = {"status": "VIM_ERROR", "vim_vm_id": None, "error_msg": error_text}
            return instance_element_update
+
+ def del_vm(self, task):
+ # task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ vm_vim_id = task["vim_id"]
+ # interfaces = task["extra"].get("interfaces", ())
+ try:
+ # for iface in interfaces.values():
+ # if iface.get("sdn_port_id"):
+ # try:
+ # self.ovim.delete_port(iface["sdn_port_id"], idempotent=True)
+ # except ovimException as e:
+ # self.logger.error("task={} del-VM: ovimException when deleting external_port={}: {} ".format(
+ # task_id, iface["sdn_port_id"], e), exc_info=True)
+ # # TODO Set error_msg at instance_nets
+
+ self.vim.delete_vminstance(vm_vim_id, task["extra"].get("created_items"))
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+ return None
+
+ except vimconn.VimConnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.VimConnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
+ task["status"] = "FAILED"
+ return None
+
+ def _get_net_internal(self, task, filter_param):
+ """
+ Common code for get_net and new_net. It looks for a network on VIM with the filter_params
+ :param task: task for this find or find-or-create action
+ :param filter_param: parameters to send to the vimconnector
+ :return: a dict with the content to update the instance_nets database table. Raises an exception on error, or
+ when network is not found or found more than one
+ """
+ vim_nets = self.vim.get_network_list(filter_param)
+ if not vim_nets:
+ raise VimThreadExceptionNotFound("Network not found with this criteria: '{}'".format(filter_param))
+ elif len(vim_nets) > 1:
+ raise VimThreadException("More than one network found with this criteria: '{}'".format(filter_param))
+ vim_net_id = vim_nets[0]["id"]
+
+ # Discover if this network is managed by a sdn controller
+ sdn_net_id = None
+ result = self.db.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets',
+ WHERE={'vim_net_id': vim_net_id, 'datacenter_tenant_id': self.datacenter_tenant_id},
+ ORDER="instance_scenario_id")
+ if result:
+ sdn_net_id = result[0]['sdn_net_id']
+
+ task["status"] = "DONE"
+ task["extra"]["vim_info"] = {}
+ task["extra"]["created"] = False
+ task["extra"]["vim_status"] = "BUILD"
+ task["extra"]["sdn_net_id"] = sdn_net_id
+ task["error_msg"] = None
+ task["vim_id"] = vim_net_id
+ instance_element_update = {"vim_net_id": vim_net_id, "created": False, "status": "BUILD",
+ "error_msg": None, "sdn_net_id": sdn_net_id}
+ return instance_element_update
+
+ def get_net(self, task):
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ try:
+ params = task["params"]
+ filter_param = params[0]
+ instance_element_update = self._get_net_internal(task, filter_param)
+ return instance_element_update
+
+ except (vimconn.VimConnException, VimThreadException) as e:
+ self.logger.error("task={} get-net: {}".format(task_id, e))
+ task["status"] = "FAILED"
+ task["vim_id"] = None
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ instance_element_update = {"vim_net_id": None, "status": "VIM_ERROR",
+ "error_msg": task["error_msg"]}
+ return instance_element_update
+
    def new_net(self, task):
        """Create (or find) a network at the VIM.

        When the task carries a 'find' filter an existing network is searched
        first; only if none matches a new one is created with the task params.

        :param task: vim_wim_actions task dictionary
        :return: dict with the columns to update at the 'instance_nets' table
        """
        vim_net_id = None
        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        action_text = ""  # used in the error log to tell which step failed
        try:
            # FIND
            if task["extra"].get("find"):
                action_text = "finding"
                filter_param = task["extra"]["find"][0]
                try:
                    instance_element_update = self._get_net_internal(task, filter_param)
                    return instance_element_update
                except VimThreadExceptionNotFound:
                    # no existing network: fall through and create one
                    pass
            # CREATE
            params = task["params"]
            action_text = "creating VIM"

            vim_net_id, created_items = self.vim.new_network(*params[0:5])

            # net_name = params[0]
            # net_type = params[1]
            # wim_account_name = None
            # if len(params) >= 6:
            #     wim_account_name = params[5]

            # TODO fix at nfvo adding external port
            # if wim_account_name and self.vim.config["wim_external_ports"]:
            #     # add external port to connect WIM. Try with compute node __WIM:wim_name and __WIM
            #     action_text = "attaching external port to ovim network"
            #     sdn_port_name = "external_port"
            #     sdn_port_data = {
            #         "compute_node": "__WIM:" + wim_account_name[0:58],
            #         "pci": None,
            #         "vlan": network["vlan"],
            #         "net_id": sdn_net_id,
            #         "region": self.vim["config"]["datacenter_id"],
            #         "name": sdn_port_name,
            #     }
            #     try:
            #         sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
            #     except ovimException:
            #         sdn_port_data["compute_node"] = "__WIM"
            #         sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
            #     self.logger.debug("Added sdn_external_port {} to sdn_network {}".format(sdn_external_port_id,
            #                                                                            sdn_net_id))
            task["status"] = "DONE"
            task["extra"]["vim_info"] = {}
            # task["extra"]["sdn_net_id"] = sdn_net_id
            task["extra"]["vim_status"] = "BUILD"
            task["extra"]["created"] = True
            task["extra"]["created_items"] = created_items
            task["error_msg"] = None
            task["vim_id"] = vim_net_id
            instance_element_update = {"vim_net_id": vim_net_id, "status": "BUILD",
                                       "created": True, "error_msg": None}
            return instance_element_update
        except vimconn.VimConnException as e:
            self.logger.error("task={} new-net: Error {}: {}".format(task_id, action_text, e))
            task["status"] = "FAILED"
            task["vim_id"] = vim_net_id
            task["error_msg"] = self._format_vim_error_msg(str(e))
            # task["extra"]["sdn_net_id"] = sdn_net_id
            instance_element_update = {"vim_net_id": vim_net_id, "status": "VIM_ERROR",
                                       "error_msg": task["error_msg"]}
            return instance_element_update
+
+ def del_net(self, task):
+ net_vim_id = task["vim_id"]
+ # sdn_net_id = task["extra"].get("sdn_net_id")
+ try:
+ if net_vim_id:
+ self.vim.delete_network(net_vim_id, task["extra"].get("created_items"))
+ # if sdn_net_id:
+ # # Delete any attached port to this sdn network. There can be ports associated to this network in case
+ # # it was manually done using 'openmano vim-net-sdn-attach'
+ # port_list = self.ovim.get_ports(columns={'uuid'},
+ # filter={'name': 'external_port', 'net_id': sdn_net_id})
+ # for port in port_list:
+ # self.ovim.delete_port(port['uuid'], idempotent=True)
+ # self.ovim.delete_network(sdn_net_id, idempotent=True)
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+ return None
+ except vimconn.VimConnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.VimConnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
+ task["status"] = "FAILED"
+ return None
+
    def new_or_update_sdn_net(self, task):
        """Create or update a WIM/SDN connectivity service for this task.

        Gathers the instance interfaces attached to the wim net, maps them to
        SDN service endpoints using the configured port mappings, adds any
        external ('sdn-ports') endpoints, and then creates or edits the
        connectivity service at the SDN connector. Also used on refresh to
        poll the current SDN status.

        :param task: vim_wim_actions task dictionary over an
            'instance_wim_nets' item
        :return: dict with the columns to update at the 'instance_wim_nets'
            table
        """
        wimconn_net_id = task["vim_id"]
        created_items = task["extra"].get("created_items")
        connected_ports = task["extra"].get("connected_ports", [])
        new_connected_ports = []
        last_update = task["extra"].get("last_update", 0)
        sdn_status = task["extra"].get("vim_status", "BUILD")
        sdn_info = None

        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        error_list = []
        try:
            # FIND: probe whether the connectivity service already exists
            if task["extra"].get("find"):
                wimconn_id = task["extra"]["find"][0]
                try:
                    # result discarded: the call is only used to check existence
                    instance_element_update = self.sdnconnector.get_connectivity_service_status(wimconn_id)
                    wimconn_net_id = wimconn_id
                    instance_element_update = {"wim_internal_id": wimconn_net_id, "created": False, "status": "BUILD",
                                               "error_msg": None, }
                    return instance_element_update
                except Exception as e:
                    # NOTE(review): sibling del_sdn_net checks e.http_code; confirm the
                    # SdnConnectorError attribute name (http_error vs http_code)
                    if isinstance(e, SdnConnectorError) and e.http_error == HTTPStatus.NOT_FOUND.value:
                        pass
                    else:
                        self._proccess_sdn_exception(e)

            params = task["params"]
            # CREATE
            # look for ports
            sdn_ports = []
            pending_ports = 0  # interfaces without compute_node/pci info yet
            vlan_used = None

            ports = self.db.get_rows(FROM='instance_interfaces', WHERE={'instance_wim_net_id': task["item_id"]})
            sdn_need_update = False
            for port in ports:
                vlan_used = port.get("vlan") or vlan_used
                # TODO. Do not connect if already done
                if port.get("compute_node") and port.get("pci"):
                    # find a port mapping for this compute node + pci
                    for pmap in self.port_mappings:
                        if pmap.get("device_id") == port["compute_node"] and \
                                pmap.get("device_interface_id") == port["pci"]:
                            break
                    else:
                        # no mapping found: synthesize one if allowed by config
                        if self.sdnconn_config.get("mapping_not_needed"):
                            pmap = {
                                "service_endpoint_id": "{}:{}".format(port["compute_node"], port["pci"]),
                                "service_endpoint_encapsulation_info": {
                                    "vlan": port["vlan"],
                                    "mac": port["mac_address"],
                                    "device_id": port["compute_node"],
                                    "device_interface_id": port["pci"]
                                }
                            }
                        else:
                            pmap = None
                            error_list.append("Port mapping not found for compute_node={} pci={}".format(
                                port["compute_node"], port["pci"]))

                    if pmap:
                        if port["modified_at"] > last_update:
                            sdn_need_update = True
                        new_connected_ports.append(port["uuid"])
                        sdn_ports.append({
                            "service_endpoint_id": pmap["service_endpoint_id"],
                            "service_endpoint_encapsulation_type": "dot1q" if port["model"] == "SR-IOV" else None,
                            "service_endpoint_encapsulation_info": {
                                "vlan": port["vlan"],
                                "mac": port["mac_address"],
                                "device_id": pmap.get("device_id"),
                                "device_interface_id": pmap.get("device_interface_id"),
                                "switch_dpid": pmap.get("switch_dpid"),
                                "switch_port": pmap.get("switch_port"),
                                "service_mapping_info": pmap.get("service_mapping_info"),
                            }
                        })

                else:
                    pending_ports += 1
            if pending_ports:
                error_list.append("Waiting for getting interfaces location from VIM. Obtained '{}' of {}"
                                  .format(len(ports)-pending_ports, len(ports)))

            # connect external ports
            for index, external_port in enumerate(task["extra"].get("sdn-ports") or ()):
                external_port_id = external_port.get("service_endpoint_id") or str(index)
                sdn_ports.append({
                    "service_endpoint_id": external_port_id,
                    "service_endpoint_encapsulation_type": external_port.get("service_endpoint_encapsulation_type",
                                                                             "dot1q"),
                    "service_endpoint_encapsulation_info": {
                        "vlan": external_port.get("vlan") or vlan_used,
                        "mac": external_port.get("mac_address"),
                        "device_id": external_port.get("device_id"),
                        "device_interface_id": external_port.get("device_interface_id"),
                        "switch_dpid": external_port.get("switch_dpid") or external_port.get("switch_id"),
                        "switch_port": external_port.get("switch_port"),
                        "service_mapping_info": external_port.get("service_mapping_info"),
                    }})
                new_connected_ports.append(external_port_id)

            # if there are more ports to connect or they have been modified, call create/update
            try:
                if set(connected_ports) != set(new_connected_ports) or sdn_need_update:
                    last_update = time.time()
                    if not wimconn_net_id:
                        if len(sdn_ports) < 2:
                            # nothing to interconnect yet
                            if not pending_ports:
                                sdn_status = "ACTIVE"
                        else:
                            if params[0] == "data":
                                net_type = "ELAN"
                            elif params[0] == "ptp":
                                net_type = "ELINE"
                            else:
                                net_type = "L3"
                            wimconn_net_id, created_items = self.sdnconnector.create_connectivity_service(
                                net_type, sdn_ports)
                    else:
                        created_items = self.sdnconnector.edit_connectivity_service(
                            wimconn_net_id, conn_info=created_items, connection_points=sdn_ports)
                    connected_ports = new_connected_ports
                elif wimconn_net_id:
                    # no changes: just poll the current SDN status
                    wim_status_dict = self.sdnconnector.get_connectivity_service_status(wimconn_net_id,
                                                                                        conn_info=created_items)
                    sdn_status = wim_status_dict["sdn_status"]
                    if wim_status_dict.get("error_msg"):
                        error_list.append(wim_status_dict.get("error_msg"))
                    if wim_status_dict.get("sdn_info"):
                        sdn_info = str(wim_status_dict.get("sdn_info"))
            except Exception as e:
                self._proccess_sdn_exception(e)

            task["status"] = "DONE"
            task["extra"]["vim_info"] = {}
            # task["extra"]["sdn_net_id"] = sdn_net_id
            task["extra"]["vim_status"] = sdn_status
            task["extra"]["created"] = True
            task["extra"]["created_items"] = created_items
            task["extra"]["connected_ports"] = connected_ports
            task["extra"]["last_update"] = last_update
            task["error_msg"] = self._format_vim_error_msg(" ; ".join(error_list))
            task["vim_id"] = wimconn_net_id
            instance_element_update = {"wim_internal_id": wimconn_net_id, "status": sdn_status,
                                       "created": True, "error_msg": task["error_msg"] or None}
        except (vimconn.VimConnException, SdnConnectorError) as e:
            self.logger.error("task={} new-sdn-net: Error: {}".format(task_id, e))
            task["status"] = "FAILED"
            task["vim_id"] = wimconn_net_id
            task["error_msg"] = self._format_vim_error_msg(str(e))
            # task["extra"]["sdn_net_id"] = sdn_net_id
            instance_element_update = {"wim_internal_id": wimconn_net_id, "status": "WIM_ERROR",
                                       "error_msg": task["error_msg"]}

        if sdn_info:
            instance_element_update["wim_info"] = sdn_info
        return instance_element_update
+
+ def del_sdn_net(self, task):
+ wimconn_net_id = task["vim_id"]
+ try:
+ try:
+ if wimconn_net_id:
+ self.sdnconnector.delete_connectivity_service(wimconn_net_id, task["extra"].get("created_items"))
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+ return None
+ except Exception as e:
+ self._proccess_sdn_exception(e)
+ except SdnConnectorError as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if e.http_code == HTTPStatus.NOT_FOUND.value:
+ # If not found mark as Done and fill error_msg
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+ return None
+ task["status"] = "FAILED"
+ return None
+
+ # Service Function Instances
    def new_sfi(self, task):
        """Create a Service Function Instance at the VIM.

        Resolves the VIM ids of the ingress/egress interfaces from the
        depending task's created interfaces, then asks the vim connector to
        create the SFI. Mutates the task dict in place (status, vim_id,
        error_msg, extra) and returns the instance database update dict, or
        None when the interfaces cannot be resolved.
        """
        vim_sfi_id = None
        try:
            # Waits for interfaces to be ready (avoids failure)
            time.sleep(1)
            dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
            task_id = task["instance_action_id"] + "." + str(task["task_index"])
            error_text = ""
            # interfaces created by the depending task: {vim_interface_id: data}
            interfaces = task["depends"][dep_id]["extra"].get("interfaces")

            ingress_interface_id = task.get("extra").get("params").get("ingress_interface_id")
            egress_interface_id = task.get("extra").get("params").get("egress_interface_id")
            ingress_vim_interface_id = None
            egress_vim_interface_id = None
            # map the RO interface ids to the VIM interface ids
            for vim_interface, interface_data in interfaces.items():
                if interface_data.get("interface_id") == ingress_interface_id:
                    ingress_vim_interface_id = vim_interface
                    break
            if ingress_interface_id != egress_interface_id:
                for vim_interface, interface_data in interfaces.items():
                    if interface_data.get("interface_id") == egress_interface_id:
                        egress_vim_interface_id = vim_interface
                        break
            else:
                # same interface acts as both ingress and egress
                egress_vim_interface_id = ingress_vim_interface_id
            if not ingress_vim_interface_id or not egress_vim_interface_id:
                error_text = "Error creating Service Function Instance, Ingress: {}, Egress: {}".format(
                    ingress_vim_interface_id, egress_vim_interface_id)
                self.logger.error(error_text)
                task["error_msg"] = error_text
                task["status"] = "FAILED"
                task["vim_id"] = None
                return None
            # At the moment, every port associated with the VM will be used both as ingress and egress ports.
            # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack,
            # only the first ingress and first egress ports will be used to create the SFI (Port Pair).
            ingress_port_id_list = [ingress_vim_interface_id]
            egress_port_id_list = [egress_vim_interface_id]
            name = "sfi-{}".format(task["item_id"][:8])
            # By default no form of IETF SFC Encapsulation will be used
            vim_sfi_id = self.vim.new_sfi(name, ingress_port_id_list, egress_port_id_list, sfc_encap=False)

            task["extra"]["created"] = True
            task["extra"]["vim_status"] = "ACTIVE"
            task["error_msg"] = None
            task["status"] = "DONE"
            task["vim_id"] = vim_sfi_id
            instance_element_update = {"status": "ACTIVE", "vim_sfi_id": vim_sfi_id, "error_msg": None}
            return instance_element_update

        except (vimconn.VimConnException, VimThreadException) as e:
            self.logger.error("Error creating Service Function Instance, task=%s: %s", task_id, str(e))
            error_text = self._format_vim_error_msg(str(e))
            task["error_msg"] = error_text
            task["status"] = "FAILED"
            task["vim_id"] = None
            instance_element_update = {"status": "VIM_ERROR", "vim_sfi_id": None, "error_msg": error_text}
            return instance_element_update
+
+ def del_sfi(self, task):
+ sfi_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_sfi(sfi_vim_id)
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+ return None
+
+ except vimconn.VimConnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.VimConnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
+ task["status"] = "FAILED"
+ return None
+
    def new_sf(self, task):
        """Create a Service Function at the VIM from the depending SFI tasks.

        Collects the vim_id of every SFI task listed in
        task["extra"]["depends_on"] and creates one SF grouping them.
        Mutates the task dict in place and returns the instance database
        update dict.
        """
        vim_sf_id = None
        try:
            task_id = task["instance_action_id"] + "." + str(task["task_index"])
            error_text = ""
            depending_tasks = ["TASK-" + str(dep_id) for dep_id in task["extra"]["depends_on"]]
            # sfis = next(iter(task.get("depends").values())).get("extra").get("params")[5]
            sfis = [task.get("depends").get(dep_task) for dep_task in depending_tasks]
            sfi_id_list = []
            for sfi in sfis:
                sfi_id_list.append(sfi.get("vim_id"))
            name = "sf-{}".format(task["item_id"][:8])
            # By default no form of IETF SFC Encapsulation will be used
            vim_sf_id = self.vim.new_sf(name, sfi_id_list, sfc_encap=False)

            task["extra"]["created"] = True
            task["extra"]["vim_status"] = "ACTIVE"
            task["error_msg"] = None
            task["status"] = "DONE"
            task["vim_id"] = vim_sf_id
            instance_element_update = {"status": "ACTIVE", "vim_sf_id": vim_sf_id, "error_msg": None}
            return instance_element_update

        except (vimconn.VimConnException, VimThreadException) as e:
            self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
            error_text = self._format_vim_error_msg(str(e))
            task["error_msg"] = error_text
            task["status"] = "FAILED"
            task["vim_id"] = None
            instance_element_update = {"status": "VIM_ERROR", "vim_sf_id": None, "error_msg": error_text}
            return instance_element_update
+
+ def del_sf(self, task):
+ sf_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_sf(sf_vim_id)
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+ return None
+
+ except vimconn.VimConnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.VimConnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
+ task["status"] = "FAILED"
+ return None
+
+ def new_classification(self, task):
+ vim_classification_id = None
+ try:
+ params = task["params"]
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
+ error_text = ""
+ interfaces = task.get("depends").get(dep_id).get("extra").get("interfaces")
+ # Bear in mind that different VIM connectors might support Classifications differently.
+ # In the case of OpenStack, only the first VNF attached to the classifier will be used
+ # to create the Classification(s) (the "logical source port" of the "Flow Classifier").
+ # Since the VNFFG classifier match lacks the ethertype, classification defaults to
+ # using the IPv4 flow classifier.
+ logical_source_port_vim_id = None
+ logical_source_port_id = params.get("logical_source_port")
+ for vim_interface, interface_data in interfaces.items():
+ if interface_data.get("interface_id") == logical_source_port_id:
+ logical_source_port_vim_id = vim_interface
+ break
+ if not logical_source_port_vim_id:
+ error_text = "Error creating Flow Classifier, Logical Source Port id {}".format(
+ logical_source_port_id)
+ self.logger.error(error_text)
+ task["error_msg"] = error_text
+ task["status"] = "FAILED"
+ task["vim_id"] = None
+ return None
+
+ name = "c-{}".format(task["item_id"][:8])
+ # if not CIDR is given for the IP addresses, add /32:
+ ip_proto = int(params.get("ip_proto"))
+ source_ip = params.get("source_ip")
+ destination_ip = params.get("destination_ip")
+ source_port = params.get("source_port")
+ destination_port = params.get("destination_port")
+ definition = {"logical_source_port": logical_source_port_vim_id}
+ if ip_proto:
+ if ip_proto == 1:
+ ip_proto = 'icmp'
+ elif ip_proto == 6:
+ ip_proto = 'tcp'
+ elif ip_proto == 17:
+ ip_proto = 'udp'
+ definition["protocol"] = ip_proto
+ if source_ip:
+ if '/' not in source_ip:
+ source_ip += '/32'
+ definition["source_ip_prefix"] = source_ip
+ if source_port and ip_proto !='icmp':
+ definition["source_port_range_min"] = source_port
+ definition["source_port_range_max"] = source_port
+ if destination_port and ip_proto !='icmp':
+ definition["destination_port_range_min"] = destination_port
+ definition["destination_port_range_max"] = destination_port
+ if destination_ip:
+ if '/' not in destination_ip:
+ destination_ip += '/32'
+ definition["destination_ip_prefix"] = destination_ip
+
+ vim_classification_id = self.vim.new_classification(
+ name, 'legacy_flow_classifier', definition)
+
+ task["extra"]["created"] = True
+ task["extra"]["vim_status"] = "ACTIVE"
+ task["error_msg"] = None
+ task["status"] = "DONE"
+ task["vim_id"] = vim_classification_id
+ instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id,
+ "error_msg": None}
+ return instance_element_update
+
+ except (vimconn.VimConnException, VimThreadException) as e:
+ self.logger.error("Error creating Classification, task=%s: %s", task_id, str(e))
+ error_text = self._format_vim_error_msg(str(e))
+ task["error_msg"] = error_text
+ task["status"] = "FAILED"
+ task["vim_id"] = None
+ instance_element_update = {"status": "VIM_ERROR", "vim_classification_id": None, "error_msg": error_text}
+ return instance_element_update
+
+ def del_classification(self, task):
+ classification_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_classification(classification_vim_id)
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+ return None
+
+ except vimconn.VimConnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.VimConnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
+ task["status"] = "FAILED"
+ return None
+
    def new_sfp(self, task):
        """Create a Service Function Path at the VIM.

        Splits the depending tasks into SF tasks ("instance_sfs") and
        classification tasks ("instance_classifications"), then creates a
        SFP joining them. Mutates the task dict in place and returns the
        instance database update dict.
        """
        vim_sfp_id = None
        try:
            task_id = task["instance_action_id"] + "." + str(task["task_index"])
            depending_tasks = [task.get("depends").get("TASK-" + str(tsk_id)) for tsk_id in
                               task.get("extra").get("depends_on")]
            error_text = ""
            sf_id_list = []
            classification_id_list = []
            # sort the depending vim ids by the kind of resource they created
            for dep in depending_tasks:
                vim_id = dep.get("vim_id")
                resource = dep.get("item")
                if resource == "instance_sfs":
                    sf_id_list.append(vim_id)
                elif resource == "instance_classifications":
                    classification_id_list.append(vim_id)

            name = "sfp-{}".format(task["item_id"][:8])
            # By default no form of IETF SFC Encapsulation will be used
            vim_sfp_id = self.vim.new_sfp(name, classification_id_list, sf_id_list, sfc_encap=False)

            task["extra"]["created"] = True
            task["extra"]["vim_status"] = "ACTIVE"
            task["error_msg"] = None
            task["status"] = "DONE"
            task["vim_id"] = vim_sfp_id
            instance_element_update = {"status": "ACTIVE", "vim_sfp_id": vim_sfp_id, "error_msg": None}
            return instance_element_update

        except (vimconn.VimConnException, VimThreadException) as e:
            self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
            error_text = self._format_vim_error_msg(str(e))
            task["error_msg"] = error_text
            task["status"] = "FAILED"
            task["vim_id"] = None
            instance_element_update = {"status": "VIM_ERROR", "vim_sfp_id": None, "error_msg": error_text}
            return instance_element_update
+
+ def del_sfp(self, task):
+ sfp_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_sfp(sfp_vim_id)
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+ return None
+
+ except vimconn.VimConnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.VimConnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
+ task["status"] = "FAILED"
+ return None
+
+ def _refres_sfps(self, task):
+ """Call VIM to get SFPs status"""
+ database_update = None
+
+ vim_id = task["vim_id"]
+ sfp_to_refresh_list = [vim_id]
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ try:
+ vim_dict = self.vim.refresh_sfps_status(sfp_to_refresh_list)
+ vim_info = vim_dict[vim_id]
+ except vimconn.VimConnException as e:
+ # Mark all tasks at VIM_ERROR status
+ self.logger.error("task={} get-sfp: vimconnException when trying to refresh sfps {}".format(task_id, e))
+ vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
+
+ self.logger.debug("task={} get-sfp: vim_sfp_id={} result={}".format(task_id, task["vim_id"], vim_info))
+ #TODO: Revise this part
+ vim_info_error_msg = None
+ if vim_info.get("error_msg"):
+ vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"])
+ task_vim_info = task["extra"].get("vim_info")
+ task_error_msg = task.get("error_msg")
+ task_vim_status = task["extra"].get("vim_status")
+ if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
+ (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
+ database_update = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
+ if vim_info.get("vim_info"):
+ database_update["vim_info"] = vim_info["vim_info"]
+
+ task["extra"]["vim_status"] = vim_info["status"]
+ task["error_msg"] = vim_info_error_msg
+ if vim_info.get("vim_info"):
+ task["extra"]["vim_info"] = vim_info["vim_info"]
+
+ return database_update
+
+ def _refres_sfis(self, task):
+ """Call VIM to get sfis status"""
+ database_update = None
+
+ vim_id = task["vim_id"]
+ sfi_to_refresh_list = [vim_id]
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ try:
+ vim_dict = self.vim.refresh_sfis_status(sfi_to_refresh_list)
+ vim_info = vim_dict[vim_id]
+ except vimconn.VimConnException as e:
+ # Mark all tasks at VIM_ERROR status
+ self.logger.error("task={} get-sfi: vimconnException when trying to refresh sfis {}".format(task_id, e))
+ vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
+
+ self.logger.debug("task={} get-sfi: vim_sfi_id={} result={}".format(task_id, task["vim_id"], vim_info))
+ #TODO: Revise this part
+ vim_info_error_msg = None
+ if vim_info.get("error_msg"):
+ vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"])
+ task_vim_info = task["extra"].get("vim_info")
+ task_error_msg = task.get("error_msg")
+ task_vim_status = task["extra"].get("vim_status")
+ if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
+ (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
+ database_update = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
+ if vim_info.get("vim_info"):
+ database_update["vim_info"] = vim_info["vim_info"]
+
+ task["extra"]["vim_status"] = vim_info["status"]
+ task["error_msg"] = vim_info_error_msg
+ if vim_info.get("vim_info"):
+ task["extra"]["vim_info"] = vim_info["vim_info"]
+
+ return database_update
+
+ def _refres_sfs(self, task):
+ """Call VIM to get sfs status"""
+ database_update = None
+
+ vim_id = task["vim_id"]
+ sf_to_refresh_list = [vim_id]
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ try:
+ vim_dict = self.vim.refresh_sfs_status(sf_to_refresh_list)
+ vim_info = vim_dict[vim_id]
+ except vimconn.VimConnException as e:
+ # Mark all tasks at VIM_ERROR status
+ self.logger.error("task={} get-sf: vimconnException when trying to refresh sfs {}".format(task_id, e))
+ vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
+
+ self.logger.debug("task={} get-sf: vim_sf_id={} result={}".format(task_id, task["vim_id"], vim_info))
+ #TODO: Revise this part
+ vim_info_error_msg = None
+ if vim_info.get("error_msg"):
+ vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"])
+ task_vim_info = task["extra"].get("vim_info")
+ task_error_msg = task.get("error_msg")
+ task_vim_status = task["extra"].get("vim_status")
+ if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
+ (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
+ database_update = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
+ if vim_info.get("vim_info"):
+ database_update["vim_info"] = vim_info["vim_info"]
+
+ task["extra"]["vim_status"] = vim_info["status"]
+ task["error_msg"] = vim_info_error_msg
+ if vim_info.get("vim_info"):
+ task["extra"]["vim_info"] = vim_info["vim_info"]
+
+ return database_update
+
+ def _refres_classifications(self, task):
+ """Call VIM to get classifications status"""
+ database_update = None
+
+ vim_id = task["vim_id"]
+ classification_to_refresh_list = [vim_id]
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ try:
+ vim_dict = self.vim.refresh_classifications_status(classification_to_refresh_list)
+ vim_info = vim_dict[vim_id]
+ except vimconn.VimConnException as e:
+ # Mark all tasks at VIM_ERROR status
+ self.logger.error("task={} get-classification: vimconnException when trying to refresh classifications {}"
+ .format(task_id, e))
+ vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
+
+ self.logger.debug("task={} get-classification: vim_classification_id={} result={}".format(task_id,
+ task["vim_id"], vim_info))
+ #TODO: Revise this part
+ vim_info_error_msg = None
+ if vim_info.get("error_msg"):
+ vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"])
+ task_vim_info = task["extra"].get("vim_info")
+ task_error_msg = task.get("error_msg")
+ task_vim_status = task["extra"].get("vim_status")
+ if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
+ (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
+ database_update = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
+ if vim_info.get("vim_info"):
+ database_update["vim_info"] = vim_info["vim_info"]
+
+ task["extra"]["vim_status"] = vim_info["status"]
+ task["error_msg"] = vim_info_error_msg
+ if vim_info.get("vim_info"):
+ task["extra"]["vim_info"] = vim_info["vim_info"]
+
+ return database_update
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##
+# This file is standalone vmware vcloud director util
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: mbayramov@vmware.com
+##
+
+"""
+
+Standalone application that leverages the openmano vmware connector to work with the vCloud director REST API.
+
+ - Provides capability to create and delete VDC for specific organization.
+ - Create, delete and manage network for specific VDC
+ - List deployed VM's , VAPPs, VDSs, Organization
+ - View detail information about VM / Vapp , Organization etc
+ - Operate with images upload / boot / power on etc
+
+ Usage example.
+
+ List organization created in vCloud director
+ vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list org
+
+ List VDC for particular organization
+ vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list vdc
+
+ Upload image
+ python vmwarerecli.py image upload /Users/spyroot/Developer/Openmano/Ro/vnfs/cirros/cirros.ovf
+
+ Boot Image
+ python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF image boot cirros cirros
+
+ View vApp
+ python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF view vapp 90bd2b4e-f782-46cf-b5e2-c3817dcf6633 -u
+
+ List VMS
+ python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vms
+
+ List VDC in OSM format
+ python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vdc -o
+
+Mustafa Bayramov
+mbayramov@vmware.com
+"""
+import os
+import argparse
+import traceback
+import uuid
+
+from xml.etree import ElementTree as ET
+
+import sys
+from pyvcloud import Http
+
+import logging
+from osm_ro_plugin import vimconn
+import time
+import uuid
+import urllib3
+import requests
+
+from osm_ro.vimconn_vmware import vimconnector
+# TODO py3 uncoment from requests.packages.urllib3.exceptions import InsecureRequestWarning
+from prettytable import PrettyTable
+
+# TODO py3 uncoment requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+__author__ = "Mustafa Bayramov"
+__date__ = "$16-Sep-2016 11:09:29$"
+
+
+# TODO move to main vim
def delete_network_action(vca=None, network_uuid=None):
    """
    Method leverages vCloud director and query network based on network uuid

    Args:
        vca - is active VCA connection.
        network_uuid - is a network uuid

    Returns:
        The return XML respond content, or None on failure / missing arguments
    """
    if vca is None or network_uuid is None:
        return None

    rest_url = "".join([vca.host, '/api/admin/network/', network_uuid])

    # requires an authenticated vCloud session with an organization selected
    if vca.vcloud_session and vca.vcloud_session.organization:
        response = Http.get(url=rest_url,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)
        if response.status_code == requests.codes.ok:
            print(response.content)
            return response.content

    return None
+
+
def print_vapp(vapp_dict=None):
    """ Method takes vapp_dict and print in tabular format

    Args:
        vapp_dict: container vapp object, keyed by vm uuid.

    Returns:
        The return nothing
    """
    # keys available per entry include: status, storageProfileName, memoryMB,
    # networkName, containerName, container (href), numberOfCpus,
    # pvdcHighestSupportedHardwareVersion, isDeployed, ...
    if vapp_dict is None:
        return

    columns = ['vm uuid', 'vapp name', 'vapp uuid', 'network name',
               'storage name', 'vcpu', 'memory', 'hw ver', 'deployed', 'status']
    vm_table = PrettyTable(columns)
    for vm_uuid, data in vapp_dict.items():
        # container href's last path element minus a 5-char prefix
        # (presumably "vapp-"/"vm-"; e.g. vm-b1f5cd4c-2239-4c89-8fdc-a41ff18e0d61)
        vapp_uuid = data['container'].split('/')[-1:][0][5:]
        row = [vm_uuid,
               data['containerName'],
               vapp_uuid,
               data['networkName'],
               data['storageProfileName'],
               data['numberOfCpus'],
               data['memoryMB'],
               data['pvdcHighestSupportedHardwareVersion'],
               data['isDeployed'],
               data['status']]
        vm_table.add_row(row)

    print(vm_table)
+
+
def print_org(org_dict=None):
    """ Print organizations in tabular format (uuid, name).

    Args:
        org_dict: dictionary of organization where key is org uuid.

    Returns:
        The return nothing
    """
    if org_dict is None:
        return

    org_table = PrettyTable(['org uuid', 'name'])
    for org_uuid, org_name in org_dict.items():
        org_table.add_row([org_uuid, org_name])

    print(org_table)
+
+
def print_vm_list(vm_dict=None):
    """ Method takes vm_dict and print in tabular format

    Args:
        vm_dict: dictionary of VM details where key is vm uuid.

    Returns:
        The return nothing
    """
    if vm_dict is None:
        return

    vm_table = PrettyTable(
        ['vm uuid', 'vm name', 'vapp uuid', 'vdc uuid', 'network name', 'is deployed', 'vcpu', 'memory', 'status'])

    try:
        for k in vm_dict:
            entry = []
            entry.append(k)
            entry.append(vm_dict[k]['name'])
            # container href's last path element minus a 5-char prefix
            entry.append(vm_dict[k]['container'].split('/')[-1:][0][5:])
            entry.append(vm_dict[k]['vdc'].split('/')[-1:][0])
            entry.append(vm_dict[k]['networkName'])
            entry.append(vm_dict[k]['isDeployed'])
            entry.append(vm_dict[k]['numberOfCpus'])
            entry.append(vm_dict[k]['memoryMB'])
            entry.append(vm_dict[k]['status'])
            vm_table.add_row(entry)
        print(vm_table)
    except KeyError as e:
        # fix: KeyError.message does not exist in Python 3 (it raised
        # AttributeError inside the handler); log the caught exception instead
        logger.error("wrong key {}".format(e))
+
+
def print_vdc_list(org_dict=None):
    """ Print the VDCs of an organization in tabular format.

    Args:
        org_dict: organization dictionary; its 'vdcs' entry maps vdc uuid to name.

    Returns:
        The return nothing
    """
    if org_dict is None:
        return
    try:
        vdcs_dict = {}
        if 'vdcs' in org_dict:
            vdcs_dict = org_dict['vdcs']
        vdc_table = PrettyTable(['vdc uuid', 'vdc name'])
        for k in vdcs_dict:
            entry = [k, vdcs_dict[k]]
            vdc_table.add_row(entry)

        print(vdc_table)
    except KeyError as e:
        # fix: KeyError.message does not exist in Python 3; log the exception
        logger.error("wrong key {}".format(e))
        # fix: logger.logger.debug raised AttributeError; use logger.debug
        logger.debug(traceback.format_exc())
+
+
def print_network_list(org_dict=None):
    """ Method print network list.

    Args:
        org_dict: dictionary of organization that contain key networks with a list of all
        network for specific VDC

    Returns:
        The return nothing
    """
    if org_dict is None:
        return
    try:
        network_dict = {}
        if 'networks' in org_dict:
            network_dict = org_dict['networks']
        network_table = PrettyTable(['network uuid', 'network name'])
        for k in network_dict:
            entry = [k, network_dict[k]]
            network_table.add_row(entry)

        print(network_table)

    except KeyError as e:
        # fix: KeyError.message does not exist in Python 3; log the exception
        logger.error("wrong key {}".format(e))
        # fix: logger.logger.debug raised AttributeError; use logger.debug
        logger.debug(traceback.format_exc())
+
+
def print_org_details(org_dict=None):
    """ Print the VDCs, networks and catalogs of an organization.

    Args:
        org_dict: dictionary of organization where key is org uuid.

    Returns:
        The return nothing
    """
    if org_dict is None:
        return
    try:
        catalogs_dict = {}

        print_vdc_list(org_dict=org_dict)
        print_network_list(org_dict=org_dict)

        if 'catalogs' in org_dict:
            catalogs_dict = org_dict['catalogs']

        catalog_table = PrettyTable(['catalog uuid', 'catalog name'])
        for k in catalogs_dict:
            entry = [k, catalogs_dict[k]]
            catalog_table.add_row(entry)

        print(catalog_table)

    except KeyError as e:
        # fix: KeyError.message does not exist in Python 3; log the exception
        logger.error("wrong key {}".format(e))
        # fix: logger.logger.debug raised AttributeError; use logger.debug
        logger.debug(traceback.format_exc())
+
+
def delete_actions(vim=None, action=None, namespace=None):
    """Handle the 'delete' CLI action (currently only networks).

    Resolves a network name to its uuid when the uuid flag was not given,
    then requests the deletion through the vim connector.
    """
    if action == 'network' or namespace.action == 'network':
        logger.debug("Requesting delete for network {}".format(namespace.network_name))
        network_uuid = namespace.network_name
        if not namespace.uuid:
            # name-based request: scan every organization's networks for the uuid
            # TODO optimize it or move to external function
            for org in vim.get_org_list():
                for net_uuid, net_name in vim.get_org(org)['networks'].items():
                    if net_name == namespace.network_name:
                        network_uuid = net_uuid

        vim.delete_network_action(network_uuid=network_uuid)
+
+
def list_actions(vim=None, action=None, namespace=None):
    """ Method provide list object from VDC action

    Args:
        vim - is vcloud director vim connector.
        action - is action for list ( vdc / org etc)
        namespace - must contain VDC / Org information.

    Returns:
        The return nothing
    """
    org_id = None
    orgs = vim.get_org_list()
    # resolve the organization name given on the CLI to its uuid
    for org_uuid, org_name in orgs.items():
        if org_name == namespace.vcdorg:
            org_id = org_uuid
            break
    else:
        # loop completed without break: organization name not found
        print(" Invalid organization.")
        return

    if 'vms' in (action, namespace.action):
        print_vm_list(vm_dict=vim.get_vm_list(vdc_name=namespace.vcdvdc))
    elif 'vapps' in (action, namespace.action):
        print_vapp(vapp_dict=vim.get_vapp_list(vdc_name=namespace.vcdvdc))
    elif 'networks' in (action, namespace.action):
        if namespace.osm:
            osm_print(vim.get_network_list(filter_dict={}))
        else:
            print_network_list(vim.get_org(org_uuid=org_id))
    elif 'vdc' in (action, namespace.action):
        if namespace.osm:
            osm_print(vim.get_tenant_list(filter_dict=None))
        else:
            print_vdc_list(vim.get_org(org_uuid=org_id))
    elif 'org' in (action, namespace.action):
        print_org(org_dict=vim.get_org_list())
    else:
        return None
+
+
def print_network_details(network_dict=None):
    """Print a network description as a single-row table of its fields."""
    try:
        network_table = PrettyTable(network_dict.keys())
        entry = list(network_dict.values())
        # NOTE(review): only entry[0] is passed as the row — verify against
        # the shape returned by vim.get_vcd_network; preserved as-is.
        network_table.add_row(entry[0])
        print(network_table)
    except KeyError as e:
        # fix: KeyError.message does not exist in Python 3; log the exception
        logger.error("wrong key {}".format(e))
        # fix: logger.logger.debug raised AttributeError; use logger.debug
        logger.debug(traceback.format_exc())
+
+
def osm_print(generic_dict=None):
    """Print an iterable of dictionaries, one single-row table per element.

    Args:
        generic_dict: iterable of dictionaries (OSM-format list results).
    """
    try:
        for element in generic_dict:
            table = PrettyTable(element.keys())
            entry = list(element.values())
            # NOTE(review): only entry[0] is passed as the row — verify against
            # the connector's return format; preserved as-is.
            table.add_row(entry[0])
            print(table)
    except KeyError as e:
        # fix: KeyError.message does not exist in Python 3; log the exception
        logger.error("wrong key {}".format(e))
        # fix: logger.logger.debug raised AttributeError; use logger.debug
        logger.debug(traceback.format_exc())
+
+
def view_actions(vim=None, action=None, namespace=None):
    """Handle the 'view' CLI action for org / vapp / network objects.

    Resolves the organization given in namespace.vcdorg to its uuid, then
    prints details of the requested object. Prints an error and returns when
    the organization is unknown.
    """
    org_id = None
    orgs = vim.get_org_list()
    # resolve organization name -> uuid; for/else fires when no break happened
    for org in orgs:
        if orgs[org] == namespace.vcdorg:
            org_id = org
            break
    else:
        print(" Invalid organization.")
        return

    myorg = vim.get_org(org_uuid=org_id)

    # view org
    if action == 'org' or namespace.action == 'org':
        org_id = None
        orgs = vim.get_org_list()
        if namespace.uuid:
            if namespace.org_name in orgs:
                org_id = namespace.org_name
        else:
            # we need find UUID based on name provided
            for org in orgs:
                if orgs[org] == namespace.org_name:
                    org_id = org
                    break

        logger.debug("Requesting view for orgs {}".format(org_id))
        print_org_details(vim.get_org(org_uuid=org_id))

    # view vapp action
    if action == 'vapp' or namespace.action == 'vapp':
        if namespace.vapp_name is not None and namespace.uuid:
            logger.debug("Requesting vapp {} for vdc {}".format(namespace.vapp_name, namespace.vcdvdc))
            vapp_dict = {}
            vapp_uuid = namespace.vapp_name
            # if request based on just name we need get UUID
            # NOTE(review): the enclosing condition requires namespace.uuid to
            # be truthy, so this branch looks unreachable — confirm intent.
            if not namespace.uuid:
                vapp_uuid = vim.get_vappid(vdc=namespace.vcdvdc, vapp_name=namespace.vapp_name)
                if vapp_uuid is None:
                    print("Can't find vapp by given name {}".format(namespace.vapp_name))
                    return

            print(" namespace {}".format(namespace))
            if vapp_dict is not None and namespace.osm:
                vm_info_dict = vim.get_vminstance(vim_vm_uuid=vapp_uuid)
                print(vm_info_dict)
            if vapp_dict is not None and namespace.osm != True:
                vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vapp_uuid, isuuid=True)
                print_vapp(vapp_dict=vapp_dict)

    # view network
    if action == 'network' or namespace.action == 'network':
        logger.debug("Requesting view for network {}".format(namespace.network_name))
        network_uuid = namespace.network_name
        # if request name based we need find UUID
        # TODO optimize it or move to external function
        if not namespace.uuid:
            if 'networks' not in myorg:
                print("Network {} is undefined in vcloud director for org {} vdc {}".format(namespace.network_name,
                                                                                            vim.name,
                                                                                            vim.tenant_name))
                return

            my_org_net = myorg['networks']
            for network in my_org_net:
                if my_org_net[network] == namespace.network_name:
                    network_uuid = network
                    break

        print(print_network_details(network_dict=vim.get_vcd_network(network_uuid=network_uuid)))
+
+
def create_actions(vim=None, action=None, namespace=None):
    """Method creates a network or a VDC in vcloud director

    Args:
        vim - is Cloud director vim connector
        action - action for create ( network / vdc etc)
        namespace - parsed CLI arguments holding the object name

    Returns:
        None (results are printed)
    """
    if action == 'network' or namespace.action == 'network':
        # fix: the original message had no {} placeholder, so the format()
        # argument was silently dropped
        logger.debug("Creating a network {} in vcloud director".format(namespace.network_name))
        network_uuid = vim.create_network(namespace.network_name)
        if network_uuid is not None:
            # fix: "Crated" typo in the user-facing message
            print("Created new network {} and uuid: {}".format(namespace.network_name, network_uuid))
        else:
            print("Failed create a new network {}".format(namespace.network_name))
    elif action == 'vdc' or namespace.action == 'vdc':
        logger.debug("Creating a new vdc {} in vcloud director.".format(namespace.vdc_name))
        vdc_uuid = vim.create_vdc(namespace.vdc_name)
        if vdc_uuid is not None:
            print("Created new vdc {} and uuid: {}".format(namespace.vdc_name, vdc_uuid))
        else:
            print("Failed create a new vdc {}".format(namespace.vdc_name))
    else:
        return None
+
+
def validate_uuid4(uuid_string):
    """Check whether a string contains a valid UUID.

    Note: ``uuid.UUID(..., version=4)`` does not reject UUIDs of other
    versions (it only forces the version bits), so this effectively
    validates the general UUID format rather than strictly version 4.

    Args:
        uuid_string: candidate UUID string

    Returns:
        bool: True if uuid_string is a well-formed UUID, False otherwise
    """
    try:
        uuid.UUID(uuid_string, version=4)
    except (ValueError, TypeError, AttributeError):
        # ValueError: malformed string; TypeError/AttributeError: input is
        # not a string at all (e.g. None) — previously this crashed
        return False
    return True
+
+
def upload_image(vim=None, image_file=None):
    """Upload an image to vcloud director.

    Args:
        vim: vcloud director vim connector
        image_file: path to the image file to upload

    Returns:
        bool: True if the image was uploaded correctly, False otherwise
    """
    try:
        catalog_uuid = vim.get_image_id_from_path(path=image_file, progress=True)
        if catalog_uuid is not None and validate_uuid4(catalog_uuid):
            print("Image uploaded and uuid {}".format(catalog_uuid))
            return True
    except vimconn.VimConnException as upload_exception:
        print("Failed to upload {} image".format(image_file))
        # bug fix: Python 3 exceptions have no ``message`` attribute;
        # the old code raised AttributeError while reporting the error
        print("Error Reason: {}".format(str(upload_exception)))
    return False
+
+
def boot_image(vim=None, image_name=None, vm_name=None, namespace=None):
    """Boot an image that resides in vcloud director.
    The image can be referenced by UUID or by name.

    Args:
        vim: vim connector
        image_name: image identified by UUID or text string
        vm_name: name for the new VM
        namespace: optional parsed CLI arguments; when provided (with a
            ``vcdvdc`` attribute) the booted vapp details are printed

    Returns:
        bool: True if the image booted correctly, False otherwise
        (None when the catalog cannot be resolved)
    """
    try:
        catalogs = vim.vca.get_catalogs()
        # get_catalogid resolves both a name and a UUID, so one lookup
        # covers both cases (the original duplicated identical branches)
        vim_catalog = vim.get_catalogid(catalog_name=image_name, catalogs=catalogs)
        if vim_catalog is None:
            return None

        print(" Booting {} image id {} ".format(vm_name, vim_catalog))
        vm_uuid, _ = vim.new_vminstance(name=vm_name, image_id=vim_catalog)
        if vm_uuid is not None and validate_uuid4(vm_uuid):
            print("Image booted and vm uuid {}".format(vm_uuid))
            # bug fix: ``namespace`` was referenced here without being
            # defined, crashing with NameError on every successful boot;
            # it is now an optional parameter and the vapp details are
            # only printed when the caller provides it
            if namespace is not None and getattr(namespace, 'vcdvdc', None):
                vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc,
                                         vapp_name=vm_uuid, isuuid=True)
                if vapp_dict is not None:
                    print_vapp(vapp_dict=vapp_dict)
            return True
    except vimconn.VimConnNotFoundException as not_found:
        print("Failed boot {} image".format(image_name))
        # bug fix: Python 3 exceptions have no ``message`` attribute
        print(str(not_found))
    except vimconn.VimConnException as vimconn_error:
        print("Failed boot {} image".format(image_name))
        print(str(vimconn_error))
    except Exception:
        # bug fix: bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; narrow it to Exception
        print("Failed boot {} image".format(image_name))

    return False
+
+
def image_action(vim=None, action=None, namespace=None):
    """Dispatch image-related subcommands.
    - upload image
    - boot image
    - delete image (not implemented yet)

    Args:
        vim: vcloud director connector
        action: subcommand string ('upload' or 'boot')
        namespace: parsed CLI arguments (image name, vm name, ...)

    Returns:
        None
    """
    def _requested(subcommand):
        # either the explicit ``action`` argument or the parsed CLI wins
        return action == subcommand or namespace.action == subcommand

    if _requested('upload'):
        upload_image(vim=vim, image_file=namespace.image)
    elif _requested('boot'):
        boot_image(vim=vim, image_name=namespace.image, vm_name=namespace.vmname)
    else:
        return None
+
+
def vmwarecli(command=None, action=None, namespace=None):
    """Entry point of the vmware CLI: connect to vcloud director and
    dispatch the requested command/action.

    Args:
        command: top-level command (list/view/delete/create/image)
        action: sub-action for the command
        namespace: parsed CLI arguments (credentials, names, flags)
    """
    logger.debug("Namespace {}".format(namespace))
    urllib3.disable_warnings()

    vcduser = None
    vcdpasword = None
    vcdhost = None
    vcdorg = None

    # bug fix: the condition checked ``namespace.vcdvdc`` instead of
    # ``namespace.vcduser``, so a missing username was never prompted for
    if namespace.vcduser is None:
        while True:
            vcduser = input("Enter vcd username: ")
            if vcduser is not None and len(vcduser) > 0:
                break
    else:
        vcduser = namespace.vcduser

    if namespace.vcdpassword is None:
        while True:
            vcdpasword = input("Please enter vcd password: ")
            if vcdpasword is not None and len(vcdpasword) > 0:
                break
    else:
        vcdpasword = namespace.vcdpassword

    if namespace.vcdhost is None:
        while True:
            vcdhost = input("Please enter vcd host name or ip: ")
            if vcdhost is not None and len(vcdhost) > 0:
                break
    else:
        vcdhost = namespace.vcdhost

    if namespace.vcdorg is None:
        while True:
            vcdorg = input("Please enter vcd organization name: ")
            if vcdorg is not None and len(vcdorg) > 0:
                break
    else:
        vcdorg = namespace.vcdorg

    try:
        vim = vimconnector(uuid=None,
                           name=vcdorg,
                           tenant_id=None,
                           tenant_name=namespace.vcdvdc,
                           url=vcdhost,
                           url_admin=vcdhost,
                           user=vcduser,
                           passwd=vcdpasword,
                           log_level="DEBUG",
                           config={'admin_username': namespace.vcdamdin,
                                   'admin_password': namespace.vcdadminpassword})
        vim.vca = vim.connect()

    except vimconn.VimConnConnectionException:
        print("Failed connect to vcloud director. Please check credential and hostname.")
        return

    # list
    if command == 'list' or namespace.command == 'list':
        logger.debug("Client requested list action")
        # route request to list actions
        list_actions(vim=vim, action=action, namespace=namespace)

    # view action
    if command == 'view' or namespace.command == 'view':
        logger.debug("Client requested view action")
        view_actions(vim=vim, action=action, namespace=namespace)

    # delete action
    if command == 'delete' or namespace.command == 'delete':
        logger.debug("Client requested delete action")
        delete_actions(vim=vim, action=action, namespace=namespace)

    # create action
    if command == 'create' or namespace.command == 'create':
        logger.debug("Client requested create action")
        create_actions(vim=vim, action=action, namespace=namespace)

    # image action
    if command == 'image' or namespace.command == 'image':
        # bug fix: the log message wrongly said "create" for this branch
        logger.debug("Client requested image action")
        image_action(vim=vim, action=action, namespace=namespace)
+
+
if __name__ == '__main__':
    # Fallback configuration; overridden by environment variables, which
    # are in turn overridden by command-line arguments.
    defaults = {'vcdvdc': 'default',
                'vcduser': 'admin',
                'vcdpassword': 'admin',
                'vcdhost': 'https://localhost',
                'vcdorg': 'default',
                'debug': 'INFO'}

    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--vcduser', help='vcloud director username', type=str)
    parser.add_argument('-p', '--vcdpassword', help='vcloud director password', type=str)
    # bug fix: the -U/-P help strings both wrongly said
    # 'vcloud director password'
    parser.add_argument('-U', '--vcdamdin', help='vcloud director admin username', type=str)
    parser.add_argument('-P', '--vcdadminpassword', help='vcloud director admin password', type=str)
    parser.add_argument('-c', '--vcdhost', help='vcloud director host', type=str)
    parser.add_argument('-o', '--vcdorg', help='vcloud director org', type=str)
    parser.add_argument('-v', '--vcdvdc', help='vcloud director vdc', type=str)
    parser.add_argument('-d', '--debug', help='debug level', type=int)

    parser_subparsers = parser.add_subparsers(help='commands', dest='command')
    sub = parser_subparsers.add_parser('list', help='List objects (VMs, vApps, networks)')
    sub_subparsers = sub.add_subparsers(dest='action')

    list_vms = sub_subparsers.add_parser('vms', help='list - all vm deployed in vCloud director')
    list_vapps = sub_subparsers.add_parser('vapps', help='list - all vapps deployed in vCloud director')
    list_network = sub_subparsers.add_parser('networks', help='list - all networks deployed')
    list_network.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')

    # list vdc
    list_vdc = sub_subparsers.add_parser('vdc', help='list - list all vdc for organization accessible to you')
    list_vdc.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')

    list_org = sub_subparsers.add_parser('org', help='list - list of organizations accessible to you.')

    create_sub = parser_subparsers.add_parser('create')
    create_sub_subparsers = create_sub.add_subparsers(dest='action')
    create_vms = create_sub_subparsers.add_parser('vms')
    create_vapp = create_sub_subparsers.add_parser('vapp')
    create_vapp.add_argument('uuid')

    # add network
    create_network = create_sub_subparsers.add_parser('network')
    create_network.add_argument('network_name', action='store', help='create a network for a vdc')

    # add VDC
    create_vdc = create_sub_subparsers.add_parser('vdc')
    create_vdc.add_argument('vdc_name', action='store', help='create a new VDC for org')

    delete_sub = parser_subparsers.add_parser('delete')
    del_sub_subparsers = delete_sub.add_subparsers(dest='action')
    del_vms = del_sub_subparsers.add_parser('vms')
    del_vapp = del_sub_subparsers.add_parser('vapp')
    del_vapp.add_argument('uuid', help='view vapp based on UUID')

    # delete network
    del_network = del_sub_subparsers.add_parser('network')
    del_network.add_argument('network_name', action='store',
                             help='- delete network for vcloud director by provided name')
    del_network.add_argument('-u', '--uuid', default=False, action='store_true',
                             help='delete network for vcloud director by provided uuid')

    # delete vdc
    del_vdc = del_sub_subparsers.add_parser('vdc')

    view_sub = parser_subparsers.add_parser('view')
    view_sub_subparsers = view_sub.add_subparsers(dest='action')

    view_vms_parser = view_sub_subparsers.add_parser('vms')
    view_vms_parser.add_argument('uuid', default=False, action='store_true',
                                 help='- View VM for specific uuid in vcloud director')
    view_vms_parser.add_argument('name', default=False, action='store_true',
                                 help='- View VM for specific vapp name in vcloud director')

    # view vapp
    view_vapp_parser = view_sub_subparsers.add_parser('vapp')
    view_vapp_parser.add_argument('vapp_name', action='store',
                                  help='- view vapp for specific vapp name in vcloud director')
    view_vapp_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view vapp based on uuid')
    view_vapp_parser.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')

    # view network
    view_network = view_sub_subparsers.add_parser('network')
    view_network.add_argument('network_name', action='store',
                              help='- view network for specific network name in vcloud director')
    view_network.add_argument('-u', '--uuid', default=False, action='store_true', help='view network based on uuid')

    # view VDC command and actions
    view_vdc = view_sub_subparsers.add_parser('vdc')
    view_vdc.add_argument('vdc_name', action='store',
                          help='- View VDC based and action based on provided vdc uuid')
    view_vdc.add_argument('-u', '--uuid', default=False, action='store_true', help='view vdc based on uuid')

    # view organization command and actions
    view_org = view_sub_subparsers.add_parser('org')
    view_org.add_argument('org_name', action='store',
                          help='- View VDC based and action based on provided vdc uuid')
    view_org.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')

    # upload image action
    image_sub = parser_subparsers.add_parser('image')
    image_subparsers = image_sub.add_subparsers(dest='action')
    upload_parser = image_subparsers.add_parser('upload')
    upload_parser.add_argument('image', default=False, action='store', help='- valid path to OVF image ')
    upload_parser.add_argument('catalog', default=False, action='store_true', help='- catalog name')

    # boot vm action
    boot_parser = image_subparsers.add_parser('boot')
    boot_parser.add_argument('image', default=False, action='store', help='- Image name')
    boot_parser.add_argument('vmname', default=False, action='store', help='- VM name')
    boot_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')

    namespace = parser.parse_args()
    # put command_line args to mapping; falsy values are dropped so the
    # defaults / environment can fill them in
    command_line_args = {k: v for k, v in vars(namespace).items() if v}

    d = defaults.copy()
    d.update(os.environ)
    d.update(command_line_args)

    logger = logging.getLogger('mano.vim.vmware')
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch = logging.StreamHandler()
    # bug fix: ``-d`` is parsed as int, and ``str.upper`` on an int raised
    # TypeError; resolve the level to a numeric value either way
    debug_level = d['debug']
    if not isinstance(debug_level, int):
        debug_level = getattr(logging, str(debug_level).upper())
    ch.setLevel(debug_level)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.setLevel(debug_level)
    logger.info(
        "Connecting {} username: {} org: {} vdc: {} ".format(d['vcdhost'], d['vcduser'], d['vcdorg'], d['vcdvdc']))

    # bug fix: 'command'/'action' are absent from ``d`` when no subcommand
    # is given (KeyError before); also fixed the "actio:" typo
    logger.debug("command: \"{}\" action: \"{}\"".format(d.get('command'), d.get('action')))

    # main entry point.
    vmwarecli(namespace=namespace)
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=E1101,E0203,W0201
+
+"""Common logic for task management"""
+import logging
+from time import time
+
+import yaml
+
+from ..utils import (
+ filter_dict_keys,
+ filter_out_dict_keys,
+ merge_dicts,
+ remove_none_items,
+ truncate
+)
+
# Processing states returned by ``Action.processing``:
#   PENDING - the task is SCHEDULED and waits to be processed
#   REFRESH - the item exists (DONE/BUILD) and its status must be refreshed
#   IGNORE  - nothing to do for this task
PENDING, REFRESH, IGNORE = range(3)

TIMEOUT = 1 * 60 * 60  # 1 hour -- max delay tolerated when deferring a task
MIN_ATTEMPTS = 10  # attempts to try before a deferred task may be failed
+
+
class Action(object):
    """Create a basic object representing the action record.

    Arguments:
        record (dict): record as returned by the database
        **kwargs: extra keyword arguments to overwrite the fields in record
    """

    PROPERTIES = [
        'task_index',  # MD - Index number of the task.
                       #      This together with the instance_action_id
                       #      forms a unique key identifier
        'action',  # MD - CREATE, DELETE, FIND
        'item',  # MD - table name, eg. instance_wim_nets
        'item_id',  # MD - uuid of the referenced entry in the
                    #      previous table
        'instance_action_id',  # MD - reference to a cohesive group of actions
                               #      related to the same instance-scenario
        'wim_account_id',  # MD - reference to the WIM account used
                           #      by the thread/connector
        'wim_internal_id',  # MD - internal ID used by the WIM to refer to
                            #      the item
        'datacenter_vim_id',  # MD - reference to the VIM account used
                              #      by the thread/connector
        'vim_id',  # MD - internal ID used by the VIM to refer to
                   #      the item
        'status',  # MD - SCHEDULED,BUILD,DONE,FAILED,SUPERSEDED
        'extra',  # MD - text with yaml format at database,
                  #      dict at memory with:
                  # `- params: list with the params to be sent to the VIM for CREATE
                  #    or FIND. For DELETE the vim_id is taken from other
                  #    related tasks
                  # `- find: (only for CREATE tasks) if present it should FIND
                  #    before creating and use if existing.
                  #    Contains the FIND params
                  # `- depends_on: list with the 'task_index'es of tasks that must be
                  #    completed before. e.g. a vm creation depends on a net
                  #    creation
                  # `- sdn_net_id: used for net.
                  # `- tries
                  # `- created_items:
                  #    dictionary with extra elements created that need
                  #    to be deleted. e.g. ports,
                  # `- volumes,...
                  # `- created: False if the VIM element is not created by
                  #    other actions, and it should not be deleted
                  # `- wim_status: WIM status of the element. Stored also at database
                  #    in the item table
        'params',  # M - similar to extra[params]
        'depends_on',  # M - similar to extra[depends_on]
        'depends',  # M - dict with task_index(from depends_on) to
                    #     task class
        'error_msg',  # MD - descriptive text upon an error
        'created_at',  # MD - task DB creation time
        'modified_at',  # MD - last DB update time
        'process_at',  # M - unix epoch when to process the task
    ]

    __slots__ = PROPERTIES + [
        'logger',
    ]

    def __init__(self, record, logger=None, **kwargs):
        self.logger = logger or logging.getLogger('openmano.wim.action')
        attrs = merge_dicts(dict.fromkeys(self.PROPERTIES), record, kwargs)
        self.update(_expand_extra(attrs))

    def __repr__(self):
        return super(Action, self).__repr__() + repr(self.as_dict())

    def as_dict(self, *fields):
        """Representation of the object as a dict"""
        attrs = (set(self.PROPERTIES) & set(fields)
                 if fields else self.PROPERTIES)
        return {k: getattr(self, k) for k in attrs}

    def as_record(self):
        """Returns a dict that can be send to the persistence layer"""
        special = ['params', 'depends_on', 'depends']
        record = self.as_dict()
        record['extra'].update(self.as_dict(*special))
        non_fields = special + ['process_at']

        return remove_none_items(filter_out_dict_keys(record, non_fields))

    def update(self, values=None, **kwargs):
        """Update the in-memory representation of the task (works similarly to
        dict.update). The update is NOT automatically persisted.
        """
        # "white-listed mass assignment"
        updates = merge_dicts(values, kwargs)
        for attr in set(self.PROPERTIES) & set(updates.keys()):
            setattr(self, attr, updates[attr])

    def save(self, persistence, **kwargs):
        """Persist current state of the object to the database.

        Arguments:
            persistence: object encapsulating the database
            **kwargs: extra properties to be updated before saving

        Note:
            If any key word argument is passed, the object itself will be
            changed as an extra side-effect.
        """
        action_id = self.instance_action_id
        index = self.task_index
        if kwargs:
            self.update(kwargs)
        properties = self.as_record()

        return persistence.update_action(action_id, index, properties)

    def fail(self, persistence, reason, status='FAILED'):
        """Mark action as FAILED, updating tables accordingly"""
        persistence.update_instance_action_counters(
            self.instance_action_id,
            failed=1,
            done=(-1 if self.status == 'DONE' else 0))

        self.status = status
        self.error_msg = truncate(reason)
        self.logger.error('%s %s: %s', self.id, status, reason)
        return self.save(persistence)

    def succeed(self, persistence, status='DONE'):
        """Mark action as DONE, updating tables accordingly"""
        persistence.update_instance_action_counters(
            self.instance_action_id, done=1)
        self.status = status
        self.logger.debug('%s %s', self.id, status)
        return self.save(persistence)

    def defer(self, persistence, reason,
              timeout=TIMEOUT, min_attempts=MIN_ATTEMPTS):
        """Postpone the task processing, taking care to not timeout.

        Arguments:
            persistence: object encapsulating the database
            reason (str): explanation for the delay
            timeout (int): maximum delay tolerated since the first attempt.
                Note that this number is a time delta, in seconds
            min_attempts (int): Number of attempts to try before giving up.
        """
        now = time()
        last_attempt = self.extra.get('last_attempted_at') or now
        attempts = self.extra.get('attempts') or 0

        # bug fix: the elapsed time is ``now - last_attempt`` (the operands
        # were swapped, which made the timeout condition always False);
        # additionally '{:d}' on the float division raised ValueError, and
        # the missing return re-saved an already FAILED task as deferred
        if now - last_attempt > timeout and attempts > min_attempts:
            return self.fail(
                persistence,
                'Timeout reached. {} attempts in the last {:.0f} min'
                .format(attempts, timeout / 60))

        self.extra['last_attempted_at'] = now
        self.extra['attempts'] = attempts + 1
        self.logger.info('%s DEFERRED: %s', self.id, reason)
        return self.save(persistence)

    @property
    def group_key(self):
        """Key defining the group to which this tasks belongs"""
        return (self.item, self.item_id)

    @property
    def processing(self):
        """Processing status for the task (PENDING, REFRESH, IGNORE)"""
        if self.status == 'SCHEDULED':
            return PENDING

        return IGNORE

    @property
    def id(self):
        """Unique identifier of this particular action"""
        return '{}[{}]'.format(self.instance_action_id, self.task_index)

    @property
    def is_scheduled(self):
        return self.status == 'SCHEDULED'

    @property
    def is_build(self):
        return self.status == 'BUILD'

    @property
    def is_done(self):
        return self.status == 'DONE'

    @property
    def is_failed(self):
        return self.status == 'FAILED'

    @property
    def is_superseded(self):
        return self.status == 'SUPERSEDED'

    def refresh(self, connector, persistence):
        """Use the connector/persistence to refresh the status of the item.

        After the item status is refreshed any change in the task should be
        persisted to the database.

        Arguments:
            connector: object containing the classes to access the WIM or VIM
            persistence: object containing the methods necessary to query the
                database and to persist the updates
        """
        self.logger.debug(
            'Action `%s` has no refresh to be done',
            self.__class__.__name__)

    def expand_dependency_links(self, task_group):
        """Expand task indexes into actual IDs"""
        if not self.depends_on or (
                isinstance(self.depends, dict) and self.depends):
            return

        num_tasks = len(task_group)
        references = {
            "TASK-{}".format(i): task_group[i]
            for i in self.depends_on
            if i < num_tasks and task_group[i].task_index == i and
            task_group[i].instance_action_id == self.instance_action_id
        }
        self.depends = references

    def become_superseded(self, superseding):
        """When another action tries to supersede this one,
        we need to change both of them, so the surviving actions will be
        logic consistent.

        This method should do the required internal changes, and also
        suggest changes for the other, superseding, action.

        Arguments:
            superseding: other task superseding this one

        Returns:
            dict: changes suggested to the action superseding this one.
                A special key ``superseding_needed`` is used to
                suggest if the superseding is actually required or not.
                If not present, ``superseding_needed`` is assumed to
                be False.
        """
        self.status = 'SUPERSEDED'
        self.logger.debug(
            'Action `%s` was superseded by `%s`',
            self.__class__.__name__, superseding.__class__.__name__)
        return {}

    def supersede(self, others):
        """Supersede other tasks, if necessary

        Arguments:
            others (list): action objects being superseded

        When the task decide to supersede others, this method should call
        ``become_superseded`` on the other actions, collect the suggested
        updates and perform the necessary changes
        """
        # By default actions don't supersede others
        self.logger.debug(
            'Action `%s` does not supersede other actions',
            self.__class__.__name__)

    def process(self, connector, persistence, ovim):
        """Abstract method, that needs to be implemented.
        Process the current task.

        Arguments:
            connector: object with API for accessing the WAN
                Infrastructure Manager system
            persistence: abstraction layer for the database
            ovim: instance of openvim, abstraction layer that enable
                SDN-related operations
        """
        raise NotImplementedError
+
+
class FindAction(Action):
    """Base class for FIND actions; concrete item types should inherit
    from it.
    """
    @property
    def processing(self):
        """FIND tasks in DONE/BUILD state must have their status refreshed."""
        return (REFRESH if self.status in ('DONE', 'BUILD')
                else super(FindAction, self).processing)

    def become_superseded(self, superseding):
        """Hand the known external identifiers over to the superseding
        action (omitting the ones that are still unset).
        """
        super(FindAction, self).become_superseded(superseding)
        suggestion = {field: getattr(self, field)
                      for field in ('vim_id', 'wim_internal_id')}
        return remove_none_items(suggestion)
+
+
class CreateAction(Action):
    """Base class for CREATE actions; concrete item types should inherit
    from it.
    """
    @property
    def processing(self):
        """CREATE tasks in DONE/BUILD state must have their status
        refreshed.
        """
        if self.status not in ('DONE', 'BUILD'):
            return super(CreateAction, self).processing
        return REFRESH

    def become_superseded(self, superseding):
        """Hand over to the superseding action whatever this one already
        created, so the resources can be cleaned up properly.
        """
        super(CreateAction, self).become_superseded(superseding)

        external_id = (self.wim_internal_id or self.vim_id or
                       self.extra.get('sdn_net_id'))
        # Nothing to clean up unless this action created something that
        # already received an external identifier
        if not (self.extra.get('created', True) and external_id):
            return {}

        extra_info = filter_dict_keys(
            self.extra or {}, ('sdn_net_id', 'interfaces', 'created_items'))

        return {'superseding_needed': True,
                'wim_internal_id': self.wim_internal_id,
                'vim_id': self.vim_id,
                'extra': remove_none_items(extra_info)}
+
+
class DeleteAction(Action):
    """Base class for DELETE actions; concrete item types should inherit
    from it.
    """
    def supersede(self, others):
        """Absorb the state of the superseded actions and decide whether
        this DELETE still needs to run.
        """
        self.logger.debug('%s %s %s %s might supersede other actions',
                          self.id, self.action, self.item, self.item_id)
        # Ask each superseded task for its suggested changes
        suggestions = [other.become_superseded(self) for other in others]
        still_needed = any(s.pop('superseding_needed', False)
                           for s in suggestions)

        # Merge the nested structures first: 'extra' and, inside it,
        # 'created_items'
        extras = [s.pop('extra', None) or {} for s in suggestions]
        created = [extra.pop('created_items', None) or {} for extra in extras]
        created = merge_dicts(self.extra.get('created_items', {}), *created)
        self.extra = merge_dicts(
            self.extra, {'created_items': created}, *extras)

        # Then adopt the remaining top-level suggestions (white-listed)
        merged = merge_dicts(*suggestions)
        for attr in merged:
            if attr in self.PROPERTIES:
                setattr(self, attr, merged[attr])

        # If no superseded action left anything behind, this DELETE itself
        # becomes unnecessary
        if not still_needed:
            self.status = 'SUPERSEDED'
+
+
+def _expand_extra(record):
+ extra = record.pop('extra', None) or {}
+ if isinstance(extra, str):
+ extra = yaml.safe_load(extra)
+
+ record['params'] = extra.get('params')
+ record['depends_on'] = extra.get('depends_on', [])
+ record['depends'] = extra.get('depends', None)
+ record['extra'] = extra
+
+ return record
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""This module contains the domain logic, and the implementation of the
+required steps to perform VNF management and orchestration in a WAN
+environment.
+
+It works as an extension/complement to the main functions contained in the
+``nfvo.py`` file and avoids interacting directly with the database, by relying
+on the `persistence` module.
+
+No http request handling/direct interaction with the database should be present
+in this file.
+"""
+import json
+import logging
+from contextlib import contextmanager
+from itertools import groupby
+from operator import itemgetter
+# from sys import exc_info
+from uuid import uuid4
+
+from ..utils import remove_none_items
+from .actions import Action
+from .errors import (
+ DbBaseException,
+ NoWimConnectedToDatacenters,
+ UnexpectedDatabaseError,
+ WimAccountNotActive,
+ UndefinedWimConnector
+)
+from .wim_thread import WimThread
+# from ..http_tools.errors import Bad_Request
+from pkg_resources import iter_entry_points
+
+
+class WimEngine(object):
+ """Logic supporting the establishment of WAN links when NS spans across
+ different datacenters.
+ """
+ def __init__(self, persistence, plugins, logger=None, ovim=None):
+ self.persist = persistence
+ self.plugins = plugins if plugins is not None else {}
+ self.logger = logger or logging.getLogger('openmano.wim.engine')
+ self.threads = {}
+ self.connectors = {}
+ self.ovim = ovim
+
+ def _load_plugin(self, name, type="sdn"):
+ # type can be vim or sdn
+ for v in iter_entry_points('osm_ro{}.plugins'.format(type), name):
+ self.plugins[name] = v.load()
+ if name and name not in self.plugins:
+ raise UndefinedWimConnector(type, name)
+
+ def create_wim(self, properties):
+ """Create a new wim record according to the properties
+
+ Please check the wim schema to have more information about
+ ``properties``.
+
+ The ``config`` property might contain a ``wim_port_mapping`` dict,
+ In this case, the method ``create_wim_port_mappings`` will be
+ automatically invoked.
+
+ Returns:
+ str: uuid of the newly created WIM record
+ """
+ port_mapping = ((properties.get('config', {}) or {})
+ .pop('wim_port_mapping', {}))
+ plugin_name = "rosdn_" + properties["type"]
+ if plugin_name not in self.plugins:
+ self._load_plugin(plugin_name, type="sdn")
+
+ uuid = self.persist.create_wim(properties)
+
+ if port_mapping:
+ try:
+ self.create_wim_port_mappings(uuid, port_mapping)
+ except DbBaseException as e:
+ # Rollback
+ self.delete_wim(uuid)
+ ex = UnexpectedDatabaseError('Failed to create port mappings'
+ 'Rolling back wim creation')
+ self.logger.exception(str(ex))
+ raise ex from e
+
+ return uuid
+
+ def get_wim(self, uuid_or_name, tenant_id=None):
+ """Retrieve existing WIM record by name or id.
+
+ If ``tenant_id`` is specified, the query will be
+ limited to the WIM associated to the given tenant.
+ """
+ # Since it is a pure DB operation, we can delegate it directly
+ return self.persist.get_wim(uuid_or_name, tenant_id)
+
+ def update_wim(self, uuid_or_name, properties):
+ """Edit an existing WIM record.
+
+ ``properties`` is a dictionary with the properties being changed,
+ if a property is not present, the old value will be preserved
+
+ Similarly to create_wim, the ``config`` property might contain a
+ ``wim_port_mapping`` dict, In this case, port mappings will be
+ automatically updated.
+ """
+ port_mapping = ((properties.get('config', {}) or {})
+ .pop('wim_port_mapping', {}))
+ orig_props = self.persist.get_by_name_or_uuid('wims', uuid_or_name)
+ uuid = orig_props['uuid']
+
+ response = self.persist.update_wim(uuid, properties)
+
+ if port_mapping:
+ try:
+ # It is very complex to diff and update individually all the
+ # port mappings. Therefore a practical approach is just delete
+ # and create it again.
+ self.persist.delete_wim_port_mappings(uuid)
+ # ^ Calling from persistence avoid reloading twice the thread
+ self.create_wim_port_mappings(uuid, port_mapping)
+ except DbBaseException as e:
+ # Rollback
+ self.update_wim(uuid_or_name, orig_props)
+ ex = UnexpectedDatabaseError('Failed to update port mappings'
+ 'Rolling back wim updates\n')
+ self.logger.exception(str(ex))
+ raise ex from e
+
+ return response
+
+ def delete_wim(self, uuid_or_name):
+ """Kill the corresponding wim threads and erase the WIM record"""
+ # Theoretically, we can rely on the database to drop the wim_accounts
+ # automatically, since we have configures 'ON CASCADE DELETE'.
+ # However, use use `delete_wim_accounts` to kill all the running
+ # threads.
+ self.delete_wim_accounts(uuid_or_name)
+ return self.persist.delete_wim(uuid_or_name)
+
+ def create_wim_account(self, wim, tenant, properties):
+ """Create an account that associates a tenant to a WIM.
+
+ As a side effect this function will spawn a new thread
+
+ Arguments:
+ wim (str): name or uuid of the WIM related to the account being
+ created
+ tenant (str): name or uuid of the nfvo tenant to which the account
+ will be created
+ properties (dict): properties of the account
+ (eg. username, password, ...)
+
+ Returns:
+ dict: Created record
+ """
+ uuid = self.persist.create_wim_account(wim, tenant, properties)
+ account = self.persist.get_wim_account_by(uuid=uuid)
+ # ^ We need to use get_wim_account_by here, since this methods returns
+ # all the associations, and we need the wim to create the thread
+ self._spawn_thread(account)
+ return account
+
+ def _update_single_wim_account(self, account, properties):
+ """Update WIM Account, taking care to reload the corresponding thread
+
+ Arguments:
+ account (dict): Current account record
+ properties (dict): Properties to be updated
+
+ Returns:
+ dict: updated record
+ """
+ account = self.persist.update_wim_account(account['uuid'], properties)
+ self.threads[account['uuid']].reload()
+ return account
+
+ def update_wim_accounts(self, wim, tenant, properties):
+ """Update all the accounts related to a WIM and a tenant,
+ thanking care of reloading threads.
+
+ Arguments:
+ wim (str): uuid or name of a WIM record
+ tenant (str): uuid or name of a NFVO tenant record
+ properties (dict): attributes with values to be updated
+
+ Returns
+ list: Records that were updated
+ """
+ accounts = self.persist.get_wim_accounts_by(wim, tenant)
+ return [self._update_single_wim_account(account, properties)
+ for account in accounts]
+
+ def _delete_single_wim_account(self, account):
+ """Delete WIM Account, taking care to remove the corresponding thread
+ and delete the internal WIM account, if it was automatically generated.
+
+ Arguments:
+ account (dict): Current account record
+ properties (dict): Properties to be updated
+
+ Returns:
+ dict: current record (same as input)
+ """
+ self.persist.delete_wim_account(account['uuid'])
+
+ if account['uuid'] not in self.threads:
+ raise WimAccountNotActive(
+ 'Requests send to the WIM Account %s are not currently '
+ 'being processed.', account['uuid'])
+ else:
+ self.threads[account['uuid']].exit()
+ del self.threads[account['uuid']]
+
+ return account
+
+ def delete_wim_accounts(self, wim, tenant=None, **kwargs):
+ """Delete all the accounts related to a WIM (and a tenant),
+ thanking care of threads and internal WIM accounts.
+
+ Arguments:
+ wim (str): uuid or name of a WIM record
+ tenant (str): uuid or name of a NFVO tenant record
+
+ Returns
+ list: Records that were deleted
+ """
+ kwargs.setdefault('error_if_none', False)
+ accounts = self.persist.get_wim_accounts_by(wim, tenant, **kwargs)
+ return [self._delete_single_wim_account(a) for a in accounts]
+
+ def _reload_wim_threads(self, wim_id):
+ for thread in self.threads.values():
+ if thread.wim_account['wim_id'] == wim_id:
+ thread.reload()
+
+ def create_wim_port_mappings(self, wim, properties, tenant=None):
+ """Store information about port mappings from Database"""
+ # TODO: Review tenants... WIMs can exist across different tenants,
+ # and the port_mappings are a WIM property, not a wim_account
+ # property, so the concepts are not related
+ wim = self.persist.get_by_name_or_uuid('wims', wim)
+ result = self.persist.create_wim_port_mappings(wim, properties, tenant)
+ self._reload_wim_threads(wim['uuid'])
+ return result
+
+ def get_wim_port_mappings(self, wim):
+ """Retrive information about port mappings from Database"""
+ return self.persist.get_wim_port_mappings(wim)
+
+ def delete_wim_port_mappings(self, wim):
+ """Erase the port mapping records associated with the WIM"""
+ wim = self.persist.get_by_name_or_uuid('wims', wim)
+ message = self.persist.delete_wim_port_mappings(wim['uuid'])
+ self._reload_wim_threads(wim['uuid'])
+ return message
+
+ def find_common_wims(self, datacenter_ids, tenant):
+ """Find WIMs that are common to all datacenters listed"""
+ mappings = self.persist.get_wim_port_mappings(
+ datacenter=datacenter_ids, tenant=tenant, error_if_none=False)
+
+ wim_id_of = itemgetter('wim_id')
+ sorted_mappings = sorted(mappings, key=wim_id_of) # needed by groupby
+ grouped_mappings = groupby(sorted_mappings, key=wim_id_of)
+ mapped_datacenters = {
+ wim_id: [m['datacenter_id'] for m in mappings]
+ for wim_id, mappings in grouped_mappings
+ }
+
+ return [
+ wim_id
+ for wim_id, connected_datacenters in mapped_datacenters.items()
+ if set(connected_datacenters) >= set(datacenter_ids)
+ ]
+
+ def find_common_wim(self, datacenter_ids, tenant):
+ """Find a single WIM that is able to connect all the datacenters
+ listed
+
+ Raises:
+ NoWimConnectedToDatacenters: if no WIM connected to all datacenters
+ at once is found
+ """
+ suitable_wim_ids = self.find_common_wims(datacenter_ids, tenant)
+
+ if not suitable_wim_ids:
+ raise NoWimConnectedToDatacenters(datacenter_ids)
+
+ # TODO: use a criteria to determine which WIM is going to be used,
+ # instead of always using the first one (strategy pattern can be
+ # used here)
+ return suitable_wim_ids[0]
+
+ def find_suitable_wim_account(self, datacenter_ids, tenant):
+ """Find a WIM account that is able to connect all the datacenters
+ listed
+
+ Arguments:
+ datacenter_ids (list): List of UUIDs of all the datacenters (vims),
+ that need to be connected.
+ tenant (str): UUID of the OSM tenant
+
+ Returns:
+ object with the WIM account that is able to connect all the
+ datacenters.
+ """
+ wim_id = self.find_common_wim(datacenter_ids, tenant)
+ return self.persist.get_wim_account_by(wim_id, tenant)
+
+ def derive_wan_link(self,
+ wim_usage,
+ instance_scenario_id, sce_net_id,
+ networks, tenant, related=None):
+ """Create a instance_wim_nets record for the given information"""
+ if sce_net_id in wim_usage:
+ account_id = wim_usage[sce_net_id]
+ account = self.persist.get_wim_account_by(uuid=account_id)
+ wim_id = account['wim_id']
+ else:
+ datacenters = [n['datacenter_id'] for n in networks]
+ wim_id = self.find_common_wim(datacenters, tenant)
+ account = self.persist.get_wim_account_by(wim_id, tenant)
+
+ return {
+ 'uuid': str(uuid4()),
+ 'instance_scenario_id': instance_scenario_id,
+ 'sce_net_id': sce_net_id,
+ 'wim_id': wim_id,
+ 'wim_account_id': account['uuid'],
+ 'related': related
+ }
+
+ def derive_wan_links(self, wim_usage, networks, tenant=None):
+ """Discover and return what are the wan_links that have to be created
+ considering a set of networks (VLDs) required for a scenario instance
+ (NSR).
+
+ Arguments:
+ wim_usage(dict): Mapping between sce_net_id and wim_id. If wim_id is False, means not create wam_links
+ networks(list): Dicts containing the information about the networks
+ that will be instantiated to materialize a Network Service
+ (scenario) instance.
+ Corresponding to the ``instance_net`` record.
+
+ Returns:
+ list: list of WAN links to be written to the database
+ """
+ # Group networks by key=(instance_scenario_id, sce_net_id)
+ related = None
+ if networks:
+ related = networks[0].get("related")
+ filtered = _filter_multi_vim(networks)
+ grouped_networks = _group_networks(filtered)
+ datacenters_per_group = _count_datacenters(grouped_networks)
+ # For each group count the number of networks. If greater then 1,
+ # we have to create a wan link connecting them.
+ wan_groups = [key
+ for key, counter in datacenters_per_group
+ if counter > 1]
+ # Keys are tuples(instance_scenario_id, sce_net_id)
+ return [
+ self.derive_wan_link(wim_usage,
+ key[0], key[1], grouped_networks[key], tenant, related)
+ for key in wan_groups if wim_usage.get(key[1]) is not False
+ ]
+
+ def create_action(self, wan_link):
+ """For a single wan_link create the corresponding create action"""
+ return {
+ 'action': 'CREATE',
+ 'status': 'SCHEDULED',
+ 'item': 'instance_wim_nets',
+ 'item_id': wan_link['uuid'],
+ 'wim_account_id': wan_link['wim_account_id']
+ }
+
+ def create_actions(self, wan_links):
+ """For an array of wan_links, create all the corresponding actions"""
+ return [self.create_action(li) for li in wan_links]
+
+ def delete_action(self, wan_link):
+ """For a single wan_link create the corresponding create action"""
+ return {
+ 'action': 'DELETE',
+ 'status': 'SCHEDULED',
+ 'item': 'instance_wim_nets',
+ 'item_id': wan_link['uuid'],
+ 'wim_account_id': wan_link['wim_account_id'],
+ 'extra': json.dumps({'wan_link': wan_link})
+ # We serialize and cache the wan_link here, because it can be
+ # deleted during the delete process
+ }
+
+ def delete_actions(self, wan_links=(), instance_scenario_id=None):
+ """Given a Instance Scenario, remove all the WAN Links created in the
+ past"""
+ if instance_scenario_id:
+ wan_links = self.persist.get_wan_links(
+ instance_scenario_id=instance_scenario_id, sdn='false')
+ return [self.delete_action(li) for li in wan_links]
+
+ def incorporate_actions(self, wim_actions, instance_action):
+ """Make the instance action consider new WIM actions and make the WIM
+ actions aware of the instance action
+ """
+ current = instance_action.setdefault('number_tasks', 0)
+ for i, action in enumerate(wim_actions):
+ action['task_index'] = current + i
+ action['instance_action_id'] = instance_action['uuid']
+ instance_action['number_tasks'] += len(wim_actions)
+
+ return wim_actions, instance_action
+
+ def dispatch(self, tasks):
+ """Enqueue a list of tasks for further processing.
+
+ This function is supposed to be called outside from the WIM Thread.
+ """
+ for task in tasks:
+ if task['wim_account_id'] not in self.threads:
+ error_msg = str(WimAccountNotActive(
+ 'Requests send to the WIM Account %s are not currently '
+ 'being processed.', task['wim_account_id']))
+ Action(task, self.logger).fail(self.persist, error_msg)
+ self.persist.update_wan_link(task['item_id'],
+ {'status': 'ERROR',
+ 'error_msg': error_msg})
+ self.logger.error('Task %s %s %s not dispatched.\n%s',
+ task['action'], task['item'],
+ task['instance_account_id'], error_msg)
+ else:
+ self.threads[task['wim_account_id']].insert_task(task)
+ self.logger.debug('Task %s %s %s dispatched',
+ task['action'], task['item'],
+ task['instance_action_id'])
+
+ def _spawn_thread(self, wim_account):
+ """Spawn a WIM thread
+
+ Arguments:
+ wim_account (dict): WIM information (usually persisted)
+ The `wim` field is required to be set with a valid WIM record
+ inside the `wim_account` dict
+
+ Return:
+ threading.Thread: Thread object
+ """
+ thread = None
+ try:
+ thread = WimThread(self.persist, self.plugins, wim_account, ovim=self.ovim)
+ self.threads[wim_account['uuid']] = thread
+ thread.start()
+ except: # noqa
+ self.logger.error('Error when spawning WIM thread for %s',
+ wim_account['uuid'], exc_info=True)
+
+ return thread
+
+ def start_threads(self):
+ """Start the threads responsible for processing WIM Actions"""
+ accounts = self.persist.get_wim_accounts(error_if_none=False)
+ thread_dict = {}
+ for account in accounts:
+ try:
+ plugin_name = "rosdn_" + account["wim"]["type"]
+ if plugin_name not in self.plugins:
+ self._load_plugin(plugin_name, type="sdn")
+ thread_dict[account["uuid"]] = self._spawn_thread(account)
+ except UndefinedWimConnector as e:
+ self.logger.error(e)
+ self.threads = remove_none_items(thread_dict)
+
+ def stop_threads(self):
+ """Stop the threads responsible for processing WIM Actions"""
+ for uuid, thread in self.threads.items():
+ thread.exit()
+ self.threads.clear()
+
+ @contextmanager
+ def threads_running(self):
+ """Ensure no thread will be left running"""
+ # This method is particularly important for testing :)
+ try:
+ self.start_threads()
+ yield
+ finally:
+ self.stop_threads()
+
+
+def _filter_multi_vim(networks):
+ """Ignore networks without sce_net_id (all VNFs go to the same VIM)"""
+ return [n for n in networks if 'sce_net_id' in n and n['sce_net_id']]
+
+
+def _group_networks(networks):
+ """Group networks that correspond to the same instance_scenario_id and
+ sce_net_id (NSR and VLD).
+
+ Arguments:
+ networks(list): Dicts containing the information about the networks
+ that will be instantiated to materialize a Network Service
+ (scenario) instance.
+ Returns:
+ dict: Keys are tuples (instance_scenario_id, sce_net_id) and values
+ are list of networks.
+ """
+ criteria = itemgetter('instance_scenario_id', 'sce_net_id')
+
+ networks = sorted(networks, key=criteria)
+ return {k: list(v) for k, v in groupby(networks, key=criteria)}
+
+
+def _count_datacenters(grouped_networks):
+ """Count the number of datacenters in each group of networks
+
+ Returns:
+ list of tuples: the first element is the group key, while the second
+ element is the number of datacenters in each group.
+ """
+ return ((key, len(set(n['datacenter_id'] for n in group)))
+ for key, group in grouped_networks.items())
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+import queue
+
+from ..db_base import db_base_Exception as DbBaseException
+from ..http_tools.errors import (
+ Bad_Request,
+ Conflict,
+ HttpMappedError,
+ Internal_Server_Error,
+ Not_Found
+)
+
+
class NoRecordFound(DbBaseException):
    """No record was found in the database"""

    def __init__(self, criteria, table=None):
        # Prepend the table name (when known) to the search criteria
        prefix = '{} - '.format(table) if table else ''
        message = '{}: {}`{}`'.format(self.__class__.__doc__, prefix, criteria)
        super(NoRecordFound, self).__init__(message, http_code=Not_Found)
+
+
class MultipleRecordsFound(DbBaseException):
    """More than one record was found in the database"""

    def __init__(self, criteria, table=None):
        # Prepend the table name (when known) to the search criteria
        prefix = '{} - '.format(table) if table else ''
        message = '{}: {}`{}`'.format(self.__class__.__doc__, prefix, criteria)
        super(MultipleRecordsFound, self).__init__(message, http_code=Conflict)
+
+
class WimAndTenantNotAttached(DbBaseException):
    """Wim and Tenant are not attached"""

    def __init__(self, wim, tenant):
        message = '{}: `{}` <> `{}`'.format(
            self.__class__.__doc__, wim, tenant)
        super(WimAndTenantNotAttached, self).__init__(
            message, http_code=Conflict)
+
+
class WimAndTenantAlreadyAttached(DbBaseException):
    """There is already a wim account attaching the given wim and tenant"""

    def __init__(self, wim, tenant):
        message = '{}: `{}` <> `{}`'.format(
            self.__class__.__doc__, wim, tenant)
        super(WimAndTenantAlreadyAttached, self).__init__(
            message, http_code=Conflict)
+
+
class NoWimConnectedToDatacenters(NoRecordFound):
    """No WIM that is able to connect the given datacenters was found"""
    # Inherits NoRecordFound's message formatting (criteria here is the
    # list of datacenter ids) and its Not_Found http_code.
+
+
class InvalidParameters(DbBaseException):
    """The given parameters are invalid"""

    def __init__(self, message, http_code=Bad_Request):
        # Message is supplied by the caller; defaults to HTTP 400
        super(InvalidParameters, self).__init__(message, http_code)
+
+
class UndefinedAction(HttpMappedError):
    """No action found"""

    def __init__(self, item_type, action, http_code=Internal_Server_Error):
        message = 'The action {} {} is not defined'.format(action, item_type)
        super(UndefinedAction, self).__init__(message, http_code)
+
+
class UndefinedWimConnector(DbBaseException):
    """The connector class for the specified wim type is not implemented"""

    def __init__(self, wim_type, module_name):
        message = ("Cannot load a module for {t} type '{n}'. The plugin "
                   "'osm_{n}' has not been registered".format(
                       t=wim_type, n=module_name))
        super(UndefinedWimConnector, self).__init__(
            message, http_code=Bad_Request)
+
+
class WimAccountOverwrite(DbBaseException):
    """An attempt to overwrite an existing WIM account was identified"""

    def __init__(self, wim_account, diff=None, tip=None):
        # Compose the message from the available pieces of information,
        # skipping the empty ones
        parts = [self.__class__.__doc__]
        if wim_account:
            parts.append(
                'Account -- name: {name}, uuid: {uuid}'.format(**wim_account))
        if diff:
            parts.append('Differing fields: ' + ', '.join(diff.keys()))
        if tip:
            parts.append(tip)
        super(WimAccountOverwrite, self).__init__(
            '\n'.join(parts), http_code=Conflict)
+
+
class UnexpectedDatabaseError(DbBaseException):
    """The database didn't raise an exception but the query was also not
    executed (maybe the connection had some problems?)
    """
    # Raised by the engine layer with an explicit message, e.g. when port
    # mappings could not be stored and a rollback was performed.
+
+
class UndefinedUuidOrName(DbBaseException):
    """Trying to query for a record using an empty uuid or name"""

    def __init__(self, table=None):
        # Only the first word of ``table`` is used (it may be a FROM
        # clause such as 'wims AS wim')
        table_info = '{} - '.format(table.split()[0]) if table else ''
        # BUGFIX: the keyword used to be ``http_status``, which is not a
        # parameter of DbBaseException.__init__ (TypeError when raised);
        # every sibling exception in this module uses ``http_code``
        super(UndefinedUuidOrName, self).__init__(
            table_info + self.__class__.__doc__, http_code=Bad_Request)
+
+
class UndefinedWanMappingType(InvalidParameters):
    """The dict service_mapping_info MUST contain a `type` field"""

    def __init__(self, given):
        detail = '{}. Given: `{}`'.format(self.__class__.__doc__, given)
        super(UndefinedWanMappingType, self).__init__(detail)
+
+
class QueueFull(HttpMappedError, queue.Full):
    """Thread queue is full"""
    # Also subclasses queue.Full so callers may catch either type

    def __init__(self, thread_name, http_code=Internal_Server_Error):
        message = 'Thread {} queue is full'.format(thread_name)
        super(QueueFull, self).__init__(message, http_code)
+
+
class InconsistentState(HttpMappedError):
    """An unexpected inconsistency was found in the state of the program"""

    def __init__(self, arg, http_code=Internal_Server_Error):
        # When wrapping another HttpMappedError, reuse its http_code
        if isinstance(arg, HttpMappedError):
            super(InconsistentState, self).__init__(str(arg), arg.http_code)
        else:
            super(InconsistentState, self).__init__(arg, http_code)
+
+
class WimAccountNotActive(HttpMappedError, KeyError):
    """WIM Account is not active yet (no thread is running)"""
    # Also a KeyError: the account uuid is missing from the threads dict

    def __init__(self, message, http_code=Internal_Server_Error):
        hint = ('\nThe thread responsible for processing the actions have '
                'suddenly stopped, or have never being spawned')
        super(WimAccountNotActive, self).__init__(message + hint, http_code)
+
+
class NoExternalPortFound(HttpMappedError):
    """No external port associated to the instance_net"""

    def __init__(self, instance_net):
        message = '{} uuid({})'.format(
            self.__class__.__doc__, instance_net['uuid'])
        super(NoExternalPortFound, self).__init__(
            message, http_code=Not_Found)
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""This module works as an extension to the toplevel ``httpserver`` module,
+implementing callbacks for the HTTP routes related to the WIM features of OSM.
+
Acting as a front-end, it is responsible for converting the HTTP request
payload into native Python objects, calling the correct engine methods
and converting back the response objects into strings to be sent in the HTTP
response payload.
+
+Direct domain/persistence logic should be avoided in this file, instead
+calls to other layers should be done.
+"""
+import logging
+
+from bottle import request
+
+from .. import utils
+from ..http_tools.errors import ErrorHandler
+from ..http_tools.handler import BaseHandler, route
+from ..http_tools.request_processing import (
+ filter_query_string,
+ format_in,
+ format_out
+)
+from .engine import WimEngine
+from .persistence import WimPersistence
+from .schemas import (
+ wim_account_schema,
+ wim_edit_schema,
+ wim_port_mapping_schema,
+ wim_schema
+)
+
+
class WimHandler(BaseHandler):
    """HTTP route implementations for WIM related URLs

    Arguments:
        db: instance of mydb [optional]. This argument must be provided
            if not ``persistence`` is passed
        persistence (WimPersistence): High-level data storage abstraction
            [optional]. If this argument is not present, ``db`` must be.
        engine (WimEngine): Implementation of the business logic
            for the engine of WAN networks
        logger (logging.Logger): logger object [optional]
        url_base(str): Path fragment to be prepended to the routes [optional]
        plugins(list): List of bottle plugins to be applied to routes
            [optional]
    """
    def __init__(self, db=None, persistence=None, engine=None,
                 url_base='', logger=None, plugins=()):
        self.persist = persistence or WimPersistence(db)
        self.engine = engine or WimEngine(self.persist)
        self.url_base = url_base
        self.logger = logger or logging.getLogger('openmano.wim.http')
        # ErrorHandler translates exceptions raised by the routes into
        # proper HTTP error responses, so it goes first in the plugin list
        error_handler = ErrorHandler(self.logger)
        self.plugins = [error_handler] + list(plugins)

    @route('GET', '/<tenant_id>/wims')
    def http_list_wims(self, tenant_id):
        """List all WIMs visible to a tenant ('any' lists every WIM)."""
        allowed_fields = ('uuid', 'name', 'wim_url', 'type', 'created_at')
        select_, where_, limit_ = filter_query_string(
            request.query, None, allowed_fields)
        # ^ Since we allow the user to customize the db query using the HTTP
        # query and it is quite difficult to re-use this query, let's just
        # do a ad-hoc call to the db

        from_ = 'wims'
        if tenant_id != 'any':
            where_['nfvo_tenant_id'] = tenant_id
            if 'created_at' in select_:
                # Disambiguate: both joined tables carry a created_at column
                select_[select_.index('created_at')] = (
                    'w.created_at as created_at')
            if 'created_at' in where_:
                where_['w.created_at'] = where_.pop('created_at')
            from_ = ('wims as w join wim_nfvo_tenants as wt '
                     'on w.uuid=wt.wim_id')

        wims = self.persist.query(
            FROM=from_, SELECT=select_, WHERE=where_, LIMIT=limit_,
            error_if_none=False)

        utils.convert_float_timestamp2str(wims)
        return format_out({'wims': wims})

    @route('GET', '/<tenant_id>/wims/<wim_id>')
    def http_get_wim(self, tenant_id, wim_id):
        """Retrieve one WIM; its port mappings are merged into ``config``."""
        tenant_id = None if tenant_id == 'any' else tenant_id
        wim = self.engine.get_wim(wim_id, tenant_id)
        mappings = self.engine.get_wim_port_mappings(wim_id)
        wim['config'] = utils.merge_dicts(wim.get('config', {}) or {},
                                          wim_port_mapping=mappings)
        return format_out({'wim': wim})

    @route('POST', '/wims')
    def http_create_wim(self):
        """Create a new WIM record from the request body."""
        http_content, _ = format_in(wim_schema, confidential_data=True)
        r = utils.remove_extra_items(http_content, wim_schema)
        if r:
            self.logger.debug("Remove extra items received %r", r)
        data = self.engine.create_wim(http_content['wim'])
        return self.http_get_wim('any', data)

    @route('PUT', '/wims/<wim_id>')
    def http_update_wim(self, wim_id):
        '''edit wim details, can use both uuid or name'''
        # parse input data
        http_content, _ = format_in(wim_edit_schema)
        r = utils.remove_extra_items(http_content, wim_edit_schema)
        if r:
            self.logger.debug("Remove received extra items %s", r)

        wim_id = self.engine.update_wim(wim_id, http_content['wim'])
        return self.http_get_wim('any', wim_id)

    @route('DELETE', '/wims/<wim_id>')
    def http_delete_wim(self, wim_id):
        """Delete a wim from a database, can use both uuid or name"""
        data = self.engine.delete_wim(wim_id)
        # TODO Remove WIM in orchestrator
        return format_out({"result": "wim '" + data + "' deleted"})

    @route('POST', '/<tenant_id>/wims/<wim_id>')
    def http_create_wim_account(self, tenant_id, wim_id):
        """Associate an existing wim to this tenant"""
        # parse input data
        http_content, _ = format_in(
            wim_account_schema, confidential_data=True)
        removed = utils.remove_extra_items(http_content, wim_account_schema)
        removed and self.logger.debug("Remove extra items %r", removed)
        account = self.engine.create_wim_account(
            wim_id, tenant_id, http_content['wim_account'])
        # check update succeeded
        return format_out({"wim_account": account})

    @route('PUT', '/<tenant_id>/wims/<wim_id>')
    def http_update_wim_accounts(self, tenant_id, wim_id):
        """Edit the association of an existing wim to this tenant"""
        tenant_id = None if tenant_id == 'any' else tenant_id
        # parse input data
        http_content, _ = format_in(
            wim_account_schema, confidential_data=True)
        removed = utils.remove_extra_items(http_content, wim_account_schema)
        removed and self.logger.debug("Remove extra items %r", removed)
        accounts = self.engine.update_wim_accounts(
            wim_id, tenant_id, http_content['wim_account'])

        # With a concrete tenant at most one account matches
        if tenant_id:
            return format_out({'wim_account': accounts[0]})

        return format_out({'wim_accounts': accounts})

    @route('DELETE', '/<tenant_id>/wims/<wim_id>')
    def http_delete_wim_accounts(self, tenant_id, wim_id):
        """Deassociate an existing wim to this tenant"""
        tenant_id = None if tenant_id == 'any' else tenant_id
        accounts = self.engine.delete_wim_accounts(wim_id, tenant_id,
                                                   error_if_none=True)

        # NOTE(review): the tuple order below is (name, wim_id, tenant) but
        # the message placeholders read (name, tenant, wim) — tenant and WIM
        # appear swapped in the produced text; confirm intended output
        properties = (
            (account['name'], wim_id,
             utils.safe_get(account, 'association.nfvo_tenant_id', tenant_id))
            for account in accounts)

        return format_out({
            'result': '\n'.join('WIM account `{}` deleted. '
                                'Tenant `{}` detached from WIM `{}`'
                                .format(*p) for p in properties)
        })

    @route('POST', '/<tenant_id>/wims/<wim_id>/port_mapping')
    def http_create_wim_port_mappings(self, tenant_id, wim_id):
        """Set the wim port mapping for a wim"""
        # parse input data
        http_content, _ = format_in(wim_port_mapping_schema)

        data = self.engine.create_wim_port_mappings(
            wim_id, http_content['wim_port_mapping'], tenant_id)
        return format_out({"wim_port_mapping": data})

    @route('GET', '/<tenant_id>/wims/<wim_id>/port_mapping')
    def http_get_wim_port_mappings(self, tenant_id, wim_id):
        """Get wim port mapping details"""
        # TODO: tenant_id is never used, so it should be removed
        data = self.engine.get_wim_port_mappings(wim_id)
        return format_out({"wim_port_mapping": data})

    @route('DELETE', '/<tenant_id>/wims/<wim_id>/port_mapping')
    def http_delete_wim_port_mappings(self, tenant_id, wim_id):
        """Clean wim port mapping"""
        # TODO: tenant_id is never used, so it should be removed
        data = self.engine.delete_wim_port_mappings(wim_id)
        return format_out({"result": data})
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""This module contains only logic related to managing records in a database
which includes data format normalization, data format validation, etc.
(It works as an extension to `nfvo_db.py` for the WIM feature)
+
+No domain logic/architectural concern should be present in this file.
+"""
+import json
+import logging
+import string
+from contextlib import contextmanager
+from hashlib import sha1
+from itertools import groupby
+from operator import itemgetter
+# from sys import exc_info
+# from time import time
+from uuid import uuid1 as generate_uuid
+
+import yaml
+
+from ..utils import (
+ check_valid_uuid,
+ convert_float_timestamp2str,
+ expand_joined_fields,
+ filter_dict_keys,
+ filter_out_dict_keys,
+ merge_dicts,
+ remove_none_items
+)
+from .errors import (
+ DbBaseException,
+ InvalidParameters,
+ MultipleRecordsFound,
+ NoRecordFound,
+ UndefinedUuidOrName,
+ UndefinedWanMappingType,
+ UnexpectedDatabaseError,
+ WimAccountOverwrite,
+ WimAndTenantAlreadyAttached
+)
+
# --- SQL building blocks reused by the queries in WimPersistence ---

# Base FROM clause for the wims table
_WIM = 'wims AS wim '

# wims joined with their tenant associations and accounts
_WIM_JOIN = (
    _WIM +
    ' JOIN wim_nfvo_tenants AS association '
    ' ON association.wim_id=wim.uuid '
    ' JOIN nfvo_tenants AS nfvo_tenant '
    ' ON association.nfvo_tenant_id=nfvo_tenant.uuid '
    ' JOIN wim_accounts AS wim_account '
    ' ON association.wim_account_id=wim_account.uuid '
)

# wim_accounts joined back to their wim and nfvo tenant
_WIM_ACCOUNT_JOIN = (
    'wim_accounts AS wim_account '
    ' JOIN wim_nfvo_tenants AS association '
    ' ON association.wim_account_id=wim_account.uuid '
    ' JOIN wims AS wim '
    ' ON association.wim_id=wim.uuid '
    ' JOIN nfvo_tenants AS nfvo_tenant '
    ' ON association.nfvo_tenant_id=nfvo_tenant.uuid '
)

# datacenters joined with their tenant associations and accounts
_DATACENTER_JOIN = (
    'datacenters AS datacenter '
    ' JOIN tenants_datacenters AS association '
    ' ON association.datacenter_id=datacenter.uuid '
    ' JOIN datacenter_tenants as datacenter_account '
    ' ON association.datacenter_tenant_id=datacenter_account.uuid '
    ' JOIN nfvo_tenants AS nfvo_tenant '
    ' ON association.nfvo_tenant_id=nfvo_tenant.uuid '
)

# Base FROM clause for the port mappings table, plus optional joins
_PORT_MAPPING = 'wim_port_mappings as wim_port_mapping '

_PORT_MAPPING_JOIN_WIM = (
    ' JOIN wims as wim '
    ' ON wim_port_mapping.wim_id=wim.uuid '
)

_PORT_MAPPING_JOIN_DATACENTER = (
    ' JOIN datacenters as datacenter '
    ' ON wim_port_mapping.datacenter_id=datacenter.uuid '
)

# Column lists for SELECT clauses; wim columns are aliased so the result
# keys do not carry the table prefix
_WIM_SELECT = [
    'wim.{0} as {0}'.format(_field)
    for _field in 'uuid name description wim_url type config '
    'created_at modified_at'.split()
]

_WIM_ACCOUNT_SELECT = 'uuid name user password config'.split()

_PORT_MAPPING_SELECT = ('wim_port_mapping.*', )

# Field names whose values must be hidden when records are returned
_CONFIDENTIAL_FIELDS = ('password', 'passwd')

# Field names stored as serialized text (JSON/YAML) in the database
_SERIALIZED_FIELDS = ('config', 'vim_info', 'wim_info', 'conn_info', 'extra',
                      'service_mapping_info')

UNIQUE_PORT_MAPPING_INFO_FIELDS = {
    'dpid-port': ('switch_dpid', 'switch_port')
}
"""Fields that should be unique for each port mapping that relies on
service_mapping_info.

For example, for port mappings of type 'dpid-port', each combination of
switch_dpid and switch_port should be unique (the same switch cannot
be connected to two different places using the same port)
"""
+
+
class WimPersistence(object):
    """High level interactions with the WIM tables in the database"""

    def __init__(self, db, logger=None):
        """Arguments:
            db: database handler object (e.g. ``nfvo_db``) exposing
                ``get_rows``, ``new_row``, ``new_rows``, ``update_rows``,
                ``delete_row_by_id``, ``delete_row`` and ``escape_string``
            logger: [optional] logger-compatible object; a default one is
                created when not provided
        """
        self.db = db
        self.logger = logger or logging.getLogger('openmano.wim.persistence')

    def query(self,
              FROM=None,
              SELECT=None,
              WHERE=None,
              ORDER_BY=None,
              LIMIT=None,
              OFFSET=None,
              error_if_none=True,
              error_if_multiple=False,
              postprocess=None,
              hide=_CONFIDENTIAL_FIELDS,
              **kwargs):
        """Retrieve records from the database.

        Keyword Arguments:
            SELECT, FROM, WHERE, LIMIT, ORDER_BY: used to compose the SQL
                query. See ``nfvo_db.get_rows``.
            OFFSET: only valid when used together with LIMIT.
                Ignore the OFFSET first results of the query.
            error_if_none: by default an error is raised if no record is
                found. With this option it is possible to disable this error.
            error_if_multiple: by default no error is raised if more than one
                record is found.
                With this option it is possible to enable this error.
            postprocess: function applied to every retrieved record.
                This function receives a dict as input and must return it
                after modifications. Moreover this function should accept a
                second optional parameter ``hide`` indicating
                the confidential fields to be obfuscated.
                By default a minimal postprocessing function is applied,
                obfuscating confidential fields and converting timestamps.
            hide: option proxied to postprocess

        All the remaining keyword arguments will be assumed to be ``name``s or
        ``uuid``s to compose the WHERE statement, according to their format.
        If the value corresponds to an array, the first element will determine
        if it is a name or UUID.

        For example:
            - ``wim="abcdef"`` will be turned into ``wim.name="abcdef"``,
            - ``datacenter="5286a274-8a1b-4b8d-a667-9c94261ad855"``
                will be turned into
                ``datacenter.uuid="5286a274-8a1b-4b8d-a667-9c94261ad855"``.
            - ``wim=["5286a274-8a1b-4b8d-a667-9c94261ad855", ...]``
                will be turned into
                ``wim.uuid=["5286a274-8a1b-4b8d-a667-9c94261ad855", ...]``

        Raises:
            NoRecordFound: if the query result set is empty
            DbBaseException: errors occurring during the execution of the
                query.
        """
        # Defaults:
        postprocess = postprocess or _postprocess_record
        WHERE = WHERE or {}

        # Find remaining keywords by name or uuid
        WHERE.update(_compose_where_from_uuids_or_names(**kwargs))
        WHERE = WHERE or None
        # ^  If the where statement is empty, it is better to leave it as
        #    None, so it can be filtered out at a later stage
        LIMIT = ('{:d},{:d}'.format(OFFSET, LIMIT)
                 if LIMIT and OFFSET else LIMIT)

        query = remove_none_items({
            'SELECT': SELECT, 'FROM': FROM, 'WHERE': WHERE,
            'LIMIT': LIMIT, 'ORDER_BY': ORDER_BY})

        records = self.db.get_rows(**query)

        table = FROM.split()[0]
        if error_if_none and not records:
            raise NoRecordFound(WHERE, table)

        if error_if_multiple and len(records) > 1:
            self.logger.error('Multiple records '
                              'FROM %s WHERE %s:\n\n%s\n\n',
                              FROM, WHERE, json.dumps(records, indent=4))
            raise MultipleRecordsFound(WHERE, table)

        return [
            expand_joined_fields(postprocess(record, hide))
            for record in records
        ]

    def query_one(self, *args, **kwargs):
        """Similar to ``query``, but ensuring just one result.
        ``error_if_multiple`` is enabled by default.
        """
        kwargs.setdefault('error_if_multiple', True)
        records = self.query(*args, **kwargs)
        return records[0] if records else None

    def get_by_uuid(self, table, uuid, **kwargs):
        """Retrieve one record from the database based on its uuid

        Arguments:
            table (str): table name (to be used in SQL's FROM statement).
            uuid (str): unique identifier for record.

        For additional keyword arguments and exceptions see :obj:`~.query`
        (``error_if_multiple`` is enabled by default).
        """
        if uuid is None:
            raise UndefinedUuidOrName(table)
        return self.query_one(table, WHERE={'uuid': uuid}, **kwargs)

    def get_by_name_or_uuid(self, table, uuid_or_name, **kwargs):
        """Retrieve a record from the database based on a value that can be
        its uuid or name.

        Arguments:
            table (str): table name (to be used in SQL's FROM statement).
            uuid_or_name (str): this value can correspond to either uuid or
                name
        For additional keyword arguments and exceptions see :obj:`~.query`
        (``error_if_multiple`` is enabled by default).
        """
        if uuid_or_name is None:
            raise UndefinedUuidOrName(table)

        key = 'uuid' if check_valid_uuid(uuid_or_name) else 'name'
        return self.query_one(table, WHERE={key: uuid_or_name}, **kwargs)

    def get_wims(self, uuid_or_name=None, tenant=None, **kwargs):
        """Retrieve information about one or more WIMs stored in the database

        Arguments:
            uuid_or_name (str): uuid or name for WIM
            tenant (str): [optional] uuid or name for NFVO tenant

        See :obj:`~.query` for additional keyword arguments.
        """
        kwargs.update(wim=uuid_or_name, tenant=tenant)
        from_ = _WIM_JOIN if tenant else _WIM
        select_ = _WIM_SELECT[:] + (['wim_account.*'] if tenant else [])

        kwargs.setdefault('SELECT', select_)
        return self.query(from_, **kwargs)

    def get_wim(self, wim, tenant=None, **kwargs):
        """Similar to ``get_wims`` but ensure only one result is returned"""
        kwargs.setdefault('error_if_multiple', True)
        # Forward the keyword arguments: the original implementation dropped
        # them, silently ignoring ``error_if_multiple`` and any other option
        return self.get_wims(wim, tenant, **kwargs)[0]

    def create_wim(self, wim_descriptor):
        """Create a new wim record inside the database and returns its uuid

        Arguments:
            wim_descriptor (dict): properties of the record
                (usually each field corresponds to a database column, but
                extra information can be offloaded to another table or
                serialized as JSON/YAML)
        Returns:
            str: UUID of the created WIM
        """
        if "config" in wim_descriptor:
            wim_descriptor["config"] = _serialize(wim_descriptor["config"])

        url = wim_descriptor["wim_url"]
        wim_descriptor["wim_url"] = url.strip(string.whitespace + "/")
        # ^  This avoids the common problem caused by trailing spaces/slashes
        #    in the URL (due to CTRL+C/CTRL+V)

        return self.db.new_row(
            "wims", wim_descriptor, add_uuid=True, confidential_data=True)

    def update_wim(self, uuid_or_name, wim_descriptor):
        """Change an existing WIM record on the database

        Returns:
            str: UUID of the updated record
        """
        # obtain data, check that only one exist
        wim = self.get_by_name_or_uuid('wims', uuid_or_name)

        # edit data
        wim_id = wim['uuid']
        where = {'uuid': wim['uuid']}

        # unserialize config, edit and serialize it again
        new_config_dict = wim_descriptor.get('config', {}) or {}
        config_dict = remove_none_items(merge_dicts(
            wim.get('config', {}) or {}, new_config_dict))
        wim_descriptor['config'] = (
            _serialize(config_dict) if config_dict else None)

        self.db.update_rows('wims', wim_descriptor, where)

        return wim_id

    def delete_wim(self, wim):
        """Delete a WIM record identified by name or uuid.

        Returns:
            str: ``'<uuid> <name>'`` of the deleted record
        """
        # retrieve the record first, ensuring it exists and is unique
        wim = self.get_by_name_or_uuid('wims', wim)

        self.db.delete_row_by_id('wims', wim['uuid'])

        return wim['uuid'] + ' ' + wim['name']

    def get_wim_accounts_by(self, wim=None, tenant=None, uuid=None, **kwargs):
        """Retrieve WIM account information from the database together
        with the related records (wim, nfvo_tenant and wim_nfvo_tenant)

        Arguments:
            wim (str): uuid or name for WIM
            tenant (str): [optional] uuid or name for NFVO tenant
            uuid (str): [optional] uuid of the wim_account record itself

        See :obj:`~.query` for additional keyword arguments.
        """
        kwargs.update(wim=wim, tenant=tenant)
        kwargs.setdefault('postprocess', _postprocess_wim_account)
        if uuid:
            kwargs.setdefault('WHERE', {'wim_account.uuid': uuid})
        return self.query(FROM=_WIM_ACCOUNT_JOIN, **kwargs)

    def get_wim_account_by(self, wim=None, tenant=None, uuid=None, **kwargs):
        """Similar to ``get_wim_accounts_by``, but ensuring just one result"""
        kwargs.setdefault('error_if_multiple', True)
        return self.get_wim_accounts_by(wim, tenant, uuid, **kwargs)[0]

    def get_wim_accounts(self, **kwargs):
        """Retrieve all the accounts from the database"""
        kwargs.setdefault('postprocess', _postprocess_wim_account)
        # NOTE(review): this filter uses the string "false" while
        # ``_preprocess_wim_account`` writes the boolean False; verify both
        # map to the same stored value for the ``sdn`` column
        kwargs.setdefault('WHERE', {"sdn": "false"})
        return self.query(FROM=_WIM_ACCOUNT_JOIN, **kwargs)

    def get_wim_account(self, uuid_or_name, **kwargs):
        """Retrieve WIM Account record by UUID or name,
        See :obj:`get_by_name_or_uuid` for keyword arguments.
        """
        kwargs.setdefault('postprocess', _postprocess_wim_account)
        kwargs.setdefault('SELECT', _WIM_ACCOUNT_SELECT)
        return self.get_by_name_or_uuid('wim_accounts', uuid_or_name, **kwargs)

    @contextmanager
    def _associate(self, wim_id, nfvo_tenant_id):
        """Auxiliary context manager for ``create_wim_account``.

        It does not insert anything itself: it only translates the generic
        'already in use' database error raised while inserting into the
        ``wim_nfvo_tenants`` association table into the domain-specific
        ``WimAndTenantAlreadyAttached`` exception.
        """
        try:
            yield
        except DbBaseException as db_exception:
            error_msg = str(db_exception)
            if all(msg in error_msg
                   for msg in ("already in use", "'wim_nfvo_tenant'")):
                ex = WimAndTenantAlreadyAttached(wim_id, nfvo_tenant_id)
                raise ex from db_exception
            raise

    def create_wim_account(self, wim, tenant, properties):
        """Associate a wim to a tenant using the ``wim_nfvo_tenants`` table
        and create a ``wim_account`` to store credentials and configurations.

        For the sake of simplification, we assume that each NFVO tenant can be
        attached to a WIM using only one WIM account. This is automatically
        guaranteed via database constraints.
        For corner cases, the same WIM can be registered twice using another
        name.

        Arguments:
            wim (str): name or uuid of the WIM related to the account being
                created
            tenant (str): name or uuid of the nfvo tenant to which the account
                will be created
            properties (dict): properties of the account
                (eg. user, password, ...)

        Returns:
            str: UUID of the wim_account (created or reused)
        """
        wim_id = self.get_by_name_or_uuid('wims', wim, SELECT=['uuid'])['uuid']
        tenant = self.get_by_name_or_uuid('nfvo_tenants', tenant,
                                          SELECT=['uuid', 'name'])
        account = properties.setdefault('name', tenant['name'])

        wim_account = self.query_one('wim_accounts',
                                     WHERE={'wim_id': wim_id,
                                            'name': account},
                                     error_if_none=False)

        transaction = []
        used_uuids = []

        if wim_account is None:
            # If a row for the wim account doesn't exist yet, we need to
            # create one, otherwise we can just re-use it.
            account_id = str(generate_uuid())
            used_uuids.append(account_id)
            row = merge_dicts(properties, wim_id=wim_id, uuid=account_id)
            transaction.append({'wim_accounts': _preprocess_wim_account(row)})
        else:
            account_id = wim_account['uuid']
            properties.pop('config', None)  # Config is too complex to compare
            diff = {k: v for k, v in properties.items()
                    if v != wim_account[k]}
            if diff:
                tip = 'Edit the account first, and then attach it to a tenant'
                raise WimAccountOverwrite(wim_account, diff, tip)

        transaction.append({
            'wim_nfvo_tenants': {'nfvo_tenant_id': tenant['uuid'],
                                 'wim_id': wim_id,
                                 'wim_account_id': account_id}})

        with self._associate(wim_id, tenant['uuid']):
            self.db.new_rows(transaction, used_uuids, confidential_data=True)

        return account_id

    def update_wim_account(self, uuid, properties, hide=_CONFIDENTIAL_FIELDS):
        """Update WIM account record by overwriting fields with new values

        Specially for the field ``config`` this means that a new dict will be
        merged to the existing one.

        Attributes:
            uuid (str): UUID for the WIM account
            properties (dict): fields that should be overwritten
            hide: fields to obfuscate in the returned record

        Returns:
            Updated wim_account
        """
        wim_account = self.get_by_uuid('wim_accounts', uuid)
        safe_fields = 'user password name created'.split()
        updates = _preprocess_wim_account(
            merge_dicts(wim_account,
                        filter_dict_keys(properties, safe_fields))
        )

        if properties.get('config'):
            old_config = wim_account.get('config') or {}
            new_config = merge_dicts(old_config, properties['config'])
            updates['config'] = _serialize(new_config)

        num_changes = self.db.update_rows(
            'wim_accounts', UPDATE=updates,
            WHERE={'uuid': wim_account['uuid']})

        if num_changes is None:
            # ``**`` fills the named placeholders from the dict (the original
            # ``format(*wim_account)`` unpacked just the keys positionally and
            # crashed with a KeyError instead of producing the message)
            raise UnexpectedDatabaseError(
                'Impossible to update wim_account '
                '{name}:{uuid}'.format(**wim_account))

        return self.get_wim_account(wim_account['uuid'], hide=hide)

    def delete_wim_account(self, uuid):
        """Remove WIM account record from the database"""
        # Since we have foreign keys configured with ON CASCADE, we can rely
        # on the database engine to guarantee consistency, deleting the
        # dependant records
        return self.db.delete_row_by_id('wim_accounts', uuid)

    def get_datacenters_by(self, datacenter=None, tenant=None, **kwargs):
        """Retrieve datacenter information from the database together
        with the related records (nfvo_tenant)

        Arguments:
            datacenter (str): uuid or name for datacenter
            tenant (str): [optional] uuid or name for NFVO tenant

        See :obj:`~.query` for additional keyword arguments.
        """
        if tenant:
            kwargs.update(datacenter=datacenter, tenant=tenant)
            return self.query(_DATACENTER_JOIN, **kwargs)
        else:
            return [self.get_by_name_or_uuid('datacenters',
                                             datacenter, **kwargs)]

    def get_datacenter_by(self, datacenter=None, tenant=None, **kwargs):
        """Similar to ``get_datacenters_by``, but ensuring just one result"""
        kwargs.setdefault('error_if_multiple', True)
        return self.get_datacenters_by(datacenter, tenant, **kwargs)[0]

    def _create_single_port_mapping(self, properties):
        """Insert a single row in ``wim_port_mappings``, deriving the
        ``service_endpoint_id`` from ``service_mapping_info`` when missing.
        """
        info = properties.setdefault('service_mapping_info', {})
        endpoint_id = properties.get('service_endpoint_id')

        if info.get('mapping_type') and not endpoint_id:
            properties['service_endpoint_id'] = (
                self._generate_port_mapping_id(info))

        properties['service_mapping_info'] = _serialize(info)

        try:
            self.db.new_row('wim_port_mappings', properties,
                            add_uuid=False, confidential_data=True)
        except DbBaseException as old_exception:
            self.logger.exception(old_exception)
            # (a stray '}' present in the original message was removed)
            ex = InvalidParameters(
                "The mapping must contain the "
                "'device_id', 'device_interface_id', and "
                "service_mapping_info: "
                "('switch_dpid' and 'switch_port') or "
                "'service_endpoint_id'")
            raise ex from old_exception

        return properties

    def create_wim_port_mappings(self, wim, port_mappings, tenant=None):
        """Store a set of port mappings for the given WIM.

        Arguments:
            wim: WIM record (dict) or WIM name/uuid
            port_mappings (list): mappings following the
                ``wim_port_mapping_desc`` schema
            tenant: [optional] name or uuid of the NFVO tenant, used to
                resolve the datacenter names
        """
        if not isinstance(wim, dict):
            wim = self.get_by_name_or_uuid('wims', wim)

        for port_mapping in port_mappings:
            port_mapping['wim_name'] = wim['name']
            datacenter = self.get_datacenter_by(
                port_mapping['datacenter_name'], tenant)
            for pop_wan_port_mapping in port_mapping['pop_wan_mappings']:
                element = merge_dicts(pop_wan_port_mapping, {
                    'wim_id': wim['uuid'],
                    'datacenter_id': datacenter['uuid']})
                self._create_single_port_mapping(element)

        return port_mappings

    def _filter_port_mappings_by_tenant(self, mappings, tenant):
        """Make sure all the datacenters and wims listed in the port mapping
        belong to an specific tenant
        """

        # NOTE: Theoretically this could be done at SQL level, but given the
        #       number of tables involved (wim_port_mappings, wim_accounts,
        #       wims, wim_nfvo_tenants, datacenters, datacenter_tenants,
        #       tenants_datacents and nfvo_tenants), it would result in a
        #       extremely complex query. Moreover, the predicate can vary:
        #       for `get_wim_port_mappings` we can have any combination of
        #       (wim, datacenter, tenant), not all of them having the 3 values
        #       so we have combinatorial trouble to write the 'FROM' statement.

        kwargs = {'tenant': tenant, 'error_if_none': False}
        # Cache results to speedup things
        datacenters = {}
        wims = {}

        def _get_datacenter(uuid):
            return (
                datacenters.get(uuid) or
                datacenters.setdefault(
                    uuid, self.get_datacenters_by(uuid, **kwargs)))

        def _get_wims(uuid):
            return (wims.get(uuid) or
                    wims.setdefault(uuid, self.get_wims(uuid, **kwargs)))

        return [
            mapping
            for mapping in mappings
            if (_get_datacenter(mapping['datacenter_id']) and
                _get_wims(mapping['wim_id']))
        ]

    def get_wim_port_mappings(self, wim=None, datacenter=None, tenant=None,
                              **kwargs):
        """List all the port mappings, optionally filtering by wim, datacenter
        AND/OR tenant
        """
        from_ = [_PORT_MAPPING,
                 _PORT_MAPPING_JOIN_WIM if wim else '',
                 _PORT_MAPPING_JOIN_DATACENTER if datacenter else '']

        criteria = ('wim_id', 'datacenter_id')
        kwargs.setdefault('error_if_none', False)
        mappings = self.query(
            ' '.join(from_),
            SELECT=_PORT_MAPPING_SELECT,
            ORDER_BY=['wim_port_mapping.{}'.format(c) for c in criteria],
            wim=wim, datacenter=datacenter,
            postprocess=_postprocess_wim_port_mapping,
            **kwargs)

        if tenant:
            mappings = self._filter_port_mappings_by_tenant(mappings, tenant)

        # We don't have to sort, since we have used 'ORDER_BY'
        grouped_mappings = groupby(mappings, key=itemgetter(*criteria))

        return [
            {'wim_id': key[0],
             'datacenter_id': key[1],
             'pop_wan_mappings': [
                 filter_out_dict_keys(mapping, (
                     'id', 'wim_id', 'datacenter_id',
                     'created_at', 'modified_at'))
                 for mapping in group]}
            for key, group in grouped_mappings
        ]

    def delete_wim_port_mappings(self, wim_id):
        """Delete all the port mappings associated to the given WIM uuid."""
        self.db.delete_row(FROM='wim_port_mappings', WHERE={"wim_id": wim_id})
        return "port mapping for wim {} deleted.".format(wim_id)

    def update_wim_port_mapping(self, id, properties):
        """Update a port mapping record, merging ``service_mapping_info``
        with the stored value instead of replacing it.

        Returns:
            int: number of changed rows
        """
        original = self.query_one('wim_port_mappings', WHERE={'id': id})

        mapping_info = remove_none_items(merge_dicts(
            original.get('service_mapping_info') or {},
            properties.get('service_mapping_info') or {}))

        updates = preprocess_record(
            merge_dicts(original, remove_none_items(properties),
                        service_mapping_info=mapping_info))

        num_changes = self.db.update_rows('wim_port_mappings',
                                          UPDATE=updates, WHERE={'id': id})

        if num_changes is None:
            raise UnexpectedDatabaseError(
                'Impossible to update wim_port_mappings {}:\n{}\n'.format(
                    id, _serialize(properties))
            )

        return num_changes

    def get_actions_in_groups(self, wim_account_id,
                              item_types=('instance_wim_nets',),
                              group_offset=0, group_limit=150):
        """Retrieve actions from the database in groups.
        Each group contains all the actions that have the same ``item`` type
        and ``item_id``.

        Arguments:
            wim_account_id: restrict the search to actions to be performed
                using the same account
            item_types (list): [optional] filter the actions to the given
                item types
            group_limit (int): maximum number of groups returned by the
                function
            group_offset (int): skip the N first groups. Used together with
                group_limit for pagination purposes.

        Returns:
            List of groups, where each group is a tuple ``(key, actions)``.
            In turn, ``key`` is a tuple containing the values of
            ``(item, item_id)`` used to create the group and ``actions`` is a
            list of ``vim_wim_actions`` records (dicts).
        """
        # Values interpolated into the SQL text are escaped first
        # (``escape_string`` via ``safe_str``)
        type_options = {
            '"{}"'.format(self.db.escape_string(t)) for t in item_types}

        items = ('SELECT DISTINCT a.item, a.item_id, a.wim_account_id '
                 'FROM vim_wim_actions AS a '
                 'WHERE a.wim_account_id="{}" AND a.item IN ({}) '
                 'ORDER BY a.item, a.item_id '
                 'LIMIT {:d},{:d}').format(
                     self.safe_str(wim_account_id),
                     ','.join(type_options),
                     group_offset, group_limit)

        join = 'vim_wim_actions NATURAL JOIN ({}) AS items'.format(items)
        db_results = self.db.get_rows(
            FROM=join, ORDER_BY=('item', 'item_id', 'created_at'))

        results = (_postprocess_action(r) for r in db_results)
        criteria = itemgetter('item', 'item_id')
        return [(k, list(g)) for k, g in groupby(results, key=criteria)]

    def update_action(self, instance_action_id, task_index, properties):
        """Update a ``vim_wim_actions`` record identified by
        ``instance_action_id`` and ``task_index``, merging the ``extra``
        field with the stored one.

        Returns:
            int: number of changed rows
        """
        condition = {'instance_action_id': instance_action_id,
                     'task_index': task_index}
        try:
            action = self.query_one('vim_wim_actions', WHERE=condition)
        except MultipleRecordsFound:
            # More than one action matched: log them all and proceed with the
            # first one (narrowed down from a broad ``except Exception``)
            actions = self.query('vim_wim_actions', WHERE=condition)
            self.logger.error('More than one action found:\n%s',
                              json.dumps(actions, indent=4))
            action = actions[0]

        extra = remove_none_items(merge_dicts(
            action.get('extra') or {},
            properties.get('extra') or {}))

        updates = preprocess_record(
            merge_dicts(action, properties, extra=extra))

        num_changes = self.db.update_rows(
            'vim_wim_actions', UPDATE=updates, WHERE=condition)

        if num_changes is None:
            # ``**`` is required to fill the named placeholders from the dict
            raise UnexpectedDatabaseError(
                'Impossible to update vim_wim_actions '
                '{instance_action_id}[{task_index}]'.format(**action))

        return num_changes

    def get_wan_links(self, uuid=None, **kwargs):
        """Retrieve WAN link records from the database

        Keyword Arguments:
            uuid, instance_scenario_id, sce_net_id, wim_id, wim_account_id:
                attributes that can be used at the WHERE clause
        """
        kwargs.setdefault('uuid', uuid)
        kwargs.setdefault('error_if_none', False)

        criteria_fields = ('uuid', 'instance_scenario_id', 'sce_net_id',
                           'wim_id', 'wim_account_id', 'sdn')
        criteria = remove_none_items(
            filter_dict_keys(kwargs, criteria_fields))
        kwargs = filter_out_dict_keys(kwargs, criteria_fields)

        return self.query('instance_wim_nets', WHERE=criteria, **kwargs)

    def update_wan_link(self, uuid, properties):
        """Update an ``instance_wim_nets`` record, merging the ``wim_info``
        field with the stored one.

        Returns:
            int: number of changed rows
        """
        wan_link = self.get_by_uuid('instance_wim_nets', uuid)

        wim_info = remove_none_items(merge_dicts(
            wan_link.get('wim_info') or {},
            properties.get('wim_info') or {}))

        updates = preprocess_record(
            merge_dicts(wan_link, properties, wim_info=wim_info))

        self.logger.debug({'UPDATE': updates})
        num_changes = self.db.update_rows(
            'instance_wim_nets', UPDATE=updates,
            WHERE={'uuid': wan_link['uuid']})

        if num_changes is None:
            raise UnexpectedDatabaseError(
                'Impossible to update instance_wim_nets ' + wan_link['uuid'])

        return num_changes

    def get_instance_nets(self, instance_scenario_id, sce_net_id, **kwargs):
        """Retrieve all the instance nets related to the same
        instance_scenario and scenario network
        """
        return self.query(
            'instance_nets',
            WHERE={'instance_scenario_id': instance_scenario_id,
                   'sce_net_id': sce_net_id},
            ORDER_BY=kwargs.pop(
                'ORDER_BY', ('instance_scenario_id', 'sce_net_id')),
            **kwargs)

    def update_instance_action_counters(self, uuid, failed=None, done=None):
        """Atomically increment/decrement number_done and number_failed fields
        in the instance action table
        """
        changes = remove_none_items({
            'number_failed': failed and {'INCREMENT': failed},
            'number_done': done and {'INCREMENT': done}
        })

        if not changes:
            return 0

        return self.db.update_rows(
            'instance_actions', WHERE={'uuid': uuid}, UPDATE=changes)

    def get_only_vm_with_external_net(self, instance_net_id, **kwargs):
        """Return an instance VM if that is the only VM connected to an
        external network identified by instance_net_id
        """
        counting = ('SELECT DISTINCT instance_net_id '
                    'FROM instance_interfaces '
                    'WHERE instance_net_id="{}" AND type="external" '
                    'GROUP BY instance_net_id '
                    'HAVING COUNT(*)=1').format(
                        self.safe_str(instance_net_id))

        vm_item = ('SELECT DISTINCT instance_vm_id '
                   'FROM instance_interfaces NATURAL JOIN ({}) AS a'
                   .format(counting))

        return self.query_one(
            'instance_vms JOIN ({}) as instance_interface '
            'ON instance_vms.uuid=instance_interface.instance_vm_id'
            .format(vm_item), **kwargs)

    def safe_str(self, string):
        """Return a SQL safe string"""
        return self.db.escape_string(string)

    def reconnect(self):
        """Re-establish the underlying database connection"""
        self.db.reconnect()

    def _generate_port_mapping_id(self, mapping_info):
        """Given a port mapping represented by a dict with a 'mapping_type'
        field, generate a unique string, in an injective way.
        """
        mapping_info = mapping_info.copy()  # Avoid mutating original object
        mapping_type = mapping_info.pop('mapping_type', None)
        if not mapping_type:
            raise UndefinedWanMappingType(mapping_info)

        unique_fields = UNIQUE_PORT_MAPPING_INFO_FIELDS.get(mapping_type)

        if unique_fields:
            mapping_info = filter_dict_keys(mapping_info, unique_fields)
        else:
            self.logger.warning('Unique fields for WIM port mapping of type '
                                '%s not defined. Please add a list of fields '
                                'which combination should be unique in '
                                'UNIQUE_PORT_MAPPING_INFO_FIELDS '
                                '(`wim/persistency.py) ', mapping_type)

        # ``sort_keys`` guarantees a repeatable representation.
        # The invalid ``encoding='utf-8'`` argument was removed: Python 3's
        # ``json.dumps`` does not accept it and raised TypeError
        repeatable_repr = json.dumps(mapping_info, sort_keys=True,
                                     indent=False)

        return ':'.join([mapping_type, _str2id(repeatable_repr)])
+
+
def _serialize(value):
    """Dump *value* as single-line (flow-style) YAML text.

    Produces a consistent representation, suitable for storage in a
    textual database column.
    """
    text = yaml.safe_dump(value, default_flow_style=True, width=256)
    return text
+
+
def _unserialize(text):
    """Parse a YAML (or JSON) string back into an arbitrary value,
    reversing :obj:`_serialize` when loading from the database.
    """
    value = yaml.safe_load(text)
    return value
+
+
def preprocess_record(record):
    """Apply the default transformations to user-provided data before
    writing it to the database: drop the automatically-managed timestamp
    columns and serialize every field listed in ``_SERIALIZED_FIELDS``.
    """
    automatic_fields = ['created_at', 'modified_at']
    without_timestamps = filter_out_dict_keys(record, automatic_fields)

    return serialize_fields(without_timestamps)
+
+
def _preprocess_wim_account(wim_account):
    """Apply the default preprocessing (see :obj:`preprocess_record`) and
    force the ``sdn`` flag to False, marking the row as a plain WIM account.

    NOTE(review): the original docstring claimed the 'created' field was
    converted from boolean to string here, but no such conversion happens in
    this function -- the string-to-boolean direction is handled only by
    ``_postprocess_wim_account``.
    """
    wim_account = preprocess_record(wim_account)

    # distinguishes WIM accounts from SDN-controller rows in the same table
    wim_account['sdn'] = False
    return wim_account
+
+
def _postprocess_record(record, hide=_CONFIDENTIAL_FIELDS):
    """Default record postprocessing: obfuscate confidential fields,
    unserialize the YAML/JSON columns and convert float timestamps to
    their string representation.
    """
    processed = unserialize_fields(
        hide_confidential_fields(record, hide), hide)

    convert_float_timestamp2str(processed)

    return processed
+
+
def _postprocess_action(action):
    """Unserialize the ``extra`` column of a ``vim_wim_actions`` record
    (left untouched when empty or missing).
    """
    extra = action.get('extra')
    if extra:
        action['extra'] = _unserialize(extra)

    return action
+
+
def _postprocess_wim_account(wim_account, hide=_CONFIDENTIAL_FIELDS):
    """Do the default postprocessing and convert the 'created' field from
    string to boolean.

    Column names that would clash after the SQL join are prefixed with the
    alias of the table they come from (``wim.`` or ``association.``).
    """
    prefix_map = (
        ('wim.', ('type', 'description', 'wim_url')),
        ('association.', ('id', 'nfvo_tenant_id', 'wim_account_id')),
    )

    for prefix, fields in prefix_map:
        for field in fields:
            if field in wim_account:
                wim_account[prefix + field] = wim_account.pop(field)

    wim_account = _postprocess_record(wim_account, hide)

    created = wim_account.get('created')
    wim_account['created'] = (created is True or created == 'true')

    return wim_account
+
+
def _postprocess_wim_port_mapping(mapping, hide=_CONFIDENTIAL_FIELDS):
    """Postprocess a port-mapping record, guaranteeing that
    ``service_mapping_info`` is always a dict (never None).
    """
    mapping = _postprocess_record(mapping, hide=hide)
    mapping['service_mapping_info'] = (
        mapping.get('service_mapping_info') or {})
    return mapping
+
+
def hide_confidential_fields(record, fields=_CONFIDENTIAL_FIELDS):
    """Obfuscate confidential fields from the input dict.

    A key is obfuscated when it equals one of *fields* or ends with
    ``'.<field>'`` (as happens with joined/prefixed columns).

    Note:
        This function performs a SHALLOW operation.
    """
    if not (isinstance(record, dict) and fields):
        return record

    matches = [
        key for key in list(record.keys())
        if any(key == field or key.endswith('.' + field)
               for field in fields)
    ]

    return merge_dicts(record, {key: '********'
                                for key in matches if record[key]})
+
+
def unserialize_fields(record, hide=_CONFIDENTIAL_FIELDS,
                       fields=_SERIALIZED_FIELDS):
    """Unserialize fields that were stored in the database as serialized
    YAML (or JSON), obfuscating any confidential sub-field afterwards.
    """
    matches = (
        key for key in list(record.keys())
        if any(key == field or key.endswith('.' + field)
               for field in fields)
    )

    return merge_dicts(record, {
        key: hide_confidential_fields(_unserialize(record[key]), hide)
        for key in matches if record[key]
    })
+
+
def serialize_fields(record, fields=_SERIALIZED_FIELDS):
    """Serialize fields to be stored in the database as YAML.

    Only non-None values are serialized; None is kept as SQL NULL.
    """
    matches = (
        key for key in list(record.keys())
        if any(key == field or key.endswith('.' + field)
               for field in fields)
    )

    return merge_dicts(record, {
        key: _serialize(record[key])
        for key in matches if record[key] is not None
    })
+
+
def _decide_name_or_uuid(value):
    """Return 'uuid' when *value* looks like a UUID, 'name' otherwise.

    When *value* is a list/tuple, its first element (or '' when empty)
    is the one inspected.
    """
    if isinstance(value, (list, tuple)):
        sample = value[0] if value else ''
    else:
        sample = value

    return 'uuid' if check_valid_uuid(sample) else 'name'
+
+
def _compose_where_from_uuids_or_names(**conditions):
    """Create a dict containing the right conditions to be used in a
    database query.

    This function chooses between ``name`` and ``uuid`` fields based on the
    format of the passed string.
    If a list is passed, its first element is used to choose the field name.
    Conditions whose value is falsy (e.g. ``None``) are dropped entirely.

    Note that this function automatically translates ``tenant`` to
    ``nfvo_tenant`` for the sake of brevity.

    Example:
        >>> _compose_where_from_uuids_or_names(
                wim='abcdef',
                tenant=['xyz123', 'def456']
                datacenter='5286a274-8a1b-4b8d-a667-9c94261ad855')
        {'wim.name': 'abcdef',
         'nfvo_tenant.name': ['xyz123', 'def456']
         'datacenter.uuid': '5286a274-8a1b-4b8d-a667-9c94261ad855'}
    """
    if 'tenant' in conditions:
        conditions['nfvo_tenant'] = conditions.pop('tenant')

    where = {}
    for kind, value in conditions.items():
        if not value:
            continue
        field = _decide_name_or_uuid(value)
        where['{}.{}'.format(kind, field)] = value

    return where
+
+
+def _str2id(text):
+ """Create an ID (following the UUID format) from a piece of arbitrary
+ text.
+
+ Different texts should generate different IDs, and the same text should
+ generate the same ID in a repeatable way.
+ """
+ return sha1(text).hexdigest()
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+from ..openmano_schemas import (
+ description_schema,
+ name_schema,
+ nameshort_schema
+)
+
# WIM -------------------------------------------------------------------------
# Known WIM connector types accepted by the "type" field of a WIM record.
wim_types = ["ietfl2vpn", "dynpac", "arista_cloudvision", "floodlightof", "onosof", "onos_vpls", "odlof", "dummy"]

# Datapath identifier: one or more alphanumeric groups separated by colons
# (e.g. an OpenFlow DPID such as "AA:BB:01:02").
dpid_type = {
    "type": "string",
    "pattern":
        "^[0-9a-zA-Z]+(:[0-9a-zA-Z]+)*$"
}

# Switch port: either a short string label or a port number in 1..65534.
port_type = {
    "oneOf": [
        {"type": "string",
         "minLength": 1,
         "maxLength": 5},
        {"type": "integer",
         "minimum": 1,
         "maximum": 65534}
    ]
}
+
# Schema for the list that maps, per datacenter, the PoP attachment points
# to the WAN endpoints managed by the WIM.
wim_port_mapping_desc = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "datacenter_name": nameshort_schema,
            "pop_wan_mappings": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "device_id": nameshort_schema,
                        "device_interface_id": nameshort_schema,
                        "service_endpoint_id": name_schema,
                        "switch_dpid": dpid_type,
                        "switch_port": port_type,
                        # Free-form mapping metadata; only "mapping_type"
                        # is mandatory, extra keys are allowed.
                        "service_mapping_info": {
                            "type": "object",
                            "properties": {
                                "mapping_type": name_schema,
                            },
                            "additionalProperties": True,
                            "required": ["mapping_type"]
                        }
                    },
                    "required": ["service_endpoint_id"]
                }
            }
        },
        "required": ["datacenter_name", "pop_wan_mappings"]
    }
}
+
# Properties shared by the creation and edition schemas of a WIM record.
wim_schema_properties = {
    "name": name_schema,
    "description": description_schema,
    "type": {
        "type": "string",
        # "enum": ["ietfl2vpn", "onos", "odl", "dynpac", "dummy", ...]
    },
    "wim_url": description_schema,
    "config": {
        "type": "object",
        "properties": {
            "wim_port_mapping": wim_port_mapping_desc
        }
    }
}
+
# Schema used when creating a WIM record: "name", "type" and "wim_url"
# are mandatory on creation.
wim_schema = {
    "title": "wim information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "wim": {
            "type": "object",
            "properties": wim_schema_properties,
            "required": ["name", "type", "wim_url"],
        }
    },
    "required": ["wim"],
}

# Schema used when editing an existing WIM record: same properties,
# but every field is optional.
wim_edit_schema = {
    "title": "wim edit information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "wim": {
            "type": "object",
            "properties": wim_schema_properties,
        }
    },
    "required": ["wim"],
}
+
# Schema for the credentials a tenant uses to access a WIM.
wim_account_schema = {
    "title": "wim account information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "wim_account": {
            "type": "object",
            "properties": {
                "name": name_schema,
                "user": nameshort_schema,
                "password": nameshort_schema,
                "config": {"type": "object"}
            },
        }
    },
    "required": ["wim_account"],
}

# Top-level schema for the port-mapping API payload.
wim_port_mapping_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "wim mapping information schema",
    "type": "object",
    "properties": {
        "wim_port_mapping": wim_port_mapping_desc
    },
    "required": ["wim_port_mapping"]
}
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=W0621
+
+from __future__ import unicode_literals
+
+import json
+from time import time
+from textwrap import wrap
+
+from ...tests.db_helpers import uuid, sha1
+
# Default cardinalities used by consistent_set() when building a full,
# mutually consistent set of fixture records.
NUM_WIMS = 3
NUM_TENANTS = 2
NUM_DATACENTERS = 2
+
+
+# In the following functions, the identifiers should be simple integers
+
+
def wim(identifier=0):
    """Produce a ``wims`` record fixture for the given integer identifier."""
    label = 'wim%d' % identifier
    return {
        'name': label,
        'uuid': uuid(label),
        'wim_url': 'localhost',
        'type': 'ietfl2vpn',
    }
+
+
def tenant(identifier=0):
    """Produce an ``nfvo_tenants`` record fixture."""
    label = 'tenant%d' % identifier
    return {'name': label, 'uuid': uuid(label)}
+
+
def wim_account(wim, tenant):
    """Produce a ``wim_accounts`` record linking WIM and tenant fixtures."""
    suffix = '%d%d' % (tenant, wim)
    return {
        'name': 'wim-account' + suffix,
        'uuid': uuid('wim-account' + suffix),
        'user': 'user' + suffix,
        'password': 'password' + suffix,
        'wim_id': uuid('wim%d' % wim),
        'created': 'true',
    }
+
+
def wim_tenant_association(wim, tenant):
    """Produce a ``wim_nfvo_tenants`` association record."""
    account_key = 'wim-account%d%d' % (tenant, wim)
    return {
        'nfvo_tenant_id': uuid('tenant%d' % tenant),
        'wim_id': uuid('wim%d' % wim),
        'wim_account_id': uuid(account_key),
    }
+
+
def wim_set(identifier=0, tenant=0):
    """Records necessary to create a WIM and connect it to a tenant"""
    records = [
        ('wims', wim(identifier)),
        ('wim_accounts', wim_account(identifier, tenant)),
        ('wim_nfvo_tenants', wim_tenant_association(identifier, tenant)),
    ]
    return [{table: [record]} for table, record in records]
+
+
+def _datacenter_to_switch_port(dc_id, port=None):
+ digits = 16
+ switch = ':'.join(wrap(('%0' + str(digits) + 'x') % int(dc_id), 2))
+ return (switch, str((port or int(dc_id)) + 1))
+
+
def datacenter(identifier, external_ports_config=False):
    """Produce a ``datacenters`` record; optionally including an
    external-connections entry in its JSON-encoded config.
    """
    if external_ports_config:
        switch, port = _datacenter_to_switch_port(identifier)
        config = json.dumps({
            'external_connections': [
                {'condition': {
                    'provider:physical_network': 'provider',
                    'encapsulation_type': 'vlan'},
                 'vim_external_port': {'switch': switch, 'port': port}}
            ]})
    else:
        config = ''

    return {'uuid': uuid('dc%d' % identifier),
            'name': 'dc%d' % identifier,
            'type': 'openvim',
            'vim_url': 'localhost',
            'config': config}
+
+
def datacenter_account(datacenter, tenant):
    """Produce a ``datacenter_tenants`` (VIM account) record fixture."""
    account_key = 'dc-account%d%d' % (tenant, datacenter)
    return {
        'name': account_key,
        'uuid': uuid(account_key),
        'datacenter_id': uuid('dc%d' % datacenter),
        'created': 'true',
    }
+
+
def datacenter_tenant_association(datacenter, tenant):
    """Produce a ``tenants_datacenters`` association record."""
    account_key = 'dc-account%d%d' % (tenant, datacenter)
    return {
        'nfvo_tenant_id': uuid('tenant%d' % tenant),
        'datacenter_id': uuid('dc%d' % datacenter),
        'datacenter_tenant_id': uuid(account_key),
    }
+
+
def datacenter_set(identifier=0, tenant=0):
    """Records necessary to create a datacenter and connect it to a tenant"""
    records = [
        ('datacenters', datacenter(identifier)),
        ('datacenter_tenants', datacenter_account(identifier, tenant)),
        ('tenants_datacenters',
         datacenter_tenant_association(identifier, tenant)),
    ]
    return [{table: [record]} for table, record in records]
+
+
def wim_port_mapping(wim, datacenter,
                     pop_dpid='AA:AA:AA:AA:AA:AA:AA:AA', pop_port=None,
                     wan_dpid='BB:BB:BB:BB:BB:BB:BB:BB', wan_port=None):
    """Produce a ``wim_port_mappings`` record connecting a WIM-managed WAN
    endpoint to a datacenter (PoP) endpoint.

    When the ports are not given explicitly, a deterministic default
    derived from the wim and datacenter indices is used.
    """
    default_port = str(int(datacenter) + int(wim) + 1)
    mapping_info = {'mapping_type': 'dpid-port',
                    'wan_switch_dpid': wan_dpid,
                    'wan_switch_port':
                        str(wan_port) if wan_port else default_port}
    # The endpoint id is derived from the (sorted) mapping info so it is
    # stable across runs.
    id_ = 'dpid-port|' + sha1(json.dumps(mapping_info, sort_keys=True))

    return {'wim_id': uuid('wim%d' % wim),
            'datacenter_id': uuid('dc%d' % datacenter),
            'device_id': pop_dpid,
            'device_interface_id': (str(pop_port) if pop_port
                                    else default_port),
            # ^ Datacenter router have one port managed by each WIM
            'service_endpoint_id': id_,
            # ^ WIM managed router have one port connected to each DC
            'service_mapping_info': json.dumps(mapping_info)}
+
+
def processed_port_mapping(wim, datacenter,
                           num_pairs=1,
                           pop_dpid='AA:AA:AA:AA:AA:AA:AA:AA',
                           wan_dpid='BB:BB:BB:BB:BB:BB:BB:BB'):
    """Emulate the response of the Persistence class, where the records in the
    data base are grouped by wim and datacenter
    """
    mappings = []
    for i in range(num_pairs):
        wan_port = datacenter + 1 + i
        mappings.append({
            'device_id': pop_dpid,
            'device_interface_id': wim + 1 + i,
            'service_endpoint_id':
                sha1('dpid-port|%s|%d' % (wan_dpid, wan_port)),
            'service_mapping_info': {
                'mapping_type': 'dpid-port',
                'wan_switch_dpid': wan_dpid,
                'wan_switch_port': wan_port}})

    return {
        'wim_id': uuid('wim%d' % wim),
        'datacenter_id': uuid('dc%d' % datacenter),
        'pop_wan_mappings': mappings,
    }
+
+
def consistent_set(num_wims=NUM_WIMS, num_tenants=NUM_TENANTS,
                   num_datacenters=NUM_DATACENTERS,
                   external_ports_config=False):
    """Build a mutually consistent set of records: tenants, WIMs,
    datacenters, the accounts/associations linking them, and one port
    mapping per (wim, datacenter) pair.
    """
    tenants = range(num_tenants)
    wims = range(num_wims)
    datacenters = range(num_datacenters)

    def mapping(j, k):
        # With external ports, derive the PoP switch/port from the DC id.
        if external_ports_config:
            return wim_port_mapping(j, k, *_datacenter_to_switch_port(k))
        return wim_port_mapping(j, k)

    return [
        {'nfvo_tenants': [tenant(i) for i in tenants]},
        {'wims': [wim(j) for j in wims]},
        {'wim_accounts': [wim_account(j, i)
                          for i in tenants
                          for j in wims]},
        {'wim_nfvo_tenants': [wim_tenant_association(j, i)
                              for i in tenants
                              for j in wims]},
        {'datacenters': [datacenter(k, external_ports_config)
                         for k in datacenters]},
        {'datacenter_tenants': [datacenter_account(k, i)
                                for i in tenants
                                for k in datacenters]},
        {'tenants_datacenters': [datacenter_tenant_association(k, i)
                                 for i in tenants
                                 for k in datacenters]},
        {'wim_port_mappings': [mapping(j, k)
                               for j in wims
                               for k in datacenters]},
    ]
+
+
def instance_nets(num_datacenters=2, num_links=2, status='BUILD'):
    """Example of multi-site deploy with N datacenters and M WAN links between
    them (e.g M = 2 -> back and forth)
    """
    nets = []
    for dc in range(num_datacenters):
        for link in range(num_links):
            nets.append({
                'uuid': uuid('net%d%d' % (dc, link)),
                'datacenter_id': uuid('dc%d' % dc),
                'datacenter_tenant_id': uuid('dc-account0%d' % dc),
                'instance_scenario_id': uuid('nsr0'),
                # ^ instance_scenario_id == NS Record id
                'sce_net_id': uuid('vld%d' % link),
                # ^ scenario net id == VLD id
                'status': status,
                'vim_net_id': None,
                'created': True})
    return nets
+
+
def wim_actions(action='CREATE', status='SCHEDULED',
                action_id=None, instance=0,
                wim=0, tenant=0, num_links=1):
    """Create a list of actions for the WIM,

    Arguments:
        action: type of action (CREATE) by default
        wim: WIM fixture index to create actions for
        tenant: tenant fixture index to create actions for
        num_links: number of WAN links to be established by each WIM
    """
    if not action_id:
        action_id = 'ACTION-{}'.format(time())

    records = []
    for link in range(num_links):
        records.append({
            'action': action,
            'wim_internal_id': uuid('-wim-net%d%d%d' % (wim, instance, link)),
            'wim_account_id': uuid('wim-account%d%d' % (tenant, wim)),
            'instance_action_id': action_id,
            'item': 'instance_wim_nets',
            'item_id': uuid('wim-net%d%d%d' % (wim, instance, link)),
            'status': status,
            'task_index': link,
            'created_at': time(),
            'modified_at': time(),
            'extra': None})
    return records
+
+
def instance_action(tenant=0, instance=0, action_id=None,
                    num_tasks=1, num_done=0, num_failed=0):
    """Produce an ``instance_actions`` record with the given task counters."""
    if not action_id:
        action_id = 'ACTION-{}'.format(time())

    return {
        'uuid': action_id,
        'tenant_id': uuid('tenant%d' % tenant),
        'instance_id': uuid('nsr%d' % instance),
        'number_tasks': num_tasks,
        'number_done': num_done,
        'number_failed': num_failed,
    }
+
+
def instance_wim_nets(instance=0, wim=0, num_links=1,
                      status='SCHEDULED_CREATION'):
    """Example of multi-site deploy with N wims and M WAN links between
    them (e.g M = 2 -> back and forth)
    VIM nets
    """
    links = []
    for li in range(num_links):
        links.append({
            'uuid': uuid('wim-net%d%d%d' % (wim, instance, li)),
            'wim_id': uuid('wim%d' % wim),
            'wim_account_id': uuid('wim-account%d' % wim),
            'wim_internal_id': uuid('-net%d%d' % (wim, li)),
            'instance_scenario_id': uuid('nsr%d' % instance),
            # ^ instance_scenario_id == NS Record id
            'sce_net_id': uuid('vld%d' % li),
            # ^ scenario net id == VLD id
            'status': status,
            'created': False})
    return links
+
+
def instance_vm(instance=0, vim_info=None):
    """Produce an ``instance_vms`` record fixture.

    Fix: the *vim_info* argument was previously ignored — it was
    unconditionally overwritten with the default value, making the
    parameter dead. It is now honored when provided; the default is
    only built when the caller passes ``None``.
    """
    if vim_info is None:
        vim_info = {'OS-EXT-SRV-ATTR:hypervisor_hostname': 'host%d' % instance}
    return {
        'uuid': uuid('vm%d' % instance),
        'instance_vnf_id': uuid('vnf%d' % instance),
        'vm_id': uuid('vm%d' % instance),
        'vim_vm_id': uuid('vm%d' % instance),
        'status': 'ACTIVE',
        'vim_info': vim_info,
    }
+
+
def instance_interface(instance=0, interface=0, datacenter=0, link=0):
    """Produce an ``instance_interfaces`` record tying a VM to a net."""
    iface_uuid = uuid('interface%d%d' % (instance, interface))
    return {
        'uuid': iface_uuid,
        'instance_vm_id': uuid('vm%d' % instance),
        'instance_net_id': uuid('net%d%d' % (datacenter, link)),
        'interface_id': uuid('iface%d' % interface),
        'type': 'external',
        'vlan': 3,
    }
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=E1101
+
+# from __future__ import unicode_literals, print_function
+
+import json
+import unittest
+from time import time
+
+from unittest.mock import MagicMock, patch
+
+from . import fixtures as eg
+from ...tests.db_helpers import (
+ TestCaseWithDatabasePerTest,
+ disable_foreign_keys,
+ uuid,
+)
+from ..persistence import WimPersistence, preprocess_record
+from ..wan_link_actions import WanLinkCreate, WanLinkDelete
+from ..sdnconn import SdnConnectorError
+
+
class TestActionsWithDb(TestCaseWithDatabasePerTest):
    """Common scaffolding for WAN-link action tests: a database-backed
    persistence layer plus mocked SDN connector and ovim objects.
    """

    def setUp(self):
        super().setUp()
        self.connector = MagicMock()
        self.ovim = MagicMock()
        self.persist = WimPersistence(self.db)
+
+
class TestCreate(TestActionsWithDb):
    """Tests for processing ``WanLinkCreate`` actions against the database."""

    @disable_foreign_keys
    def test_process__instance_nets_on_build(self):
        """A CREATE action is deferred while local networks are still in BUILD."""
        # Given we want 1 WAN link between 2 datacenters
        # and the local network in each datacenter is still being built
        wan_link = eg.instance_wim_nets()
        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1)
        for net in instance_nets:
            net['status'] = 'BUILD'
        self.populate([{'instance_nets': instance_nets,
                        'instance_wim_nets': wan_link}])

        # When we try to process a CREATE action that refers to the same
        # instance_scenario_id and sce_net_id
        now = time()
        action = WanLinkCreate(eg.wim_actions('CREATE')[0])
        action.instance_scenario_id = instance_nets[0]['instance_scenario_id']
        action.sce_net_id = instance_nets[0]['sce_net_id']
        # -- ensure it is in the database for updates --> #
        action_record = action.as_record()
        action_record['extra'] = json.dumps(action_record['extra'])
        self.populate([{'vim_wim_actions': action_record}])
        # <-- #
        action.process(self.connector, self.persist, self.ovim)

        # Then the action should be deferred (rescheduled, attempt counted)
        assert action.is_scheduled
        self.assertEqual(action.extra['attempts'], 1)
        self.assertGreater(action.extra['last_attempted_at'], now)

    @disable_foreign_keys
    def test_process__instance_nets_on_error(self):
        """A CREATE action fails when a local network is in a bad state."""
        # Given we want 1 WAN link between 2 datacenters
        # and at least one local network is in a not good state (error, or
        # being deleted)
        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1)
        instance_nets[1]['status'] = 'SCHEDULED_DELETION'
        wan_link = eg.instance_wim_nets()
        self.populate([{'instance_nets': instance_nets,
                        'instance_wim_nets': wan_link}])

        # When we try to process a CREATE action that refers to the same
        # instance_scenario_id and sce_net_id
        action = WanLinkCreate(eg.wim_actions('CREATE')[0])
        action.instance_scenario_id = instance_nets[0]['instance_scenario_id']
        action.sce_net_id = instance_nets[0]['sce_net_id']
        # -- ensure it is in the database for updates --> #
        action_record = action.as_record()
        action_record['extra'] = json.dumps(action_record['extra'])
        self.populate([{'vim_wim_actions': action_record}])
        # <-- #
        action.process(self.connector, self.persist, self.ovim)

        # Then the action should fail, mentioning the offending state
        assert action.is_failed
        self.assertIn('issue with the local networks', action.error_msg)
        self.assertIn('SCHEDULED_DELETION', action.error_msg)

    def prepare_create__rules(self):
        """Build DB state (external-ports config) plus a ready CREATE action.

        Returns a ``(db_state, action)`` pair; the caller still needs to
        call ``self.populate(db_state)``.
        """
        db_state = eg.consistent_set(num_wims=1, num_tenants=1,
                                     num_datacenters=2,
                                     external_ports_config=True)

        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1,
                                         status='ACTIVE')
        for i, net in enumerate(instance_nets):
            net['vim_info'] = {}
            net['vim_info']['provider:physical_network'] = 'provider'
            net['vim_info']['encapsulation_type'] = 'vlan'
            net['vim_info']['encapsulation_id'] = i
            net['sdn_net_id'] = uuid('sdn-net%d' % i)

        instance_action = eg.instance_action(action_id='ACTION-000')

        db_state += [
            {'instance_wim_nets': eg.instance_wim_nets()},
            {'instance_nets': [preprocess_record(r) for r in instance_nets]},
            {'instance_actions': instance_action}]

        action = WanLinkCreate(
            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
        # --> ensure it is in the database for updates --> #
        action_record = action.as_record()
        action_record['extra'] = json.dumps(action_record['extra'])
        db_state += [{'vim_wim_actions': action_record}]

        return db_state, action

    @disable_foreign_keys
    def test_process__rules(self):
        """A CREATE action succeeds when the connector call works."""
        # Given we want 1 WAN link between 2 datacenters
        # and the local network in each datacenter is already created
        db_state, action = self.prepare_create__rules()
        self.populate(db_state)

        instance_action = self.persist.get_by_uuid(
            'instance_actions', action.instance_action_id)
        number_done = instance_action['number_done']
        number_failed = instance_action['number_failed']

        # If the connector works fine
        with patch.object(self.connector, 'create_connectivity_service',
                          lambda *_, **__: (uuid('random-id'), None)):
            # When we try to process a CREATE action that refers to the same
            # instance_scenario_id and sce_net_id
            action.process(self.connector, self.persist, self.ovim)

        # Then the action should succeed
        db_action = self.persist.query_one('vim_wim_actions', WHERE={
            'instance_action_id': action.instance_action_id,
            'task_index': action.task_index})
        self.assertEqual(db_action['status'], 'DONE')

        instance_action = self.persist.get_by_uuid(
            'instance_actions', action.instance_action_id)
        self.assertEqual(instance_action['number_done'], number_done + 1)
        self.assertEqual(instance_action['number_failed'], number_failed)

    @disable_foreign_keys
    def test_process__rules_fail(self):
        """A CREATE action is marked FAILED when the connector raises."""
        # Given we want 1 WAN link between 2 datacenters
        # and the local network in each datacenter is already created
        db_state, action = self.prepare_create__rules()
        self.populate(db_state)

        instance_action = self.persist.get_by_uuid(
            'instance_actions', action.instance_action_id)
        number_done = instance_action['number_done']
        number_failed = instance_action['number_failed']

        # If the connector raises an error
        with patch.object(self.connector, 'create_connectivity_service',
                          MagicMock(side_effect=SdnConnectorError('foobar'))):
            # When we try to process a CREATE action that refers to the same
            # instance_scenario_id and sce_net_id
            action.process(self.connector, self.persist, self.ovim)

        # Then the action should fail
        db_action = self.persist.query_one('vim_wim_actions', WHERE={
            'instance_action_id': action.instance_action_id,
            'task_index': action.task_index})
        self.assertEqual(db_action['status'], 'FAILED')

        instance_action = self.persist.get_by_uuid(
            'instance_actions', action.instance_action_id)
        self.assertEqual(instance_action['number_done'], number_done)
        self.assertEqual(instance_action['number_failed'], number_failed + 1)

    def prepare_create__sdn(self):
        """Build DB state (SDN-assisted port discovery) plus a CREATE action.

        Returns ``(db_state, action, ovim_patch)``; ``ovim_patch`` stubs
        ``ovim.get_ports`` to return the predictable switch/port pair used
        in the mappings.
        """
        db_state = eg.consistent_set(num_wims=1, num_tenants=1,
                                     num_datacenters=2,
                                     external_ports_config=False)

        # Make sure all port_mappings are predictable
        switch = 'AA:AA:AA:AA:AA:AA:AA:AA'
        port = 1
        port_mappings = next(r['wim_port_mappings']
                             for r in db_state if 'wim_port_mappings' in r)
        for mapping in port_mappings:
            mapping['device_id'] = switch
            mapping['device_interface_id'] = port

        instance_action = eg.instance_action(action_id='ACTION-000')
        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1,
                                         status='ACTIVE')
        for i, net in enumerate(instance_nets):
            net['sdn_net_id'] = uuid('sdn-net%d' % i)

        db_state += [{'instance_nets': instance_nets},
                     {'instance_wim_nets': eg.instance_wim_nets()},
                     {'instance_actions': instance_action}]

        action = WanLinkCreate(
            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
        # --> ensure it is in the database for updates --> #
        action_record = action.as_record()
        action_record['extra'] = json.dumps(action_record['extra'])
        db_state += [{'vim_wim_actions': action_record}]

        ovim_patch = patch.object(
            self.ovim, 'get_ports', MagicMock(return_value=[{
                'switch_dpid': switch,
                'switch_port': port,
            }]))

        return db_state, action, ovim_patch

    @disable_foreign_keys
    def test_process__sdn(self):
        """A CREATE action with SDN port discovery succeeds."""
        # Given we want 1 WAN link between 2 datacenters
        # and the local network in each datacenter is already created
        db_state, action, ovim_patch = self.prepare_create__sdn()
        self.populate(db_state)

        instance_action = self.persist.get_by_uuid(
            'instance_actions', action.instance_action_id)
        number_done = instance_action['number_done']
        number_failed = instance_action['number_failed']

        connector_patch = patch.object(
            self.connector, 'create_connectivity_service',
            lambda *_, **__: (uuid('random-id'), None))

        # If the connector works fine
        with connector_patch, ovim_patch:
            # When we try to process a CREATE action that refers to the same
            # instance_scenario_id and sce_net_id
            action.process(self.connector, self.persist, self.ovim)

        # Then the action should succeed
        db_action = self.persist.query_one('vim_wim_actions', WHERE={
            'instance_action_id': action.instance_action_id,
            'task_index': action.task_index})
        self.assertEqual(db_action['status'], 'DONE')

        instance_action = self.persist.get_by_uuid(
            'instance_actions', action.instance_action_id)
        self.assertEqual(instance_action['number_done'], number_done + 1)
        self.assertEqual(instance_action['number_failed'], number_failed)

    @disable_foreign_keys
    def test_process__sdn_fail(self):
        """A CREATE action with SDN port discovery fails when the connector raises."""
        # Given we want 1 WAN link between 2 datacenters
        # and the local network in each datacenter is already created
        db_state, action, ovim_patch = self.prepare_create__sdn()
        self.populate(db_state)

        instance_action = self.persist.get_by_uuid(
            'instance_actions', action.instance_action_id)
        number_done = instance_action['number_done']
        number_failed = instance_action['number_failed']

        connector_patch = patch.object(
            self.connector, 'create_connectivity_service',
            MagicMock(side_effect=SdnConnectorError('foobar')))

        # If the connector throws an error
        with connector_patch, ovim_patch:
            # When we try to process a CREATE action that refers to the same
            # instance_scenario_id and sce_net_id
            action.process(self.connector, self.persist, self.ovim)

        # Then the action should fail
        db_action = self.persist.query_one('vim_wim_actions', WHERE={
            'instance_action_id': action.instance_action_id,
            'task_index': action.task_index})
        self.assertEqual(db_action['status'], 'FAILED')

        instance_action = self.persist.get_by_uuid(
            'instance_actions', action.instance_action_id)
        self.assertEqual(instance_action['number_done'], number_done)
        self.assertEqual(instance_action['number_failed'], number_failed + 1)
+
+
class TestDelete(TestActionsWithDb):
    """Tests for processing ``WanLinkDelete`` actions against the database.

    Fix: removed leftover debug code (``from pprint import pprint`` and a
    ``pprint(wan_link)`` call) from ``test_process__wan_link_error`` that
    printed noise to stdout on every test run.
    """

    @disable_foreign_keys
    def test_process__no_internal_id(self):
        """DELETE succeeds trivially when no WAN link was ever created."""
        # Given no WAN link was created yet,
        # when we try to process a DELETE action, with no wim_internal_id
        action = WanLinkDelete(eg.wim_actions('DELETE')[0])
        action.wim_internal_id = None
        # -- ensure it is in the database for updates --> #
        action_record = action.as_record()
        action_record['extra'] = json.dumps(action_record['extra'])
        self.populate([{'vim_wim_actions': action_record,
                        'instance_wim_nets': eg.instance_wim_nets()}])
        # <-- #
        action.process(self.connector, self.persist, self.ovim)

        # Then the action should succeed
        assert action.is_done

    def prepare_delete(self):
        """Build DB state plus a DELETE action ready to be processed.

        Returns a ``(db_state, action)`` pair; the caller still needs to
        call ``self.populate(db_state)``.
        """
        db_state = eg.consistent_set(num_wims=1, num_tenants=1,
                                     num_datacenters=2,
                                     external_ports_config=True)

        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1,
                                         status='ACTIVE')
        for i, net in enumerate(instance_nets):
            net['vim_info'] = {}
            net['vim_info']['provider:physical_network'] = 'provider'
            net['vim_info']['encapsulation_type'] = 'vlan'
            net['vim_info']['encapsulation_id'] = i
            net['sdn_net_id'] = uuid('sdn-net%d' % i)

        instance_action = eg.instance_action(action_id='ACTION-000')

        db_state += [
            {'instance_wim_nets': eg.instance_wim_nets()},
            {'instance_nets': [preprocess_record(r) for r in instance_nets]},
            {'instance_actions': instance_action}]

        action = WanLinkDelete(
            eg.wim_actions('DELETE', action_id='ACTION-000')[0])
        # --> ensure it is in the database for updates --> #
        action_record = action.as_record()
        action_record['extra'] = json.dumps(action_record['extra'])
        db_state += [{'vim_wim_actions': action_record}]

        return db_state, action

    @disable_foreign_keys
    def test_process(self):
        """A DELETE action against a healthy WAN link succeeds."""
        # Given we want to delete 1 WAN link between 2 datacenters
        db_state, action = self.prepare_delete()
        self.populate(db_state)

        instance_action = self.persist.get_by_uuid(
            'instance_actions', action.instance_action_id)
        number_done = instance_action['number_done']
        number_failed = instance_action['number_failed']

        connector_patch = patch.object(
            self.connector, 'delete_connectivity_service')

        # If the connector works fine
        with connector_patch:
            # When we try to process a DELETE action that refers to the same
            # instance_scenario_id and sce_net_id
            action.process(self.connector, self.persist, self.ovim)

        # Then the action should succeed
        db_action = self.persist.query_one('vim_wim_actions', WHERE={
            'instance_action_id': action.instance_action_id,
            'task_index': action.task_index})
        self.assertEqual(db_action['status'], 'DONE')

        instance_action = self.persist.get_by_uuid(
            'instance_actions', action.instance_action_id)
        self.assertEqual(instance_action['number_done'], number_done + 1)
        self.assertEqual(instance_action['number_failed'], number_failed)

    @disable_foreign_keys
    def test_process__wan_link_error(self):
        """A DELETE action fails when the targeted WAN link is in ERROR."""
        # Given we have a delete action that targets a wan link with an error
        db_state, action = self.prepare_delete()
        wan_link = [tables for tables in db_state
                    if tables.get('instance_wim_nets')][0]['instance_wim_nets']
        wan_link[0]['status'] = 'ERROR'
        self.populate(db_state)

        # When we try to process it
        action.process(self.connector, self.persist, self.ovim)

        # Then it should fail
        assert action.is_failed

    def create_action(self):
        """Build and persist a CREATE action sharing instance action 000."""
        action = WanLinkCreate(
            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
        # --> ensure it is in the database for updates --> #
        action_record = action.as_record()
        action_record['extra'] = json.dumps(action_record['extra'])
        self.populate([{'vim_wim_actions': action_record}])

        return action

    @disable_foreign_keys
    def test_create_and_delete(self):
        """A DELETE processed after a successful CREATE ends up DONE."""
        # Given a CREATE action was well succeeded
        db_state, delete_action = self.prepare_delete()
        self.populate(db_state)

        delete_action.save(self.persist, task_index=1)
        create_action = self.create_action()

        connector_patch = patch.multiple(
            self.connector,
            delete_connectivity_service=MagicMock(),
            create_connectivity_service=(
                lambda *_, **__: (uuid('random-id'), None)))

        with connector_patch:
            create_action.process(self.connector, self.persist, self.ovim)

        # When we try to process a DELETE action that refers to the same
        # instance_scenario_id and sce_net_id
        with connector_patch:
            delete_action.process(self.connector, self.persist, self.ovim)

        # Then the DELETE action should be successful
        db_action = self.persist.query_one('vim_wim_actions', WHERE={
            'instance_action_id': delete_action.instance_action_id,
            'task_index': delete_action.task_index})
        self.assertEqual(db_action['status'], 'DONE')
+
+
# Allow running this test module directly (e.g. `python test_file.py`).
if __name__ == '__main__':
    unittest.main()
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import unittest
+
+from unittest.mock import MagicMock
+
+from . import fixtures as eg
+from ...tests.db_helpers import TestCaseWithDatabasePerTest, uuid
+from ..errors import NoWimConnectedToDatacenters
+from ..engine import WimEngine
+from ..persistence import WimPersistence
+
+
class TestWimEngineDbMethods(TestCaseWithDatabasePerTest):
    """Tests for the WimEngine methods backed by the database.

    These tests seed the per-test database with fixture records and then
    exercise the WIM-lookup queries (``find_common_wims`` /
    ``find_common_wim``).
    """

    def setUp(self):
        super(TestWimEngineDbMethods, self).setUp()
        self.persist = WimPersistence(self.db)
        self.engine = WimEngine(persistence=self.persist)
        # Make sure no background thread outlives the test
        self.addCleanup(self.engine.stop_threads)

    def populate(self, seeds=None):
        """Seed the test database (defaults to the standard fixture set)."""
        super(TestWimEngineDbMethods, self).populate(
            seeds or eg.consistent_set())

    def test_find_common_wims(self):
        # Given we have 2 WIM, 3 datacenters, but just 1 of the WIMs have
        # access to them
        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
                      eg.wim_set(0, 0) +
                      eg.wim_set(1, 0) +
                      eg.datacenter_set(0, 0) +
                      eg.datacenter_set(1, 0) +
                      eg.datacenter_set(2, 0) +
                      [{'wim_port_mappings': [
                          eg.wim_port_mapping(0, 0),
                          eg.wim_port_mapping(0, 1),
                          eg.wim_port_mapping(0, 2)]}])

        # When we retrieve the wims interconnecting some datacenters
        wim_ids = self.engine.find_common_wims(
            [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')

        # Then we should have just the first wim
        self.assertEqual(len(wim_ids), 1)
        self.assertEqual(wim_ids[0], uuid('wim0'))

    def test_find_common_wims_multiple(self):
        # Given we have 2 WIM, 3 datacenters, and all the WIMs have access to
        # all datacenters
        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
                      eg.wim_set(0, 0) +
                      eg.wim_set(1, 0) +
                      eg.datacenter_set(0, 0) +
                      eg.datacenter_set(1, 0) +
                      eg.datacenter_set(2, 0) +
                      [{'wim_port_mappings': [
                          eg.wim_port_mapping(0, 0),
                          eg.wim_port_mapping(0, 1),
                          eg.wim_port_mapping(0, 2),
                          eg.wim_port_mapping(1, 0),
                          eg.wim_port_mapping(1, 1),
                          eg.wim_port_mapping(1, 2)]}])

        # When we retrieve the wims interconnecting three datacenters
        wim_ids = self.engine.find_common_wims(
            [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')

        # Then we should have all the wims
        self.assertEqual(len(wim_ids), 2)
        # ``assertCountEqual`` is the Python 3 name for the old (Python 2)
        # ``assertItemsEqual`` (order-insensitive comparison)
        self.assertCountEqual(wim_ids, [uuid('wim0'), uuid('wim1')])

    def test_find_common_wim(self):
        # Given we have 1 WIM, 3 datacenters but the WIM have access to just 2
        # of them
        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
                      eg.wim_set(0, 0) +
                      eg.datacenter_set(0, 0) +
                      eg.datacenter_set(1, 0) +
                      eg.datacenter_set(2, 0) +
                      [{'wim_port_mappings': [
                          eg.wim_port_mapping(0, 0),
                          eg.wim_port_mapping(0, 1)]}])

        # When we retrieve the common wim for the 2 datacenter that are
        # interconnected
        wim_id = self.engine.find_common_wim(
            [uuid('dc0'), uuid('dc1')], tenant='tenant0')

        # Then we should find the wim
        self.assertEqual(wim_id, uuid('wim0'))

        # When we try to retrieve the common wim for the all the datacenters
        # Then a NoWimConnectedToDatacenters exception should be raised
        with self.assertRaises(NoWimConnectedToDatacenters):
            self.engine.find_common_wim(
                [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')

    def test_find_common_wim__different_tenants(self):
        # Given we have 1 WIM and 2 datacenters connected but the WIMs don't
        # belong to the tenant we have access to...
        self.populate([{'nfvo_tenants': [eg.tenant(0), eg.tenant(1)]}] +
                      eg.wim_set(0, 0) +
                      eg.datacenter_set(0, 0) +
                      eg.datacenter_set(1, 0) +
                      [{'wim_port_mappings': [
                          eg.wim_port_mapping(0, 0),
                          eg.wim_port_mapping(0, 1)]}])

        # When we retrieve the common wim for the 2 datacenter that are
        # interconnected, but using another tenant,
        # Then we should get an exception
        with self.assertRaises(NoWimConnectedToDatacenters):
            self.engine.find_common_wim(
                [uuid('dc0'), uuid('dc1')], tenant='tenant1')
+
+
class TestWimEngine(unittest.TestCase):
    """Pure unit tests for WimEngine (persistence layer mocked out)."""

    def test_derive_wan_link(self):
        # Given we have 2 datacenters connected by the same WIM, with port
        # mappings registered
        mappings = [eg.processed_port_mapping(0, 0),
                    eg.processed_port_mapping(0, 1)]
        persist = MagicMock(
            get_wim_port_mappings=MagicMock(return_value=mappings))

        engine = WimEngine(persistence=persist)
        # Make sure no background thread outlives the test
        self.addCleanup(engine.stop_threads)

        # When we receive a list of 4 instance nets, representing
        # 2 VLDs connecting 2 datacenters each
        instance_nets = eg.instance_nets(2, 2)
        wan_links = engine.derive_wan_links({}, instance_nets, uuid('tenant0'))

        # Then we should derive 2 wan_links with the same instance_scenario_id
        # and different scenario_network_id
        self.assertEqual(len(wan_links), 2)
        for link in wan_links:
            self.assertEqual(link['instance_scenario_id'], uuid('nsr0'))
        # Each VLD needs a network to be created in each datacenter.
        # ``assertCountEqual`` is the Python 3 name for the old (Python 2)
        # ``assertItemsEqual`` (order-insensitive comparison)
        self.assertCountEqual([li['sce_net_id'] for li in wan_links],
                              [uuid('vld0'), uuid('vld1')])
+
+
# Allow running this test module directly as a standalone script.
if __name__ == '__main__':
    unittest.main()
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import unittest
+
+import bottle
+from unittest.mock import MagicMock, patch
+from webtest import TestApp
+
+from . import fixtures as eg # "examples"
+from ...http_tools.errors import Conflict, Not_Found
+from ...tests.db_helpers import TestCaseWithDatabasePerTest, uuid
+from ...utils import merge_dicts
+from ..http_handler import WimHandler
+
+OK = 200
+
+
@patch('osm_ro.wim.wim_thread.CONNECTORS', MagicMock())  # Avoid external calls
@patch('osm_ro.wim.wim_thread.WimThread.start', MagicMock())  # Avoid running
class TestHttpHandler(TestCaseWithDatabasePerTest):
    """End-to-end tests for the WIM HTTP API served by ``WimHandler``.

    The WSGI app is exercised in-process via webtest's ``TestApp``, while
    the WIM connector threads are patched out so no external system is
    ever contacted.
    """

    def setUp(self):
        super(TestHttpHandler, self).setUp()
        bottle.debug(True)
        handler = WimHandler(db=self.db)
        self.engine = handler.engine
        # Make sure no background thread outlives the test
        self.addCleanup(self.engine.stop_threads)
        self.app = TestApp(handler.wsgi_app)

    def populate(self, seeds=None):
        """Seed the test database (defaults to the standard fixture set)."""
        super(TestHttpHandler, self).populate(seeds or eg.consistent_set())

    def assertDictContainsSubset(self, expected, actual, msg=None):
        """Assert every (key, value) pair of ``expected`` occurs in ``actual``.

        Local drop-in replacement for unittest's method of the same name,
        which is deprecated since Python 3.2 and removed in Python 3.12.
        Merging ``expected`` over ``actual`` leaves ``actual`` unchanged
        exactly when ``expected`` is a sub-dict of it.
        """
        self.assertEqual(actual, {**actual, **expected}, msg)

    def test_list_wims(self):
        # Given some wims are registered in the database
        self.populate()
        # when a GET /<tenant_id>/wims request arrives
        tenant_id = uuid('tenant0')
        response = self.app.get('/{}/wims'.format(tenant_id))

        # then the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        # and all the registered wims should be present
        retrieved_wims = {v['name']: v for v in response.json['wims']}
        for name in retrieved_wims:
            identifier = int(name.replace('wim', ''))
            self.assertDictContainsSubset(
                eg.wim(identifier), retrieved_wims[name])

    def test_show_wim(self):
        # Given some wims are registered in the database
        self.populate()
        # when a GET /<tenant_id>/wims/<wim_id> request arrives
        tenant_id = uuid('tenant0')
        wim_id = uuid('wim1')
        response = self.app.get('/{}/wims/{}'.format(tenant_id, wim_id))

        # then the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        # and the registered wim (wim1) should be present
        self.assertDictContainsSubset(eg.wim(1), response.json['wim'])
        # Moreover, it also works with tenant_id = all
        response = self.app.get('/any/wims/{}'.format(wim_id))
        self.assertEqual(response.status_code, OK)
        self.assertDictContainsSubset(eg.wim(1), response.json['wim'])

    def test_show_wim__wim_doesnt_exists(self):
        # Given wim_id does not refer to any already registered wim
        self.populate()
        # when a GET /<tenant_id>/wims/<wim_id> request arrives
        tenant_id = uuid('tenant0')
        wim_id = uuid('wim999')
        response = self.app.get(
            '/{}/wims/{}'.format(tenant_id, wim_id),
            expect_errors=True)

        # then the result should not be well succeeded
        self.assertEqual(response.status_code, Not_Found)

    def test_show_wim__tenant_doesnt_exists(self):
        # Given tenant_id does not refer to any already registered tenant
        self.populate()
        # when a GET /<tenant_id>/wims/<wim_id> request arrives
        tenant_id = uuid('tenant999')
        wim_id = uuid('wim0')
        response = self.app.get(
            '/{}/wims/{}'.format(tenant_id, wim_id),
            expect_errors=True)

        # then the result should not be well succeeded
        self.assertEqual(response.status_code, Not_Found)

    def test_edit_wim(self):
        # Given a WIM exists in the database
        self.populate()
        # when a PUT /wims/<wim_id> request arrives
        wim_id = uuid('wim1')
        response = self.app.put_json('/wims/{}'.format(wim_id), {
            'wim': {'name': 'My-New-Name'}})

        # then the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        # and the registered wim (wim1) should be present
        self.assertDictContainsSubset(
            merge_dicts(eg.wim(1), name='My-New-Name'),
            response.json['wim'])

    def test_edit_wim__port_mappings(self):
        # Given a WIM exists in the database
        self.populate()
        # when a PUT /wims/<wim_id> request arrives
        wim_id = uuid('wim1')
        response = self.app.put_json(
            '/wims/{}'.format(wim_id), {
                'wim': dict(
                    name='My-New-Name',
                    config={'wim_port_mapping': [{
                        'datacenter_name': 'dc0',
                        'pop_wan_mappings': [{
                            'device_id': '00:AA:11:BB:22:CC:33:DD',
                            'device_interface_id': 1,
                            'service_mapping_info': {
                                'mapping_type': 'dpid-port',
                                'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:0A',
                                'wan_switch_port': 1
                            }
                        }]}]
                    }
                )
            }
        )

        # then the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        # and the registered wim (wim1) should be present
        self.assertDictContainsSubset(
            merge_dicts(eg.wim(1), name='My-New-Name'),
            response.json['wim'])
        # and the port mappings should be updated
        mappings = response.json['wim']['config']['wim_port_mapping']
        self.assertEqual(len(mappings), 1)
        self.assertEqual(
            mappings[0]['pop_wan_mappings'][0]['device_id'],
            '00:AA:11:BB:22:CC:33:DD')

    def test_delete_wim(self):
        # Given a WIM exists in the database
        self.populate()
        num_accounts = self.count('wim_accounts')
        num_associations = self.count('wim_nfvo_tenants')
        num_mappings = self.count('wim_port_mappings')

        with self.engine.threads_running():
            num_threads = len(self.engine.threads)
            # when a DELETE /wims/<wim_id> request arrives
            wim_id = uuid('wim1')
            response = self.app.delete('/wims/{}'.format(wim_id))
            num_threads_after = len(self.engine.threads)

        # then the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        self.assertIn('deleted', response.json['result'])
        # and the registered wim1 should be deleted
        response = self.app.get(
            '/any/wims/{}'.format(wim_id),
            expect_errors=True)
        self.assertEqual(response.status_code, Not_Found)
        # and all the dependent records in other tables should be deleted:
        # wim_accounts, wim_nfvo_tenants, wim_port_mappings
        self.assertEqual(self.count('wim_nfvo_tenants'),
                         num_associations - eg.NUM_TENANTS)
        self.assertLess(self.count('wim_port_mappings'), num_mappings)
        self.assertEqual(self.count('wim_accounts'),
                         num_accounts - eg.NUM_TENANTS)
        # And the threads associated with the wim accounts should be stopped
        self.assertEqual(num_threads_after, num_threads - eg.NUM_TENANTS)

    def test_create_wim(self):
        # Given no WIM exists yet
        # when a POST /wims request arrives with the right payload
        response = self.app.post_json('/wims', {'wim': eg.wim(999)})

        # then the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        self.assertEqual(response.json['wim']['name'], 'wim999')

    def test_create_wim__port_mappings(self):
        self.populate()
        # when a POST /wims request arrives with the right payload
        response = self.app.post_json(
            '/wims', {
                'wim': merge_dicts(
                    eg.wim(999),
                    config={'wim_port_mapping': [{
                        'datacenter_name': 'dc0',
                        'pop_wan_mappings': [{
                            'device_id': 'AA:AA:AA:AA:AA:AA:AA:01',
                            'device_interface_id': 1,
                            'service_mapping_info': {
                                'mapping_type': 'dpid-port',
                                'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:01',
                                'wan_switch_port': 1
                            }
                        }]}]
                    }
                )
            }
        )

        # then the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        self.assertEqual(response.json['wim']['name'], 'wim999')
        self.assertEqual(
            len(response.json['wim']['config']['wim_port_mapping']), 1)

    def test_create_wim_account(self):
        # Given a WIM and a NFVO tenant exist but are not associated
        self.populate([{'wims': [eg.wim(0)]},
                       {'nfvo_tenants': [eg.tenant(0)]}])

        with self.engine.threads_running():
            num_threads = len(self.engine.threads)
            # when a POST /<tenant_id>/wims/<wim_id> arrives
            response = self.app.post_json(
                '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')),
                {'wim_account': eg.wim_account(0, 0)})

            num_threads_after = len(self.engine.threads)

        # then a new thread should be created
        self.assertEqual(num_threads_after, num_threads + 1)

        # and the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        self.assertEqual(response.json['wim_account']['name'], 'wim-account00')

        # and a new association record should be created
        association = self.db.get_rows(FROM='wim_nfvo_tenants')
        assert association
        self.assertEqual(len(association), 1)
        self.assertEqual(association[0]['wim_id'], uuid('wim0'))
        self.assertEqual(association[0]['nfvo_tenant_id'], uuid('tenant0'))
        self.assertEqual(association[0]['wim_account_id'],
                         response.json['wim_account']['uuid'])

    def test_create_wim_account__existing_account(self):
        # Given a WIM, a WIM account and a NFVO tenants exist
        # But the NFVO and the WIM are not associated
        self.populate([
            {'wims': [eg.wim(0)]},
            {'nfvo_tenants': [eg.tenant(0)]},
            {'wim_accounts': [eg.wim_account(0, 0)]}])

        # when a POST /<tenant_id>/wims/<wim_id> arrives
        # and it refers to an existing wim account
        response = self.app.post_json(
            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')),
            {'wim_account': {'name': 'wim-account00'}})

        # then the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        # and the association should be created
        association = self.db.get_rows(
            FROM='wim_nfvo_tenants',
            WHERE={'wim_id': uuid('wim0'),
                   'nfvo_tenant_id': uuid('tenant0')})
        assert association
        self.assertEqual(len(association), 1)
        # but no new wim_account should be created
        wim_accounts = self.db.get_rows(FROM='wim_accounts')
        self.assertEqual(len(wim_accounts), 1)
        self.assertEqual(wim_accounts[0]['name'], 'wim-account00')

    def test_create_wim_account__existing_account__differing(self):
        # Given a WIM, a WIM account and a NFVO tenants exist
        # But the NFVO and the WIM are not associated
        self.populate([
            {'wims': [eg.wim(0)]},
            {'nfvo_tenants': [eg.tenant(0)]},
            {'wim_accounts': [eg.wim_account(0, 0)]}])

        # when a POST /<tenant_id>/wims/<wim_id> arrives
        # and it refers to an existing wim account,
        # but with different fields
        response = self.app.post_json(
            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
                'wim_account': {
                    'name': 'wim-account00',
                    'user': 'john',
                    'password': 'abc123'}},
            expect_errors=True)

        # then the request should not be well succeeded
        self.assertEqual(response.status_code, Conflict)
        # some useful message should be displayed
        response.mustcontain('attempt to overwrite', 'user', 'password')
        # and the association should not be created
        association = self.db.get_rows(
            FROM='wim_nfvo_tenants',
            WHERE={'wim_id': uuid('wim0'),
                   'nfvo_tenant_id': uuid('tenant0')})
        assert not association

    def test_create_wim_account__association_already_exists(self):
        # Given a WIM, a WIM account and a NFVO tenants exist
        # and are correctly associated
        self.populate()
        num_assoc_before = self.count('wim_nfvo_tenants')

        # when a POST /<tenant_id>/wims/<wim_id> arrives trying to connect a
        # WIM and a tenant for the second time
        response = self.app.post_json(
            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
                'wim_account': {
                    'user': 'user999',
                    'password': 'password999'}},
            expect_errors=True)

        # then the request should not be well succeeded
        self.assertEqual(response.status_code, Conflict)
        # the message should be useful
        response.mustcontain('There is already', uuid('wim0'), uuid('tenant0'))

        num_assoc_after = self.count('wim_nfvo_tenants')

        # and the number of association record should not be increased
        self.assertEqual(num_assoc_before, num_assoc_after)

    def test_create_wim__tenant_doesnt_exist(self):
        # Given the tenant does not exist
        self.populate()

        # But the user tries to create a wim_account anyway
        response = self.app.post_json(
            '/{}/wims/{}'.format(uuid('tenant999'), uuid('wim0')), {
                'wim_account': {
                    'user': 'user999',
                    'password': 'password999'}},
            expect_errors=True)

        # then the request should not be well succeeded
        self.assertEqual(response.status_code, Not_Found)
        # the message should be useful
        response.mustcontain('No record was found', uuid('tenant999'))

    def test_create_wim__wim_doesnt_exist(self):
        # Given the WIM does not exist
        self.populate()

        # But the user tries to create a wim_account anyway
        response = self.app.post_json(
            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim999')), {
                'wim_account': {
                    'user': 'user999',
                    'password': 'password999'}},
            expect_errors=True)

        # then the request should not be well succeeded
        self.assertEqual(response.status_code, Not_Found)
        # the message should be useful
        response.mustcontain('No record was found', uuid('wim999'))

    def test_update_wim_account(self):
        # Given a WIM account connecting a tenant and a WIM exists
        self.populate()

        with self.engine.threads_running():
            num_threads = len(self.engine.threads)

            thread = self.engine.threads[uuid('wim-account00')]
            # Wrap (not replace) the real reload so behavior is kept
            reload_mock = MagicMock(wraps=thread.reload)

            with patch.object(thread, 'reload', reload_mock):
                # when a PUT /<tenant_id>/wims/<wim_id> arrives
                response = self.app.put_json(
                    '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
                        'wim_account': {
                            'name': 'account888',
                            'user': 'user888'}})

            num_threads_after = len(self.engine.threads)

        # then the wim thread should be restarted
        reload_mock.assert_called_once()
        # and no thread should be added or removed
        self.assertEqual(num_threads_after, num_threads)

        # and the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        self.assertEqual(response.json['wim_account']['name'], 'account888')
        self.assertEqual(response.json['wim_account']['user'], 'user888')

    def test_update_wim_account__multiple(self):
        # Given a WIM account connected to several tenants
        self.populate()

        with self.engine.threads_running():
            # when a PUT /any/wims/<wim_id> arrives
            response = self.app.put_json(
                '/any/wims/{}'.format(uuid('wim0')), {
                    'wim_account': {
                        'user': 'user888',
                        'config': {'x': 888}}})

        # then the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        self.assertEqual(len(response.json['wim_accounts']), eg.NUM_TENANTS)

        for account in response.json['wim_accounts']:
            self.assertEqual(account['user'], 'user888')
            self.assertEqual(account['config']['x'], 888)

    def test_delete_wim_account(self):
        # Given a WIM account exists and it is connected to a tenant
        self.populate()

        num_accounts_before = self.count('wim_accounts')

        with self.engine.threads_running():
            thread = self.engine.threads[uuid('wim-account00')]
            # Wrap (not replace) the real exit so behavior is kept;
            # avoid shadowing the ``exit`` builtin
            exit_mock = MagicMock(wraps=thread.exit)
            num_threads = len(self.engine.threads)

            with patch.object(thread, 'exit', exit_mock):
                # when a DELETE /<tenant_id>/wims/<wim_id> arrives
                response = self.app.delete_json(
                    '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')))

            num_threads_after = len(self.engine.threads)

        # then the wim thread should exit
        self.assertEqual(num_threads_after, num_threads - 1)
        exit_mock.assert_called_once()

        # and the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        response.mustcontain('account `wim-account00` deleted')

        # and the number of wim_accounts should decrease
        num_accounts_after = self.count('wim_accounts')
        self.assertEqual(num_accounts_after, num_accounts_before - 1)

    def test_delete_wim_account__multiple(self):
        # Given a WIM account exists and it is connected to several tenants
        self.populate()

        num_accounts_before = self.count('wim_accounts')

        with self.engine.threads_running():
            # when a DELETE /any/wims/<wim_id> arrives
            response = self.app.delete_json(
                '/any/wims/{}'.format(uuid('wim0')))

        # then the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        response.mustcontain('account `wim-account00` deleted')
        response.mustcontain('account `wim-account10` deleted')

        # and the number of wim_accounts should decrease
        num_accounts_after = self.count('wim_accounts')
        self.assertEqual(num_accounts_after,
                         num_accounts_before - eg.NUM_TENANTS)

    def test_delete_wim_account__doesnt_exist(self):
        # Given we have a tenant that is not connected to a WIM
        self.populate()
        tenant = {'uuid': uuid('tenant888'), 'name': 'tenant888'}
        self.populate([{'nfvo_tenants': [tenant]}])

        num_accounts_before = self.count('wim_accounts')

        # when a DELETE /<tenant_id>/wims/<wim_id> arrives
        response = self.app.delete(
            '/{}/wims/{}'.format(uuid('tenant888'), uuid('wim0')),
            expect_errors=True)

        # then the request should not succeed
        self.assertEqual(response.status_code, Not_Found)

        # and the number of wim_accounts should not decrease
        num_accounts_after = self.count('wim_accounts')
        self.assertEqual(num_accounts_after, num_accounts_before)

    def test_create_port_mappings(self):
        # Given we have a wim and datacenter without any port mappings
        self.populate([{'nfvo_tenants': eg.tenant(0)}] +
                      eg.datacenter_set(888, 0) +
                      eg.wim_set(999, 0))

        # when a POST /<tenant_id>/wims/<wim_id>/port_mapping arrives
        response = self.app.post_json(
            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim999')),
            {'wim_port_mapping': [{
                'datacenter_name': 'dc888',
                'pop_wan_mappings': [
                    {'device_id': 'AA:AA:AA:AA:AA:AA:AA:AA',
                     'device_interface_id': 1,
                     'service_mapping_info': {
                         'mapping_type': 'dpid-port',
                         'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:BB',
                         'wan_switch_port': 1
                     }}
                ]}
            ]})

        # the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        # and port mappings should be stored in the database
        port_mapping = self.db.get_rows(FROM='wim_port_mappings')
        self.assertEqual(len(port_mapping), 1)

    def test_get_port_mappings(self):
        # Given WIMS and datacenters exist with port mappings between them
        self.populate()
        # when a GET /<tenant_id>/wims/<wim_id>/port_mapping arrives
        response = self.app.get(
            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim0')))
        # the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        # and we should see port mappings for each WIM, datacenter pair
        mappings = response.json['wim_port_mapping']
        self.assertEqual(len(mappings), eg.NUM_DATACENTERS)
        # ^ In the fixture set all the datacenters are connected to all wims

    def test_delete_port_mappings(self):
        # Given WIMS and datacenters exist with port mappings between them
        self.populate()
        num_mappings_before = self.count('wim_port_mappings')

        # when a DELETE /<tenant_id>/wims/<wim_id>/port_mapping arrives
        response = self.app.delete(
            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim0')))
        # the request should be well succeeded
        self.assertEqual(response.status_code, OK)
        # and the number of port mappings should decrease
        num_mappings_after = self.count('wim_port_mappings')
        self.assertEqual(num_mappings_after,
                         num_mappings_before - eg.NUM_DATACENTERS)
        # ^ In the fixture set all the datacenters are connected to all wims
+
+
# Allow running this test module directly as a standalone script.
if __name__ == '__main__':
    unittest.main()
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import unittest
+from itertools import chain
+
+from . import fixtures as eg
+from ...tests.db_helpers import (
+ TestCaseWithDatabasePerTest,
+ disable_foreign_keys,
+ uuid
+)
+from ..persistence import (
+ WimPersistence,
+ hide_confidential_fields,
+ serialize_fields,
+ unserialize_fields
+)
+
+
class TestPersistenceUtils(unittest.TestCase):
    """Unit tests for the pure helper functions of the persistence module."""

    def test_hide_confidential_fields(self):
        # Confidential values must be masked, explicit ``None`` kept as-is
        record = {
            'password': '123456',
            'nested.password': '123456',
            'nested.secret': None,
        }
        sanitized = hide_confidential_fields(record,
                                             fields=('password', 'secret'))
        for key in ('password', 'nested.password'):
            self.assertTrue(sanitized[key].startswith('***'))
        self.assertIsNone(sanitized['nested.secret'])

    def test_serialize_fields(self):
        # Selected fields become strings, ``None`` values are untouched
        record = {
            'config': dict(x=1),
            'nested.info': [1, 2, 3],
            'nested.config': None
        }
        serialized = serialize_fields(record, fields=('config', 'info'))
        for key in ('config', 'nested.info'):
            self.assertIsInstance(serialized[key], str)
        self.assertIsNone(serialized['nested.config'])

    def test_unserialize_fields(self):
        # Selected fields are parsed back; confidential sub-fields
        # inside parsed payloads come out masked, not in clear text
        record = {
            'config': '{"x": 1}',
            'nested.info': '[1,2,3]',
            'nested.config': None,
            'confidential.info': '{"password": "abcdef"}'
        }
        restored = unserialize_fields(record, fields=('config', 'info'))
        self.assertEqual(restored['config'], dict(x=1))
        self.assertEqual(restored['nested.info'], [1, 2, 3])
        self.assertIsNone(restored['nested.config'])
        self.assertNotEqual(restored['confidential.info']['password'],
                            'abcdef')
        self.assertTrue(
            restored['confidential.info']['password'].startswith('***'))
+
+
class TestWimPersistence(TestCaseWithDatabasePerTest):
    """Tests for the database-access methods of ``WimPersistence``."""

    def setUp(self):
        super(TestWimPersistence, self).setUp()
        self.persist = WimPersistence(self.db)

    def populate(self, seeds=None):
        """Seed the test database (defaults to the standard fixture set)."""
        super(TestWimPersistence, self).populate(seeds or eg.consistent_set())

    def test_query_offset(self):
        # Given a database contains 4 records
        self.populate([{'wims': [eg.wim(i) for i in range(4)]}])

        # When we query using a limit of 2 and an offset of 1
        results = self.persist.query('wims',
                                     ORDER_BY='name', LIMIT=2, OFFSET=1)
        # Then we should have 2 results, skipping the first record
        names = [r['name'] for r in results]
        # ``assertCountEqual`` is the Python 3 name for the old (Python 2)
        # ``assertItemsEqual`` (order-insensitive comparison)
        self.assertCountEqual(names, ['wim1', 'wim2'])

    def test_get_wim_account_by_wim_tenant(self):
        # Given a database contains WIM accounts associated to Tenants
        self.populate()

        # when we retrieve the account using wim and tenant
        wim_account = self.persist.get_wim_account_by(
            uuid('wim0'), uuid('tenant0'))

        # then the right record should be returned
        self.assertEqual(wim_account['uuid'], uuid('wim-account00'))
        self.assertEqual(wim_account['name'], 'wim-account00')
        self.assertEqual(wim_account['user'], 'user00')

    def test_get_wim_account_by_wim_tenant__names(self):
        # Given a database contains WIM accounts associated to Tenants
        self.populate()

        # when we retrieve the account using wim and tenant names
        wim_account = self.persist.get_wim_account_by(
            'wim0', 'tenant0')

        # then the right record should be returned
        self.assertEqual(wim_account['uuid'], uuid('wim-account00'))
        self.assertEqual(wim_account['name'], 'wim-account00')
        self.assertEqual(wim_account['user'], 'user00')

    def test_get_wim_accounts_by_wim(self):
        # Given a database contains WIM accounts associated to Tenants
        self.populate()

        # when we retrieve the accounts using wim
        wim_accounts = self.persist.get_wim_accounts_by(uuid('wim0'))

        # then the right records should be returned
        self.assertEqual(len(wim_accounts), eg.NUM_TENANTS)
        for account in wim_accounts:
            self.assertEqual(account['wim_id'], uuid('wim0'))

    def test_get_wim_port_mappings(self):
        # Given a database with WIMs, datacenters and port-mappings
        self.populate()

        # when we retrieve the port mappings for a list of datacenters
        # using either names or uuids
        for criteria in ([uuid('dc0'), uuid('dc1')], ['dc0', 'dc1']):
            mappings = self.persist.get_wim_port_mappings(datacenter=criteria)

            # then each result should have a datacenter_id
            datacenters = [m['datacenter_id'] for m in mappings]
            for datacenter in datacenters:
                self.assertIn(datacenter, [uuid('dc0'), uuid('dc1')])

            # a wim_id
            wims = [m['wim_id'] for m in mappings]
            for wim in wims:
                self.assertIsNot(wim, None)

            # and an array of pairs 'wan' <> 'pop' connections
            pairs = chain(*(m['pop_wan_mappings'] for m in mappings))
            self.assertEqual(len(list(pairs)), 2 * eg.NUM_WIMS)

    def test_get_wim_port_mappings_multiple(self):
        # Given we have more than one connection in a datacenter managed by
        # the WIM
        self.populate()
        self.populate([{
            'wim_port_mappings': [
                eg.wim_port_mapping(
                    0, 0,
                    pop_dpid='CC:CC:CC:CC:CC:CC:CC:CC',
                    wan_dpid='DD:DD:DD:DD:DD:DD:DD:DD'),
                eg.wim_port_mapping(
                    0, 0,
                    pop_dpid='EE:EE:EE:EE:EE:EE:EE:EE',
                    wan_dpid='FF:FF:FF:FF:FF:FF:FF:FF')]}])

        # when we retrieve the port mappings for the wim and datacenter:
        mappings = (
            self.persist.get_wim_port_mappings(wim='wim0', datacenter='dc0'))

        # then it should return just a single result, grouped by wim and
        # datacenter
        self.assertEqual(len(mappings), 1)
        self.assertEqual(mappings[0]['wim_id'], uuid('wim0'))
        self.assertEqual(mappings[0]['datacenter_id'], uuid('dc0'))

        self.assertEqual(len(mappings[0]['pop_wan_mappings']), 3)

        # when we retrieve the mappings for more than one wim/datacenter
        # the grouping should still work properly
        mappings = self.persist.get_wim_port_mappings(
            wim=['wim0', 'wim1'], datacenter=['dc0', 'dc1'])
        self.assertEqual(len(mappings), 4)
        pairs = chain(*(m['pop_wan_mappings'] for m in mappings))
        self.assertEqual(len(list(pairs)), 6)

    def test_get_actions_in_group(self):
        # Given a good number of wim actions exist in the database
        kwargs = {'action_id': uuid('action0')}
        actions = (eg.wim_actions('CREATE', num_links=8, **kwargs) +
                   eg.wim_actions('FIND', num_links=8, **kwargs) +
                   eg.wim_actions('START', num_links=8, **kwargs))
        for i, action in enumerate(actions):
            action['task_index'] = i

        self.populate([
            {'nfvo_tenants': eg.tenant()}
        ] + eg.wim_set() + [
            {'instance_actions': eg.instance_action(**kwargs)},
            {'vim_wim_actions': actions}
        ])

        # When we retrieve them in groups
        limit = 5
        results = self.persist.get_actions_in_groups(
            uuid('wim-account00'), ['instance_wim_nets'], group_limit=limit)

        # Then we should have N groups where N == limit
        self.assertEqual(len(results), limit)
        for _, task_list in results:
            # And since for each link we have create 3 actions (create, find,
            # start), we should find them in each group
            self.assertEqual(len(task_list), 3)

    @disable_foreign_keys
    def test_update_instance_action_counters(self):
        # Given we have one instance action in the database with 2 incomplete
        # tasks
        action = eg.instance_action(num_tasks=2)
        self.populate([{'instance_actions': action}])
        # When we update the done counter by 0, nothing should happen
        self.persist.update_instance_action_counters(action['uuid'], done=0)
        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
        self.assertEqual(result['number_done'], 0)
        self.assertEqual(result['number_failed'], 0)
        # When we update the done counter by 2, number_done should be 2
        self.persist.update_instance_action_counters(action['uuid'], done=2)
        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
        self.assertEqual(result['number_done'], 2)
        self.assertEqual(result['number_failed'], 0)
        # When we update the done counter by -1, and the failed counter by 1
        self.persist.update_instance_action_counters(
            action['uuid'], done=-1, failed=1)
        # Then we should see 1 and 1
        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
        self.assertEqual(result['number_done'], 1)
        self.assertEqual(result['number_failed'], 1)
+
+
# Allow executing this test module directly
if __name__ == '__main__':
    unittest.main()
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import unittest
+from difflib import unified_diff
+from operator import itemgetter
+from time import time
+
+import json
+
+from unittest.mock import MagicMock, patch
+
+from . import fixtures as eg
+from ...tests.db_helpers import (
+ TestCaseWithDatabasePerTest,
+ disable_foreign_keys,
+ uuid
+)
+from ..engine import WimEngine
+from ..persistence import WimPersistence
+from ..wim_thread import WimThread
+
+
# Decorator that replaces the CONNECTORS registry with a MagicMock for the
# duration of each test class, so no real SDN connector is instantiated
ignore_connector = patch('osm_ro.wim.wim_thread.CONNECTORS', MagicMock())
+
+
+def _repr(value):
+ return json.dumps(value, indent=4, sort_keys=True)
+
+
@ignore_connector
class TestWimThreadWithDb(TestCaseWithDatabasePerTest):
    """Tests for WimThread that exercise a real (per-test) database."""

    def setUp(self):
        super(TestWimThreadWithDb, self).setUp()
        self.persist = WimPersistence(self.db)
        wim = eg.wim(0)
        account = eg.wim_account(0, 0)
        account['wim'] = wim
        self.thread = WimThread(self.persist, {}, account)
        # Avoid touching any real SDN connector
        self.thread.connector = MagicMock()

    def assertTasksEqual(self, left, right):
        """Compare two task collections field-wise, printing a readable
        diff when they differ.

        Arguments:
            left: iterable of task objects (each providing ``as_dict``)
            right: iterable of plain task records (dicts)
        """
        fields = itemgetter('item', 'item_id', 'action', 'status')
        left_ = (t.as_dict() for t in left)
        left_ = [fields(t) for t in left_]
        right_ = [fields(t) for t in right]

        try:
            # Fix: ``assertItemsEqual`` is Python-2-only; this file already
            # relies on Python 3 (``unittest.mock``), so use the Python 3
            # spelling ``assertCountEqual``
            self.assertCountEqual(left_, right_)
        except AssertionError:
            # Fix: dump the extracted tuples instead of the raw task
            # objects, which are not JSON-serializable and would raise
            # TypeError inside the failure handler
            print('left', _repr(left_))
            print('left', len(left_), 'items')
            print('right', len(right_), 'items')
            result = list(unified_diff(_repr(sorted(left_)).split('\n'),
                                       _repr(sorted(right_)).split('\n'),
                                       'left', 'right'))
            print('diff:\n', '\n'.join(result))
            raise

    def test_reload_actions__all_create(self):
        """Reloading CREATE actions must queue all of them as pending."""
        # Given we have 3 CREATE actions stored in the database
        actions = eg.wim_actions('CREATE',
                                 action_id=uuid('action0'), num_links=3)
        self.populate([
            {'nfvo_tenants': eg.tenant()}
        ] + eg.wim_set() + [
            {'instance_actions':
                eg.instance_action(action_id=uuid('action0'))},
            {'vim_wim_actions': actions}
        ])

        # When we reload the tasks
        self.thread.reload_actions()
        # All of them should be inserted as pending
        self.assertTasksEqual(self.thread.pending_tasks, actions)

    def test_reload_actions__all_refresh(self):
        """Reloading DONE actions must queue all of them for refresh."""
        # Given just DONE tasks are in the database
        actions = eg.wim_actions(status='DONE',
                                 action_id=uuid('action0'), num_links=3)
        self.populate([
            {'nfvo_tenants': eg.tenant()}
        ] + eg.wim_set() + [
            {'instance_actions':
                eg.instance_action(action_id=uuid('action0'))},
            {'vim_wim_actions': actions}
        ])

        # When we reload the tasks
        self.thread.reload_actions()
        # All of them should be inserted as refresh
        self.assertTasksEqual(self.thread.refresh_tasks, actions)

    def test_reload_actions__grouped(self):
        """Tasks targeting the same item must end up in a single group."""
        # Given we have 2 tasks for the same item in the database
        kwargs = {'action_id': uuid('action0')}
        actions = (eg.wim_actions('CREATE', **kwargs) +
                   eg.wim_actions('FIND', **kwargs))
        for i, action in enumerate(actions):
            action['task_index'] = i

        self.populate([
            {'nfvo_tenants': eg.tenant()}
        ] + eg.wim_set() + [
            {'instance_actions': eg.instance_action(**kwargs)},
            {'vim_wim_actions': actions}
        ])

        # When we reload the tasks
        self.thread.reload_actions()
        # Just one group should be created
        self.assertEqual(len(self.thread.grouped_tasks.values()), 1)

    def test_reload_actions__delete_scheduled(self):
        """A SCHEDULED DELETE still belongs to the same group as the other
        actions on the same item.
        """
        # Given we have 3 tasks for the same item in the database, but one of
        # them is a DELETE task and it is SCHEDULED
        kwargs = {'action_id': uuid('action0')}
        actions = (eg.wim_actions('CREATE', **kwargs) +
                   eg.wim_actions('FIND', **kwargs) +
                   eg.wim_actions('DELETE', status='SCHEDULED', **kwargs))
        for i, action in enumerate(actions):
            action['task_index'] = i

        self.populate([
            {'nfvo_tenants': eg.tenant()}
        ] + eg.wim_set() + [
            {'instance_actions': eg.instance_action(**kwargs)},
            {'vim_wim_actions': actions}
        ])

        # When we reload the tasks
        self.thread.reload_actions()
        # Just one group should be created
        self.assertEqual(len(self.thread.grouped_tasks.values()), 1)

    def test_reload_actions__delete_done(self):
        """Once an item was deleted (DELETE not SCHEDULED), none of its
        tasks should be re-queued as pending.
        """
        # Given we have 3 tasks for the same item in the database, but one of
        # them is a DELETE task and it is not SCHEDULED
        kwargs = {'action_id': uuid('action0')}
        actions = (eg.wim_actions('CREATE', **kwargs) +
                   eg.wim_actions('FIND', **kwargs) +
                   eg.wim_actions('DELETE', status='DONE', **kwargs))
        for i, action in enumerate(actions):
            action['task_index'] = i

        self.populate([
            {'nfvo_tenants': eg.tenant()}
        ] + eg.wim_set() + [
            {'instance_actions': eg.instance_action(**kwargs)},
            {'vim_wim_actions': actions}
        ])

        # When we reload the tasks
        self.thread.reload_actions()
        # No pending task should be found
        self.assertEqual(self.thread.pending_tasks, [])

    def test_reload_actions__batch(self):
        """Reloading in batches smaller than the total must still restore
        every task and every group.
        """
        # Given the group_limit is 10, and we have 24 actions
        group_limit = 10
        kwargs = {'action_id': uuid('action0')}
        # NOTE(review): siblings build CREATE/FIND/START triples; the third
        # batch here repeats 'FIND' -- confirm 'START' was not intended.
        # Left unchanged because the assertions below only depend on the
        # total count and grouping, not on the verbs.
        actions = (eg.wim_actions('CREATE', num_links=8, **kwargs) +
                   eg.wim_actions('FIND', num_links=8, **kwargs) +
                   eg.wim_actions('FIND', num_links=8, **kwargs))
        for i, action in enumerate(actions):
            action['task_index'] = i

        self.populate([
            {'nfvo_tenants': eg.tenant()}
        ] + eg.wim_set() + [
            {'instance_actions': eg.instance_action(**kwargs)},
            {'vim_wim_actions': actions}
        ])

        # When we reload the tasks
        self.thread.reload_actions(group_limit)

        # Then we should still see the actions in memory properly
        self.assertTasksEqual(self.thread.pending_tasks, actions)
        self.assertEqual(len(self.thread.grouped_tasks.values()), 8)

    @disable_foreign_keys
    def test_process_list__refresh(self):
        """Processing the refresh list must persist both the wan_link and
        the action updates.
        """
        # Fix: MagicMock's keyword is ``wraps`` -- the original ``wrap=``
        # was silently ignored; also ``update_action`` wrapped
        # ``update_wan_link`` by copy-paste, so wrap the matching method
        update_wan_link = MagicMock(wraps=self.persist.update_wan_link)
        update_action = MagicMock(wraps=self.persist.update_action)
        patches = dict(update_wan_link=update_wan_link,
                       update_action=update_action)

        with patch.multiple(self.persist, **patches):
            # Given we have 2 tasks in the refresh queue
            kwargs = {'action_id': uuid('action0')}
            actions = (eg.wim_actions('FIND', 'DONE', **kwargs) +
                       eg.wim_actions('CREATE', 'BUILD', **kwargs))
            for i, action in enumerate(actions):
                action['task_index'] = i

            self.populate(
                [{'instance_wim_nets': eg.instance_wim_nets()}] +
                [{'instance_actions':
                    eg.instance_action(num_tasks=2, **kwargs)}] +
                [{'vim_wim_actions': actions}])

            self.thread.insert_pending_tasks(actions)

            # When we process the refresh list
            processed = self.thread.process_list('refresh')

            # Then we should have 2 updates
            self.assertEqual(processed, 2)

            # And the database should be updated accordingly
            self.assertEqual(update_wan_link.call_count, 2)
            self.assertEqual(update_action.call_count, 2)

    @disable_foreign_keys
    def test_delete_superseed_create(self):
        """A DELETE inserted after its CREATE must supersede it, and both
        must drain from the pending queue.
        """
        # Given we insert a scheduled CREATE task
        instance_action = eg.instance_action(num_tasks=1)
        self.thread.pending_tasks = []
        engine = WimEngine(persistence=self.persist)
        self.addCleanup(engine.stop_threads)
        wan_links = eg.instance_wim_nets()
        create_actions = engine.create_actions(wan_links)
        delete_actions = engine.delete_actions(wan_links)
        engine.incorporate_actions(create_actions + delete_actions,
                                   instance_action)

        self.populate(instance_actions=instance_action,
                      vim_wim_actions=create_actions + delete_actions)

        self.thread.insert_pending_tasks(create_actions)

        self.assertTrue(self.thread.pending_tasks[0].is_scheduled)

        # When we insert the equivalent DELETE task
        self.thread.insert_pending_tasks(delete_actions)

        # Then the CREATE task should be superseded
        self.assertEqual(self.thread.pending_tasks[0].action, 'CREATE')
        self.assertTrue(self.thread.pending_tasks[0].is_superseded)

        self.thread.process_list('pending')
        self.thread.process_list('refresh')
        self.assertFalse(self.thread.pending_tasks)
+
+
@ignore_connector
class TestWimThread(unittest.TestCase):
    """Tests for WimThread that use a fully mocked persistence layer."""

    def setUp(self):
        wim = eg.wim(0)
        account = eg.wim_account(0, 0)
        account['wim'] = wim
        self.persist = MagicMock()
        self.thread = WimThread(self.persist, {}, account)
        self.thread.connector = MagicMock()

        super(TestWimThread, self).setUp()

    def test_process_refresh(self):
        """At most ``BATCH`` refresh tasks are processed per round."""
        # Given 30 tasks sit in the refresh queue
        common = {'action_id': uuid('action0')}
        tasks = eg.wim_actions('FIND', 'DONE', num_links=30, **common)
        self.thread.insert_pending_tasks(tasks)

        # When the refresh list is processed
        handled = self.thread.process_list('refresh')

        # Then only BATCH updates should have been performed
        self.assertEqual(handled, self.thread.BATCH)

    def test_process_refresh__with_superseded(self):
        """SUPERSEDED tasks are discarded cheaply and do not count against
        the BATCH limit.
        """
        # Given 30 tasks exist but every other one (15) is superseded
        common = {'action_id': uuid('action0')}
        tasks = eg.wim_actions('FIND', 'DONE', num_links=30, **common)
        self.thread.insert_pending_tasks(tasks)
        for task in self.thread.refresh_tasks[0:30:2]:
            task.status = 'SUPERSEDED'

        threshold = time()

        # When the refresh list is processed
        handled = self.thread.process_list('refresh')

        # Then 25 tasks should have been touched: the 15 superseded ones
        # (cheap, not counted for the limit) plus BATCH real updates
        self.assertEqual(handled, 25)

        # The superseded tasks are removed; 10 processed tasks are
        # rescheduled for later and 5 remain untouched
        remaining = self.thread.refresh_tasks
        untouched = [t for t in remaining if t.process_at <= threshold]
        rescheduled = [t for t in remaining if t.process_at > threshold]
        self.assertEqual(len(untouched), 5)
        self.assertEqual(len(rescheduled), 10)
        self.assertEqual(len(remaining), 15)
+
+
# Allow executing this test module directly
if __name__ == '__main__':
    unittest.main()
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
# This tox file allows the devs to run unit tests only for this subpackage.
# In order to do so, cd into the directory and run `tox`

[tox]
minversion = 1.8
# Fixed: the code under test is Python-3-only (it uses ``unittest.mock``
# and ``raise ... from ...``), so the py27 environment could never pass
envlist = py3,flake8,radon
skipsdist = True

[testenv]
passenv = *_DB_*
setenv =
    PATH = {env:PATH}:{toxinidir}/../../database_utils
    DBUTILS = {toxinidir}/../../database_utils
changedir = {toxinidir}
commands =
    nosetests -v -d {posargs:tests}
# NOTE: ``logging`` was removed from deps -- it is part of the standard
# library, and the PyPI package of that name is Python-2-only and breaks
# environment creation under Python 3
deps =
    WebTest
    bottle
    coverage
    jsonschema
    mock
    mysqlclient
    nose
    six
    PyYaml
    paramiko
    ipdb
    requests

[testenv:flake8]
changedir = {toxinidir}
deps = flake8
commands = flake8 {posargs:.}

[testenv:radon]
changedir = {toxinidir}
deps = radon
commands =
    radon cc --show-complexity --total-average {posargs:.}
    radon mi -s {posargs:.}

[coverage:run]
branch = True
source = {toxinidir}
omit =
    tests
    tests/*
    */test_*
    .tox/*

[coverage:report]
show_missing = True

[flake8]
exclude =
    .tox
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=E1101,E0203,W0201
+import json
+from pprint import pformat
+# from sys import exc_info
+from time import time
+
+from ..utils import filter_dict_keys as filter_keys
+from ..utils import merge_dicts, remove_none_items, safe_get, truncate
+from .actions import CreateAction, DeleteAction, FindAction
+from .errors import (
+ InconsistentState,
+ NoRecordFound,
+ NoExternalPortFound
+)
+from osm_ro_plugin.sdnconn import SdnConnectorError
+
# Instance-network states that make establishing WAN connectivity impossible
INSTANCE_NET_STATUS_ERROR = ('DOWN', 'ERROR', 'VIM_ERROR',
                             'DELETED', 'SCHEDULED_DELETION')
# Instance-network states that mean "not ready yet, defer and retry later"
INSTANCE_NET_STATUS_PENDING = ('BUILD', 'INACTIVE', 'SCHEDULED_CREATION')
# VM states considered fatal for the WAN link
INSTANCE_VM_STATUS_ERROR = ('ERROR', 'VIM_ERROR',
                            'DELETED', 'SCHEDULED_DELETION')
+
+
class RefreshMixin(object):
    # Shared implementation of the periodic status refresh used by both the
    # CREATE and the FIND WAN-link actions.
    def refresh(self, connector, persistence):
        """Ask the external WAN Infrastructure Manager system for updates on
        the status of the task.

        Arguments:
            connector: object with API for accessing the WAN
                Infrastructure Manager system
            persistence: abstraction layer for the database

        Returns:
            dict: the filtered connector response; note 'sdn_status' has
                been popped out of it (renamed to 'status' in the DB update)
        """
        fields = ('sdn_status', 'sdn_info', 'error_msg')
        result = dict.fromkeys(fields)

        try:
            result.update(
                connector
                .get_connectivity_service_status(self.wim_internal_id))
        except SdnConnectorError as ex:
            # A connector failure is recorded as WIM_ERROR instead of
            # propagating, so the task stays alive and is refreshed again
            self.logger.exception(ex)
            result.update(sdn_status='WIM_ERROR', error_msg=truncate(ex))

        # Drop any extra keys the connector may have returned
        result = filter_keys(result, fields)

        action_changes = remove_none_items({
            'extra': merge_dicts(self.extra, result),
            'status': 'BUILD' if result['sdn_status'] == 'BUILD' else None,
            'error_msg': result['error_msg'],
            'modified_at': time()})
        # NOTE: ``pop`` is evaluated before ``merge_dicts`` runs, so
        # ``result`` no longer carries 'sdn_status' after this line
        link_changes = merge_dicts(result, status=result.pop('sdn_status'))
        # ^  Rename field: sdn_status => status

        persistence.update_wan_link(self.item_id,
                                    remove_none_items(link_changes))

        self.save(persistence, **action_changes)

        return result
+
+
class WanLinkCreate(RefreshMixin, CreateAction):
    """Action that creates a WAN link (connectivity service) in the WIM."""

    def fail(self, persistence, reason, status='FAILED'):
        """Mark both the task and the associated wan_link record as failed."""
        changes = {'status': 'ERROR', 'error_msg': truncate(reason)}
        persistence.update_wan_link(self.item_id, changes)
        return super(WanLinkCreate, self).fail(persistence, reason, status)

    def process(self, connector, persistence, ovim):
        """Process the current task.
        First we check if all the dependencies are ready,
        then we call ``execute`` to actually execute the action.

        Arguments:
            connector: object with API for accessing the WAN
                Infrastructure Manager system
            persistence: abstraction layer for the database
            ovim: instance of openvim, abstraction layer that enable
                SDN-related operations
        """
        wan_link = persistence.get_by_uuid('instance_wim_nets', self.item_id)

        # First we check if all the dependencies are solved
        instance_nets = persistence.get_instance_nets(
            wan_link['instance_scenario_id'], wan_link['sce_net_id'])

        try:
            dependency_statuses = [n['status'] for n in instance_nets]
        except KeyError:
            self.logger.debug('`status` not found in\n\n%s\n\n',
                              json.dumps(instance_nets, indent=4))
            # Fix: previously execution fell through this handler with
            # ``dependency_statuses`` undefined, causing a confusing
            # NameError below; re-raise so the real problem surfaces
            raise

        errored = [instance_nets[i]
                   for i, status in enumerate(dependency_statuses)
                   if status in INSTANCE_NET_STATUS_ERROR]
        if errored:
            return self.fail(
                persistence,
                'Impossible to stablish WAN connectivity due to an issue '
                'with the local networks:\n\t' +
                '\n\t'.join('{uuid}: {status}'.format(**n) for n in errored))

        pending = [instance_nets[i]
                   for i, status in enumerate(dependency_statuses)
                   if status in INSTANCE_NET_STATUS_PENDING]
        if pending:
            return self.defer(
                persistence,
                'Still waiting for the local networks to be active:\n\t' +
                '\n\t'.join('{uuid}: {status}'.format(**n) for n in pending))

        return self.execute(connector, persistence, ovim, instance_nets)

    def _get_connection_point_info(self, persistence, ovim, instance_net):
        """Retrieve information about the connection PoP <> WAN

        Arguments:
            persistence: object that encapsulates persistence logic
                (e.g. db connection)
            ovim: object that encapsulates network management logic (openvim)
            instance_net: record with the information about a local network
                (inside a VIM). This network will be connected via a WAN link
                to a different network in a distinct VIM.
                This method is used to trace what would be the way this
                network can be accessed from the outside world.

        Returns:
            dict: Record representing the wan_port_mapping associated to the
                given instance_net. The expected fields are:
                **wim_id**, **datacenter_id**, **device_id** (the local
                network is expected to be connected at this switch dpid),
                **device_interface_id**, **service_endpoint_id**,
                **service_mapping_info**.

        Raises:
            NoExternalPortFound: if neither the datacenter rules nor the
                SDN assist can determine an external port.
            InconsistentState: if no wim_port_mapping record matches the
                discovered external port.
        """
        # First, we need to find a route from the datacenter to the outside
        # world. For that, we can use the rules given in the datacenter
        # configuration:
        datacenter_id = instance_net['datacenter_id']
        datacenter = persistence.get_datacenter_by(datacenter_id)
        rules = safe_get(datacenter, 'config.external_connections', {}) or {}
        vim_info = instance_net.get('vim_info', {}) or {}
        # Alternatively, we can look for it, using the SDN assist
        external_port = (self._evaluate_rules(rules, vim_info) or
                         self._get_port_sdn(ovim, instance_net))

        if not external_port:
            raise NoExternalPortFound(instance_net)

        # Then, we find the WAN switch that is connected to this external
        # port.
        # Fix: ``criteria`` used to be assigned inside the try block after a
        # call that could itself raise NoRecordFound, so the except handler
        # could hit a NameError; build it before entering the try block.
        wim_account = persistence.get_wim_account_by(uuid=self.wim_account_id)

        criteria = {
            'wim_id': wim_account['wim_id'],
            'device_id': external_port[0],
            'device_interface_id': external_port[1],
            'datacenter_id': datacenter_id}

        try:
            wan_port_mapping = persistence.query_one(
                FROM='wim_port_mappings',
                WHERE=criteria)
        except NoRecordFound as e:
            ex = InconsistentState('No WIM port mapping found:'
                                   'wim_account: {}\ncriteria:\n{}'.format(
                                       self.wim_account_id,
                                       pformat(criteria)))
            raise ex from e

        # It is important to return encapsulation information if present
        mapping = merge_dicts(
            wan_port_mapping.get('service_mapping_info'),
            filter_keys(vim_info, ('encapsulation_type', 'encapsulation_id'))
        )

        return merge_dicts(wan_port_mapping, service_mapping_info=mapping)

    def _get_port_sdn(self, ovim, instance_net):
        """Ask the SDN assist (openvim) for the external (dpid, port) pair
        of a local network; best effort, returns None on any failure.
        """
        try:
            local_port_mapping = ovim.get_ports(instance_net['sdn_net_id'])

            if local_port_mapping:
                return (local_port_mapping[0]['switch_dpid'],
                        local_port_mapping[0]['switch_port'])
        except Exception:  # noqa
            # Best effort: any openvim failure just means "no port found"
            self.logger.exception('Problems when calling OpenVIM')

        # Fix: logging uses lazy %-style arguments, so the original '{}'
        # placeholder was never interpolated
        self.logger.debug("No ports found for sdn_net_id='%s'",
                          instance_net['sdn_net_id'])
        return None

    def _evaluate_rules(self, rules, vim_info):
        """Given a ``vim_info`` dict from a ``instance_net`` record, evaluate
        the set of rules provided during the VIM/datacenter registration to
        determine an external port used to connect that VIM/datacenter to
        other ones where different parts of the NS will be instantiated.

        For example, considering a VIM/datacenter is registered like the
        following::

            vim_record = {
                "uuid": ...
                ...  # Other properties associated with the VIM/datacenter
                "config": {
                    ...  # Other configuration
                    "external_connections": [
                        {
                            "condition": {
                                "provider:physical_network": "provider_net1",
                                ...  # This method will look up all the keys
                                     # listed here in the
                                     # instance_nets.vim_info dict and
                                     # compare the values. When all the
                                     # values match, the associated
                                     # vim_external_port will be selected.
                            },
                            "vim_external_port": {"switch": "switchA",
                                                  "port": "portB"}
                        },
                        ...  # The user can provide as many rules as needed,
                             # however only the first one to match will be
                             # applied.
                    ]
                }
            }

        When an ``instance_net`` record is instantiated in that datacenter
        with the following information::

            instance_net = {
                "uuid": ...
                ...
                "vim_info": {
                    ...
                    "provider_physical_network": "provider_net1",
                }
            }

        Then, ``switchA`` and ``portB`` will be used to stablish the WAN
        connection.

        Arguments:
            rules (list): Set of dicts containing the keys ``condition`` and
                ``vim_external_port``. This list should be extracted from
                ``vim['config']['external_connections']`` (as stored in the
                database).
            vim_info (dict): Information given by the VIM Connector, against
                which the rules will be evaluated.

        Returns:
            tuple: switch id (local datacenter switch) and port or None if
                the rule does not match.
        """
        rule = next((r for r in rules if self._evaluate_rule(r, vim_info)), {})
        if 'vim_external_port' not in rule:
            self.logger.debug('No external port found.\n'
                              'rules:\n%r\nvim_info:\n%r\n\n',
                              rules, vim_info)
            return None

        return (rule['vim_external_port']['switch'],
                rule['vim_external_port']['port'])

    @staticmethod
    def _evaluate_rule(rule, vim_info):
        """Evaluate the conditions from a single rule to ``vim_info`` and
        determine if the rule should be applicable or not.

        Please check :obj:`~._evaluate_rules` for more information.

        Arguments:
            rule (dict): Data structure containing the keys ``condition`` and
                ``vim_external_port``. This should be one of the elements in
                ``vim['config']['external_connections']`` (as stored in the
                database).
            vim_info (dict): Information given by the VIM Connector, against
                which the rules will be evaluated.

        Returns:
            True or False: If all the conditions are met.
        """
        condition = rule.get('condition', {}) or {}
        return all(safe_get(vim_info, k) == v for k, v in condition.items())

    @staticmethod
    def _derive_connection_point(wan_info):
        """Build a connection-point dict (as expected by the connector API)
        from a wan_port_mapping record.
        """
        point = {'service_endpoint_id': wan_info['service_endpoint_id']}
        # TODO: Cover other scenarios, e.g. VXLAN.
        details = wan_info.get('service_mapping_info', {})
        if details.get('encapsulation_type') == 'vlan':
            point['service_endpoint_encapsulation_type'] = 'dot1q'
            point['service_endpoint_encapsulation_info'] = {
                'vlan': details['encapsulation_id'],
                'switch_dpid': wan_info['switch_dpid'],
                'switch_port': wan_info['switch_port']
            }
        else:
            point['service_endpoint_encapsulation_type'] = 'none'
        return point

    @staticmethod
    def _derive_service_type(connection_points):
        """Choose the connectivity service type from the number of points."""
        # TODO: add multipoint and L3 connectivity.
        if len(connection_points) == 2:
            return 'ELINE'
        else:
            # added to support DPB WIM connector
            return 'ELAN'

    def _update_persistent_data(self, persistence, service_uuid, conn_info):
        """Store plugin/connector specific information in the database"""
        persistence.update_wan_link(self.item_id, {
            'wim_internal_id': service_uuid,
            'sdn_info': {'conn_info': conn_info},
            'status': 'BUILD'})

    def execute(self, connector, persistence, ovim, instance_nets):
        """Actually execute the action, since now we are sure all the
        dependencies are solved
        """
        try:
            wan_info = (self._get_connection_point_info(persistence, ovim, net)
                        for net in instance_nets)
            connection_points = [self._derive_connection_point(w)
                                 for w in wan_info]

            service_uuid, info = connector.create_connectivity_service(
                self._derive_service_type(connection_points),
                connection_points
                # TODO: other properties, e.g. bandwidth
            )
        except (SdnConnectorError, InconsistentState,
                NoExternalPortFound) as ex:
            self.logger.exception(ex)
            return self.fail(
                persistence,
                'Impossible to stablish WAN connectivity.\n\t{}'.format(ex))

        self.logger.debug('WAN connectivity established %s\n%s\n',
                          service_uuid, json.dumps(info, indent=4))
        self.wim_internal_id = service_uuid
        self._update_persistent_data(persistence, service_uuid, info)
        self.succeed(persistence)
        return service_uuid
+
+
class WanLinkDelete(DeleteAction):
    """Action that tears down a WAN link previously created in the WIM."""

    def succeed(self, persistence):
        """Mark the wan_link record as DELETED (best effort) and finish."""
        try:
            persistence.update_wan_link(self.item_id, {'status': 'DELETED'})
        except NoRecordFound:
            # The record may have been removed already; nothing to update
            self.logger.debug('%s(%s) record already deleted',
                              self.item, self.item_id)

        return super(WanLinkDelete, self).succeed(persistence)

    def get_wan_link(self, persistence):
        """Retrieve information about the wan_link

        It might be cached, or arrive from the database
        """
        if self.extra.get('wan_link'):
            # First try a cached version of the data
            return self.extra['wan_link']

        return persistence.get_by_uuid(
            'instance_wim_nets', self.item_id)

    def process(self, connector, persistence, ovim):
        """Delete a WAN link previously created"""
        wan_link = self.get_wan_link(persistence)
        if 'ERROR' in (wan_link.get('status') or ''):
            return self.fail(
                persistence,
                'Impossible to delete WAN connectivity, '
                'it was never successfully established:'
                '\n\t{}'.format(wan_link['error_msg']))

        # Prefer the id stored in the record, falling back to the task's own
        internal_id = wan_link.get('wim_internal_id') or self.internal_id

        if not internal_id:
            self.logger.debug('No wim_internal_id found in\n%s\n%s\n'
                              'Assuming no network was created yet, '
                              'so no network have to be deleted.',
                              json.dumps(wan_link, indent=4),
                              json.dumps(self.as_dict(), indent=4))
            return self.succeed(persistence)

        try:
            # Fix: use the id resolved above -- the code previously ignored
            # it and always read ``self.wim_internal_id`` (and the local
            # name ``id`` shadowed the builtin)
            service_id = internal_id
            conn_info = safe_get(wan_link, 'sdn_info.conn_info')
            self.logger.debug('Connection Service %s (wan_link: %s):\n%s\n',
                              service_id, wan_link['uuid'],
                              json.dumps(conn_info, indent=4))
            result = connector.delete_connectivity_service(
                service_id, conn_info)
        except (SdnConnectorError, InconsistentState) as ex:
            self.logger.exception(ex)
            return self.fail(
                persistence,
                'Impossible to delete WAN connectivity.\n\t{}'.format(ex))

        self.logger.debug('WAN connectivity removed %s', result)
        self.succeed(persistence)

        return result
+
+
class WanLinkFind(RefreshMixin, FindAction):
    """Action that adopts an existing WAN link; only refreshes its status."""
    pass
+
+
# Dispatch table: maps the ``action`` column of vim_wim_actions records to
# the Action subclass that implements it
ACTIONS = {
    'CREATE': WanLinkCreate,
    'DELETE': WanLinkDelete,
    'FIND': WanLinkFind,
}
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""
+Thread-based interaction with WIMs. Tasks are stored in the
+database (vim_wim_actions table) and processed sequentially
+
+Please check the Action class for information about the content of each action.
+"""
+
+import logging
+import threading
+from contextlib import contextmanager
+from functools import partial
+from itertools import islice, chain, takewhile
+from operator import itemgetter, attrgetter
+# from sys import exc_info
+from time import time, sleep
+
+import queue
+
+from . import wan_link_actions
+from ..utils import ensure, partition, pipe
+from .actions import IGNORE, PENDING, REFRESH
+from .errors import (
+ DbBaseException,
+ QueueFull,
+ InvalidParameters as Invalid,
+ UndefinedAction,
+)
+from osm_ro_plugin.sdn_failing import SdnFailingConnector
+from osm_ro_plugin.sdnconn import SdnConnectorError
+from osm_ro_plugin.sdn_dummy import SdnDummyConnector
+
# Maps each database item type to its table of action classes (per verb)
ACTIONS = {
    'instance_wim_nets': wan_link_actions.ACTIONS
}

# Built-in SDN connectors keyed by wim type; connectors provided by plugins
# are injected separately via the ``plugins`` constructor argument
CONNECTORS = {
    # "odl": wimconn_odl.OdlConnector,
    "dummy": SdnDummyConnector,
    # Add extra connectors here not managed via plugins
}
+
+
class WimThread(threading.Thread):
    """Specialized task queue implementation that runs in an isolated thread.

    Objects of this class have a few methods that are intended to be used
    outside of the thread:

    - start
    - insert_task
    - reload
    - exit

    All the other methods are used internally to manipulate/process the task
    queue.
    """
    RETRY_SCHEDULED = 10    # Reschedule interval after processing: 10 seconds
    REFRESH_BUILD = 10      # Refresh interval while a net is building: 10 s
    REFRESH_ACTIVE = 60     # Refresh interval for active nets: 1 minute
    BATCH = 10              # Maximum actions processed per round
    QUEUE_SIZE = 2000       # Capacity of the inter-thread message queue
    RECOVERY_TIME = 5       # Sleep 5s to leave the system some time to recover
    MAX_RECOVERY_TIME = 180
    WAITING_TIME = 1        # Wait 1s for tasks to arrive, when there are none

    def __init__(self, persistence, plugins, wim_account, logger=None, ovim=None):
        """Init a thread.

        Arguments:
            persistence: Database abstraction layer
            plugins: dictionary with the vim/sdn plugins
            wim_account: Record containing wim_account, tenant and wim
                information.
            logger: optional logging.Logger (a child of 'openmano.wim' is
                created when omitted).
            ovim: openvim handle, forwarded to each task's ``process`` call.
        """
        name = '{}.{}.{}'.format(wim_account['wim']['name'],
                                 wim_account['name'], wim_account['uuid'])
        super(WimThread, self).__init__(name=name)
        self.plugins = plugins
        # Guarantee the dummy connector is always resolvable as a plugin
        if "rosdn_dummy" not in self.plugins:
            self.plugins["rosdn_dummy"] = SdnDummyConnector

        self.name = name
        self.connector = None
        self.wim_account = wim_account

        self.logger = logger or logging.getLogger('openmano.wim.'+self.name)
        self.persist = persistence
        self.ovim = ovim

        # Inbox for other threads: task dicts/lists or the control strings
        # 'reload' / 'exit' (see insert_task / run)
        self.task_queue = queue.Queue(self.QUEUE_SIZE)

        self.refresh_tasks = []
        """Time ordered task list for refreshing the status of WIM nets"""

        self.pending_tasks = []
        """Time ordered task list for creation, deletion of WIM nets"""

        self.grouped_tasks = {}
        """ It contains all the creation/deletion pending tasks grouped by
        its concrete vm, net, etc

            <item><item_id>:
               - <task1>  # e.g. CREATE task
                 <task2>  # e.g. DELETE task
        """

        self._insert_task = {
            PENDING: partial(self.schedule, list_name='pending'),
            REFRESH: partial(self.schedule, list_name='refresh'),
            IGNORE: lambda task, *_, **__: task.save(self.persist)}
        """Send the task to the right processing queue"""

    def on_start(self):
        """Run a series of procedures every time the thread (re)starts"""
        self.connector = self.get_connector()
        self.reload_actions()

    def get_connector(self):
        """Create an WimConnector instance according to the wim.type

        Falls back to a SdnFailingConnector (which fails every task with a
        descriptive message) when the connector cannot be instantiated.
        """
        error_msg = ''
        account_id = self.wim_account['uuid']
        try:
            account = self.persist.get_wim_account_by(
                uuid=account_id, hide=None)  # Credentials need to be available
            wim = account['wim']
            mapping = self.persist.query('wim_port_mappings',
                                         WHERE={'wim_id': wim['uuid']},
                                         error_if_none=False)
            if wim["type"] in CONNECTORS:
                return CONNECTORS[wim['type']](wim, account, {'service_endpoint_mapping': mapping or []})
            else:  # load a plugin
                return self.plugins["rosdn_" + wim["type"]](
                    wim, account, {'service_endpoint_mapping': mapping or []})
        except DbBaseException as ex:
            error_msg = ('Error when retrieving WIM account ({})\n'
                         .format(account_id)) + str(ex)
            self.logger.error(error_msg, exc_info=True)
        except KeyError as ex:
            # NOTE(review): if the KeyError is raised before ``wim`` is bound
            # (e.g. inside get_wim_account_by), formatting wim['type'] below
            # would raise UnboundLocalError — confirm and guard if needed.
            error_msg = ('Unable to find the WIM connector for WIM ({})\n'
                         .format(wim['type'])) + str(ex)
            self.logger.error(error_msg)
        except (SdnConnectorError, Exception) as ex:
            # TODO: Remove the Exception class here when the connector class is
            # ready
            error_msg = ('Error when loading WIM connector for WIM ({})\n'
                         .format(wim['type'])) + str(ex)
            self.logger.error(error_msg, exc_info=True)

        error_msg_extra = ('Any task targeting WIM account {} ({}) will fail.'
                           .format(account_id, self.wim_account.get('name')))
        self.logger.warning(error_msg_extra)
        return SdnFailingConnector(error_msg + '\n' + error_msg_extra)

    @contextmanager
    def avoid_exceptions(self):
        """Make a real effort to keep the thread alive, by avoiding the
        exceptions. They are instead logged as a critical errors.
        """
        try:
            yield
        except Exception as ex:
            self.logger.critical("Unexpected exception %s", ex, exc_info=True)
            sleep(self.RECOVERY_TIME)

    def reload_actions(self, group_limit=100):
        """Read actions from database and reload them at memory.

        This method will clean and reload the attributes ``refresh_tasks``,
        ``pending_tasks`` and ``grouped_tasks``

        Attributes:
            group_limit (int): maximum number of action groups (those that
                refer to the same ``<item, item_id>``) to be retrieved from the
                database in each batch.
        """

        # First we clean the cache to let the garbage collector work
        self.refresh_tasks = []
        self.pending_tasks = []
        self.grouped_tasks = {}

        offset = 0

        while True:
            # Do things in batches
            task_groups = self.persist.get_actions_in_groups(
                self.wim_account['uuid'], item_types=('instance_wim_nets',),
                group_offset=offset, group_limit=group_limit)
            # NOTE(review): advancing by group_limit - 1 makes consecutive
            # batches overlap by one group, which looks like an off-by-one
            # (offset += group_limit) unless get_actions_in_groups may return
            # a truncated last group — confirm against the persistence layer.
            offset += (group_limit - 1)  # Update for the next batch

            if not task_groups:
                break

            # Keep only groups whose DELETE actions have not started executing
            pending_groups = (g for _, g in task_groups if is_pending_group(g))

            for task_list in pending_groups:
                with self.avoid_exceptions():
                    self.insert_pending_tasks(filter_pending_tasks(task_list))

        self.logger.debug(
            'Reloaded wim actions pending: %d refresh: %d',
            len(self.pending_tasks), len(self.refresh_tasks))

    def insert_pending_tasks(self, task_list):
        """Insert task in the list of actions being processed"""
        task_list = [action_from(task, self.logger) for task in task_list]

        for task in task_list:
            group = task.group_key
            self.grouped_tasks.setdefault(group, [])
            # Each task can try to supersede the other ones,
            # but just DELETE actions will actually do
            task.supersede(self.grouped_tasks[group])
            self.grouped_tasks[group].append(task)

        # We need a separate loop so each task can check all the other
        # ones before deciding
        for task in task_list:
            self._insert_task[task.processing](task)
            self.logger.debug('Insert WIM task: %s (%s): %s %s',
                              task.id, task.status, task.action, task.item)

    def schedule(self, task, when=None, list_name='pending'):
        """Insert a task in the correct list, respecting the schedule.
        The refreshing list is ordered by threshold_time (task.process_at)
        It is assumed that this is called inside this thread

        Arguments:
            task (Action): object representing the task.
                This object must implement the ``process`` method and inherit
                from the ``Action`` class
            list_name: either 'refresh' or 'pending'
            when (float): unix time in seconds since as a float number
        """
        processing_list = {'refresh': self.refresh_tasks,
                           'pending': self.pending_tasks}[list_name]

        when = when or time()
        task.process_at = when

        # Find the insertion point that keeps the list ordered by process_at
        schedule = (t.process_at for t in processing_list)
        index = len(list(takewhile(lambda moment: moment <= when, schedule)))

        processing_list.insert(index, task)
        self.logger.debug(
            'Schedule of %s in "%s" - waiting position: %d (%f)',
            task.id, list_name, index, task.process_at)

        return task

    def process_list(self, list_name='pending'):
        """Process actions in batches and reschedule them if necessary

        Returns the number of tasks removed from the list (processed or
        superseded), so the caller can detect an idle round.
        """
        task_list, handler = {
            'refresh': (self.refresh_tasks, self._refresh_single),
            'pending': (self.pending_tasks, self._process_single)}[list_name]

        now = time()
        # Only tasks whose scheduled time has arrived are considered
        waiting = ((i, task) for i, task in enumerate(task_list)
                   if task.process_at is None or task.process_at <= now)

        # Superseded tasks are just persisted, not processed
        is_superseded = pipe(itemgetter(1), attrgetter('is_superseded'))
        superseded, active = partition(is_superseded, waiting)
        superseded = [(i, t.save(self.persist)) for i, t in superseded]

        batch = islice(active, self.BATCH)
        refreshed = [(i, handler(t)) for i, t in batch]

        # Since pop changes the indexes in the list, we need to do it backwards
        remove = sorted([i for i, _ in chain(refreshed, superseded)])
        return len([task_list.pop(i) for i in reversed(remove)])

    def _refresh_single(self, task):
        """Refresh just a single task, and reschedule it if necessary"""
        now = time()

        result = task.refresh(self.connector, self.persist)
        self.logger.debug('Refreshing WIM task: %s (%s): %s %s => %r',
                          task.id, task.status, task.action, task.item, result)

        # Nets still building are polled more frequently than active ones
        interval = self.REFRESH_BUILD if task.is_build else self.REFRESH_ACTIVE
        self.schedule(task, now + interval, 'refresh')

        return result

    def _process_single(self, task):
        """Process just a single task, and reschedule it if necessary"""
        now = time()

        result = task.process(self.connector, self.persist, self.ovim)
        self.logger.debug('Executing WIM task: %s (%s): %s %s => %r',
                          task.id, task.status, task.action, task.item, result)

        # A processed DELETE closes the whole group for that item
        if task.action == 'DELETE':
            del self.grouped_tasks[task.group_key]

        self._insert_task[task.processing](task, now + self.RETRY_SCHEDULED)

        return result

    def insert_task(self, task):
        """Send a message to the running thread

        This function is supposed to be called outside of the WIM Thread.

        Arguments:
            task (str or dict): `"exit"`, `"reload"` or dict representing a
                task. For more information about the fields in task, please
                check the Action class.

        Raises:
            QueueFull: when the internal message queue is at capacity.
        """
        try:
            self.task_queue.put(task, False)
            return None
        except queue.Full as e:
            ex = QueueFull(self.name)
            raise ex from e

    def reload(self):
        """Send a message to the running thread to reload itself"""
        self.insert_task('reload')

    def exit(self):
        """Send a message to the running thread to kill itself"""
        self.insert_task('exit')

    def run(self):
        """Main loop: drain the message queue, then process the pending and
        refresh lists, restarting (reload connector + actions) when requested
        or when the connector is in the failing state.
        """
        self.logger.debug('Starting: %s', self.name)
        recovery_time = 0
        while True:
            # (Re)load connector and in-memory task lists
            self.on_start()
            reload_thread = False
            self.logger.debug('Reloaded: %s', self.name)

            while True:
                with self.avoid_exceptions():
                    while not self.task_queue.empty():
                        task = self.task_queue.get()
                        if isinstance(task, dict):
                            self.insert_pending_tasks([task])
                        elif isinstance(task, list):
                            self.insert_pending_tasks(task)
                        elif isinstance(task, str):
                            if task == 'exit':
                                self.logger.debug('Finishing: %s', self.name)
                                return 0
                            elif task == 'reload':
                                reload_thread = True
                                break
                        # NOTE(review): task_done() is skipped for 'exit' and
                        # 'reload' messages — harmless unless Queue.join() is
                        # ever used on task_queue; confirm.
                        self.task_queue.task_done()

                    if reload_thread:
                        break

                    # Idle round: nothing was processed in either list
                    if not(self.process_list('pending') +
                           self.process_list('refresh')):
                        sleep(self.WAITING_TIME)

                    if isinstance(self.connector, SdnFailingConnector):
                        # Wait sometime to try instantiating the connector
                        # again and restart
                        # Increase the recovery time if restarting is not
                        # working (up to a limit)
                        recovery_time = min(self.MAX_RECOVERY_TIME,
                                            recovery_time + self.RECOVERY_TIME)
                        sleep(recovery_time)
                        break
                    else:
                        recovery_time = 0

        # NOTE(review): this line appears unreachable — the loops above only
        # terminate via the 'exit' return.
        self.logger.debug("Finishing")
+
+
def is_pending_group(group):
    """Return True when no DELETE action in *group* has started executing.

    A group is reloadable ("pending") when every DELETE record it contains
    is still in the 'SCHEDULED' state (or it has no DELETE record at all).
    """
    executed_delete = any(
        record['action'] == 'DELETE' and record['status'] != 'SCHEDULED'
        for record in group)
    return not executed_delete
+
+
def filter_pending_tasks(group):
    """Yield the records of *group* that are still relevant after a reload.

    A record is kept when it has not been executed yet ('SCHEDULED') or when
    it is a CREATE/FIND action (whose outcome later tasks may depend on).
    """
    relevant_actions = ('CREATE', 'FIND')
    for record in group:
        if record['status'] == 'SCHEDULED' or record['action'] in relevant_actions:
            yield record
+
+
def action_from(record, logger=None, mapping=ACTIONS):
    """Create an Action object from a action record (dict)

    Arguments:
        record (dict): action information; must contain at least the keys
            ``item`` and ``action``.
        logger: optional logger forwarded to the Action constructor.
        mapping (dict): Nested data structure that maps the relationship
            between action properties and object constructors. This data
            structure should be a dict with 2 levels of keys: item type and
            action type. Example::
                {'wan_link':
                    {'CREATE': WanLinkCreate}
                    ...}
                ...}

    Return:
        (Action.Base): Object representing the action

    Raises:
        Invalid: when a mandatory key is missing from ``record``.
        UndefinedAction: when no constructor is registered for the
            ``<item, action>`` pair.
    """
    for key in ('item', 'action'):
        ensure(key in record, Invalid('`record` should contain "{}"'.format(key)))

    try:
        factory = mapping[record['item']][record['action']]
        return factory(record, logger=logger)
    except KeyError as e:
        raise UndefinedAction(record['item'], record['action']) from e
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+from .sdnconn import SdnConnectorBase
+
+
# TODO: implement the actual OpenDaylight interaction in this module

class OdlConnector(SdnConnectorBase):
    """Stub SDN connector for OpenDaylight.

    No operation is implemented yet: every method raises
    ``NotImplementedError``.
    """

    def get_connectivity_service_status(self, link_uuid):
        """Not implemented yet."""
        raise NotImplementedError

    def create_connectivity_service(self, *args, **kwargs):
        """Not implemented yet."""
        raise NotImplementedError

    def delete_connectivity_service(self, link_uuid):
        """Not implemented yet."""
        raise NotImplementedError
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+PyYAML
+bottle
+mysqlclient
+jsonschema
+paramiko
+requests==2.18.*
+netaddr
+logutils
+osm-im @ git+https://osm.etsi.org/gerrit/osm/IM.git#egg=osm-im
--- /dev/null
+#!/usr/bin/env python3
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# from distutils.core import setup
+# from distutils.command.install_data import install_data
+from setuptools import setup
+from os import system
+# import glob
+
# ---------------------------------------------------------------------------
# Distribution metadata for the OSM Resource Orchestrator (RO) package.
# ---------------------------------------------------------------------------
_name = 'osm_ro'
_description = 'OSM Resource Orchestrator'
_author = 'ETSI OSM'
_author_email = 'alfonso.tiernosepulveda@telefonica.com'
_maintainer = 'garciadeblas'
_maintainer_email = 'gerardo.garciadeblas@telefonica.com'
_license = 'Apache 2.0'
_url = 'https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary'

# Runtime dependencies.  TODO py3 revise
_requirements = [
    "osm-im @ git+https://osm.etsi.org/gerrit/osm/IM.git#egg=osm-im",
    "PyYAML",
    "bottle",
    "logutils",
    "jsonschema",
    "paramiko",
    "mysqlclient",
    # common to VIMS
    "requests",
    "netaddr",  # openstack, aws, vmware
]

setup(
    name=_name,
    # Version derived from git tags by the setuptools-version-command plugin.
    version_command=('git -C .. describe --match v* --tags --long --dirty',
                     'pep440-git-full'),
    description=_description,
    long_description=open('README.rst').read(),
    author=_author,
    author_email=_author_email,
    maintainer=_maintainer,
    maintainer_email=_maintainer_email,
    url=_url,
    license=_license,
    packages=[_name],
    package_dir={_name: _name},
    scripts=['osm_ro/scripts/RO-start.sh'],
    install_requires=_requirements,
    include_package_data=True,
    setup_requires=['setuptools-version-command'],
)
+
--- /dev/null
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Maintainer: Gerardo Garcia <gerardo.garciadeblas@telefonica.com>
+Depends3 : python3-bottle, python3-jsonschema, python3-mysqldb, python3-paramiko, python3-yaml,
+ libmysqlclient-dev, mysql-client, python3-osm-ro-plugin,
+ python3-requests, python3-netaddr,
+ python3-osm-im,
+
+# TODO py3 libssl-dev, libffi-dev, python-logutils, python-lib-osm-openvim,
+# TODO py3 python3-networkx
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: simple_ha
    description: Simple network scenario consisting of a two-VM VNF connected to an external network
+ vnfs:
+ linux1: # vnf/net name in the scenario
+ vnf_name: linux_test_2vms # VNF name as introduced in OPENMANO DB
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - linux1: control0 # Node and its interface
+ - linux1: control1 # Node and its interface
+
+
+
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+
+vnf:
+ name: linux_test_2vms
    description: Two-VM (HA pair) VNF with traditional cloud VMs based on generic Linux OS
+ external-connections:
+ - name: control0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: linux-VM-HA-A # Virtual Machine this interface belongs to
+ local_iface_name: eth0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface 0
+ - name: control1
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: linux-VM-HA-B # Virtual Machine this interface belongs to
+ local_iface_name: eth0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface 1
+ VNFC:
+ - name: linux-VM-HA-A
+ description: Generic Linux Virtual Machine
        availability_zone: A   # availability zone A
+ #Copy the image to a compute path and edit this path
+ image name: TestVM
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1024 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 10
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:11.0"
+ numas: []
+ - name: linux-VM-HA-B
+ description: Generic Linux Virtual Machine
        availability_zone: B   # availability zone B
+ #Copy the image to a compute path and edit this path
+ image name: TestVM
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1024 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 10
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:12.0"
+ numas: []
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: vnf_additional_disk_empty_volume
+ description: Just deploy vnf_2_disks
+ public: false # if available for other tenants
+ vnfs:
+ vnf_2_disks: # vnf name in the scenario
+ #identify an already openmano uploaded VNF either by vnf_id (uuid, prefered) or vnf_name
+ #vnf_id: 0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e #prefered id method
+ vnf_name: vnf_additional_disk_empty_volume #can fail if several vnfs matches this name
+ #graph: {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ networks:
+ mgmt:
+ # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+ type: bridge
+ external: true #this will be connected outside
+ interfaces:
+ - vnf_2_disks: mgmt0
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: vnf_additional_disk_empty_volume
+ description: VNF with additional volume based on image
+ # class: parent # Optional. Used to organize VNFs
+ external-connections:
+ - name: mgmt0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: TEMPLATE-VM # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface
+ VNFC: # Virtual machine array
+ - name: TEMPLATE-VM # name of Virtual Machine
+ description: TEMPLATE description
+ image name: ubuntu16.04
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1000 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 5 # disk size in GiB, by default 1
+ #numas:
+ #- paired-threads: 5 # "cores", "paired-threads", "threads"
+ # paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ # memory: 14 # GBytes
+ # interfaces: []
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:0a.0" # Optional. Virtual PCI address
+ bandwidth: 1 Mbps # Optional. Informative only
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
        devices:    # Optional, order determines device letter assignment (hda, hdb, ...)
+ - type: disk # "disk","cdrom","xml"
+ size: 1
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+ # vpci: "0000:00:03.0" # Optional, not for disk or cdrom
+ # Additional Virtual Machines would be included here
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: vnf_floating_ip
+ description: vnf_floating_ip
+ public: false # if available for other tenants
+ vnfs:
+ vnf_floating_ip: # vnf name in the scenario
+ #identify an already openmano uploaded VNF either by vnf_id (uuid, prefered) or vnf_name
+ #vnf_id: 0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e #prefered id method
+ vnf_name: vnf_floating_ip #can fail if several vnfs matches this name
+ #graph: {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ networks:
+ mgmt:
+ # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+ type: bridge
+ external: true #this will be connected outside
+ interfaces:
+ - vnf_floating_ip: mgmt0
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: vnf_floating_ip
+ description: VNF disabling port_security option in mgmt interface
+ # class: parent # Optional. Used to organize VNFs
+ external-connections:
+ - name: mgmt0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: vnf_floating_ip # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface
+ VNFC: # Virtual machine array
+ - name: vnf_floating_ip # name of Virtual Machine
+ description: vnf_floating_ip
+ image name: ubuntu16.04
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1000 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 5 # disk size in GiB, by default 1
+ #numas:
+ #- paired-threads: 5 # "cores", "paired-threads", "threads"
+ # paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ # memory: 14 # GBytes
+ # interfaces: []
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:0a.0" # Optional. Virtual PCI address
+ bandwidth: 1 Mbps # Optional. Informative only
+ floating-ip: True
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ # Additional Virtual Machines would be included here
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+---
+nsd:nsd-catalog:
+ nsd:
+ - id: test_2vdu_nsd
+ name: additional_disk_based_image
+ short-name: 2disks
+ description: Just deploy vnf_2_disks
+ vendor: OSM
+ version: '1.0'
+ constituent-vnfd:
+ - member-vnf-index: vnf2disks
+ vnfd-id-ref: additional_disk_based_image
+ vld:
+ # Networks for the VNFs
+ - id: vld1
+ name: mgmt
+ short-name: vld1-sname
+ type: ELAN
+ mgmt-network: 'true'
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: vnf2disks
+ vnfd-id-ref: additional_disk_based_image
+ vnfd-connection-point-ref: mgmt0
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+---
+vnfd-catalog:
+ vnfd:
+ - connection-point:
+ - name: mgmt0
+ type: VPORT
+ name: vnf_additional_disk_based_image
+ description: VNF with additional volume based on image
+ id: additional_disk_based_image
+ # short-name: 2disks
+ vendor: ROtest
+ version: '1.0'
+ mgmt-interface:
+ cp: mgmt0
+ vdu:
+ - id: VM1
+ name: VM1-name
+ image: US1604
+ alternative-images:
+ - vim-type: openstack
+ image: cirros
+ - vim-type: openvim
+ image: cirros034
+ volumes:
+ - name: vdb
+ device-type: disk
+ image: cirros034
+ # image-checksum: 4a293322f18827af81a9450e3792947c
+ size: 8
+ interface:
+ - name: iface11
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: mgmt0
+ mac-address: "52:33:44:55:66:77"
+ vm-flavor:
+ memory-mb: '2048'
+ storage-gb: '8'
+ vcpu-count: '1'
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: vnf_no_port_security
+ description: vnf_no_port_security
+ public: false # if available for other tenants
+ vnfs:
+ vnf_no_port_security: # vnf name in the scenario
+ #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+ #vnf_id: 0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e #preferred id method
+ vnf_name: vnf_no_port_security #can fail if several vnfs match this name
+ #graph: {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ networks:
+ mgmt:
+ # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+ type: bridge
+ external: true #this will be connected outside
+ interfaces:
+ - vnf_no_port_security: mgmt0
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: vnf_no_port_security
+ description: VNF disabling port_security option in mgmt interface
+ # class: parent # Optional. Used to organize VNFs
+ external-connections:
+ - name: mgmt0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: vnf_no_port_security # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface
+ VNFC: # Virtual machine array
+ - name: vnf_no_port_security # name of Virtual Machine
+ description: vnf_no_port_security
+ image name: ubuntu16.04
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1000 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 5 # disk size in GiB, by default 1
+ #numas:
+ #- paired-threads: 5 # "cores", "paired-threads", "threads"
+ # paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ # memory: 14 # GBytes
+ # interfaces: []
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:0a.0" # Optional. Virtual PCI address
+ bandwidth: 1 Mbps # Optional. Informative only
+ port-security: False
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ # Additional Virtual Machines would be included here
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: p2p_passthrough
+ description: Network scenario consisting of two machines with a passthrough interface interconnected between them
+ vnfs:
+ passthrough1: # vnf/net name in the scenario
+ vnf_name: passthrough # VNF name as introduced in OPENMANO DB
+ passthrough2: # vnf/net name in the scenario
+ vnf_name: passthrough # VNF name as introduced in OPENMANO DB
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - passthrough1: eth0 # Node and its interface
+ - passthrough2: eth0 # Node and its interface
+ dataplane: # provide a name for this net or connection
+ interfaces:
+ - passthrough1: xe0 # Node and its interface
+ - passthrough2: xe0 # Node and its interface
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: passthrough
+ description: Machine with EPA and a SR-IOV interface
+ external-connections:
+ - name: eth0
+ type: bridge
+ VNFC: passthrough-VM
+ local_iface_name: eth0
+ description: management interface
+ - name: xe0
+ type: data
+ VNFC: passthrough-VM
+ local_iface_name: xe0
+ description: Dataplane interface
+ VNFC:
+ - name: passthrough-VM
+ description: Machine with EPA and a SR-IOV interface
+ image name: centos
+ disk: 20
+ numas:
+ - threads: 1 # "cores", "paired-threads", "threads"
+ memory: 1 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:0a.0"
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: p2p_passthrough
+ description: Network scenario consisting of 4 machines with a passthrough interconnected between them
+ vnfs:
+ passthrough1: # vnf/net name in the scenario
+ vnf_name: passthrough # VNF name as introduced in OPENMANO DB
+ passthrough2: # vnf/net name in the scenario
+ vnf_name: passthrough # VNF name as introduced in OPENMANO DB
+ passthrough3: # vnf/net name in the scenario
+ vnf_name: passthrough # VNF name as introduced in OPENMANO DB
+ passthrough4: # vnf/net name in the scenario
+ vnf_name: passthrough # VNF name as introduced in OPENMANO DB
+
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - passthrough1: eth0 # Node and its interface
+ - passthrough2: eth0 # Node and its interface
+ - passthrough3: eth0 # Node and its interface
+ - passthrough4: eth0 # Node and its interface
+ dataplane: # provide a name for this net or connection
+ interfaces:
+ - passthrough1: xe0 # Node and its interface
+ - passthrough2: xe0 # Node and its interface
+ - passthrough3: xe0 # Node and its interface
+ - passthrough4: xe0 # Node and its interface
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: passthrough
+ description: Machine with EPA and a SR-IOV interface
+ external-connections:
+ - name: eth0
+ type: bridge
+ VNFC: passthrough-VM
+ local_iface_name: eth0
+ description: management interface
+ - name: xe0
+ type: data
+ VNFC: passthrough-VM
+ local_iface_name: xe0
+ description: Dataplane interface
+ VNFC:
+ - name: passthrough-VM
+ description: Machine with EPA and a SR-IOV interface
+ image name: centos
+ disk: 20
+ numas:
+ - threads: 1 # "cores", "paired-threads", "threads"
+ memory: 1 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:0a.0"
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: p2p_sriov
+ description: Network scenario consisting of four machines with an SR-IOV interface interconnected between them
+ vnfs:
+ sriov1: # vnf/net name in the scenario
+ vnf_name: sriov # VNF name as introduced in OPENMANO DB
+ sriov2: # vnf/net name in the scenario
+ vnf_name: sriov # VNF name as introduced in OPENMANO DB
+ sriov3: # vnf/net name in the scenario
+ vnf_name: sriov # VNF name as introduced in OPENMANO DB
+ sriov4: # vnf/net name in the scenario
+ vnf_name: sriov # VNF name as introduced in OPENMANO DB
+
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - sriov1: eth0 # Node and its interface
+ - sriov2: eth0 # Node and its interface
+ - sriov3: eth0 # Node and its interface
+ - sriov4: eth0 # Node and its interface
+ dataplane: # provide a name for this net or connection
+ interfaces:
+ - sriov1: xe0 # Node and its interface
+ - sriov2: xe0 # Node and its interface
+ - sriov3: xe0 # Node and its interface
+ - sriov4: xe0 # Node and its interface
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: sriov
+ description: Machine with EPA and a SR-IOV interface
+ external-connections:
+ - name: eth0
+ type: bridge
+ VNFC: sriov-VM
+ local_iface_name: eth0
+ description: management interface
+ - name: xe0
+ type: data
+ VNFC: sriov-VM
+ local_iface_name: xe0
+ description: Dataplane interface
+ VNFC:
+ - name: sriov-VM
+ description: Machine with EPA and a SR-IOV interface
+ image name: centos
+ disk: 20
+ numas:
+ - threads: 1 # "cores", "paired-threads", "threads"
+ memory: 1 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:0a.0"
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: p2p_sriov_passthrough
+ description: Network scenario consisting of four machines (two with SR-IOV and two with passthrough interfaces) interconnected between them
+ vnfs:
+ sriov1: # vnf/net name in the scenario
+ vnf_name: sriov # VNF name as introduced in OPENMANO DB
+ passthrough1: # vnf/net name in the scenario
+ vnf_name: passthrough # VNF name as introduced in OPENMANO DB
+ sriov2: # vnf/net name in the scenario
+ vnf_name: sriov # VNF name as introduced in OPENMANO DB
+ passthrough2: # vnf/net name in the scenario
+ vnf_name: passthrough # VNF name as introduced in OPENMANO DB
+
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - sriov1: eth0 # Node and its interface
+ - passthrough1: eth0 # Node and its interface
+ - sriov2: eth0 # Node and its interface
+ - passthrough2: eth0 # Node and its interface
+ dataplane: # provide a name for this net or connection
+ interfaces:
+ - sriov1: xe0 # Node and its interface
+ - passthrough1: xe0 # Node and its interface
+ - sriov2: xe0 # Node and its interface
+ - passthrough2: xe0 # Node and its interface
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: passthrough
+ description: Machine with EPA and a SR-IOV interface
+ external-connections:
+ - name: eth0
+ type: bridge
+ VNFC: passthrough-VM
+ local_iface_name: eth0
+ description: management interface
+ - name: xe0
+ type: data
+ VNFC: passthrough-VM
+ local_iface_name: xe0
+ description: Dataplane interface
+ VNFC:
+ - name: passthrough-VM
+ description: Machine with EPA and a SR-IOV interface
+ image name: centos
+ disk: 20
+ numas:
+ - threads: 1 # "cores", "paired-threads", "threads"
+ memory: 1 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:0a.0"
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: sriov
+ description: Machine with EPA and a SR-IOV interface
+ external-connections:
+ - name: eth0
+ type: bridge
+ VNFC: sriov-VM
+ local_iface_name: eth0
+ description: management interface
+ - name: xe0
+ type: data
+ VNFC: sriov-VM
+ local_iface_name: xe0
+ description: Dataplane interface
+ VNFC:
+ - name: sriov-VM
+ description: Machine with EPA and a SR-IOV interface
+ image name: centos
+ disk: 20
+ numas:
+ - threads: 1 # "cores", "paired-threads", "threads"
+ memory: 1 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:0a.0"
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: simple
+ description: Simple network scenario consisting of two VNFs connected to an external network
+ vnfs:
+ linux1: # vnf/net name in the scenario
+ vnf_name: linux # VNF name as introduced in OPENMANO DB
+ linux2: # vnf/net name in the scenario
+ vnf_name: linux # VNF name as introduced in OPENMANO DB
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - linux1: eth0 # Node and its interface
+ - linux2: eth0 # Node and its interface
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: linux
+ description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
+ external-connections:
+ - name: eth0
+ type: bridge
+ VNFC: linux-VM
+ local_iface_name: eth0
+ description: General purpose interface
+ VNFC:
+ - name: linux-VM
+ description: Generic Linux Virtual Machine
+ #Copy the image to a compute path and edit this path
+ image name: image_name.qcow2
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1024 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 10
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:11.0"
+ numas: []
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: simple-cloud-init
+ description: Simple network scenario consisting of a single VNF connected to an external network
+ vnfs:
+ linux1: # vnf/net name in the scenario
+ vnf_name: linux-cloud-init # VNF name as introduced in OPENMANO DB
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - linux1: eth0 # Node and its interface
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+---
+schema_version: "0.2"
+vnf:
+ name: linux-cloud-init
+ description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
+ external-connections:
+ - name: eth0
+ type: mgmt
+ description: General purpose interface
+ VNFC: linux-VM
+ local_iface_name: eth0
+ VNFC:
+ - name: linux-VM
+ description: Generic Linux Virtual Machine
+ #Copy the image to a compute path and edit this path
+ image name: ubuntu16.04
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 2048 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 20
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:11.0"
+ numas: []
+ boot-data:
+ key-pairs:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com
+ users:
+ - name: atierno
+ key-pairs:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com
+ boot-data-drive: true
+ config-files:
+ - content: |
+ auto enp0s3
+ iface enp0s3 inet dhcp
+ dest: /etc/network/interfaces.d/enp0s3.cfg
+ permissions: '0644'
+ owner: root:root
+ - content: |
+ #! /bin/bash
+ ls -al >> /var/log/osm.log
+ dest: /etc/rc.local
+ permissions: '0755'
+ - content: "file content"
+ dest: /etc/test_delete
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: simple_count3
+ description: Simple network scenario consisting of a multi VNFC VNF connected to an external network
+ vnfs:
+ linux1: # vnf/net name in the scenario
+ vnf_name: simple_linux_count3 # VNF name as introduced in OPENMANO DB
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - linux1: control0 # Node and its interface
+ internal1: # provide a name for this net or connection
+ external: false
+ interfaces:
+ - linux1: data-eth1
+
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: "0.2"
+vnf:
+ name: simple_linux_count3
+ description: "Example of a linux VNF consisting of three VMs (one VNFC with count 3) with one internal network"
+ # class: parent # Optional. Used to organize VNFs
+ internal-connections:
+ - name: internal-eth2
+ description: internalnet
+ type: e-lan
+ implementation: overlay
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 192.168.1.0/24
+ gateway-address: 192.168.1.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 192.168.1.100
+ count: 100
+ elements:
+ - VNFC: linux_3VMs
+ local_iface_name: eth2
+ ip_address: 192.168.1.2
+ external-connections:
+ - name: control0
+ type: mgmt
+ VNFC: linux_3VMs
+ local_iface_name: eth0
+ description: control interface VM1
+ - name: data-eth1
+ type: bridge
+ VNFC: linux_3VMs
+ local_iface_name: eth1
+ description: data interface input
+ VNFC:
+ - name: linux_3VMs
+ count: 3
+ description: "Linux VM1 2 CPUs, 2 GB RAM and 3 bridge interfaces"
+ #Copy the image to a compute path and edit this path
+ image name: TestVM
+ disk: 10
+ vcpus: 2
+ ram: 2048
+ bridge-ifaces:
+ - name: eth0
+ - name: eth1
+ - name: eth2
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+nsd:nsd-catalog:
+ nsd:
+ - id: simple
+ name: simple
+ vendor: OSM
+ version: '1.0'
+ description: Simple network scenario consisting of a single VNF connected to an external network
+ constituent-vnfd:
+ # The member-vnf-index needs to be unique, starting from 1
+ # vnfd-id-ref is the id of the VNFD
+ # Multiple constituent VNFDs can be specified
+ - member-vnf-index: 1
+ vnfd-id-ref: linux
+ vld:
+ # Networks for the VNFs
+ - id: vld1
+ name: mgmt
+ short-name: vld1-sname
+ type: ELAN
+ mgmt-network: 'true'
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: linux
+ vnfd-connection-point-ref: eth0
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnfd-catalog:
+ vnfd:
+ - id: linux
+ name: linux
+ description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
+ connection-point:
+ - name: eth0
+ type: VPORT
+ vdu:
+ - id: linux-VM
+ name: linux-VM
+ description: Generic Linux Virtual Machine
+ #Copy the image to a compute path and edit this path
+ image: image_name.qcow2
+ vm-flavor:
+ memory-mb: '1024'
+ storage-gb: '10'
+ vcpu-count: '1'
+ interface:
+ - name: eth0
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ vpci: "0000:00:11.0"
+ external-connection-point-ref: eth0
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+nsd:nsd-catalog:
+ nsd:
+ - id: simple_multi_vnfc
+ name: simple_multi_vnfc
+ vendor: OSM
+ version: '1.0'
+ description: Simple network scenario consisting of a multi VNFC VNF connected to an external network
+ constituent-vnfd:
+ # The member-vnf-index needs to be unique, starting from 1
+ # vnfd-id-ref is the id of the VNFD
+ # Multiple constituent VNFDs can be specified
+ - member-vnf-index: 1
+ vnfd-id-ref: linux_2VMs_v02
+ vld:
+ # Networks for the VNFs
+ - id: vld1
+ name: mgmt
+ short-name: vld1-sname
+ type: ELAN
+ mgmt-network: 'true'
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: linux_2VMs_v02
+ vnfd-connection-point-ref: eth0
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: linux_2VMs_v02
+ vnfd-connection-point-ref: xe1
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnfd-catalog:
+ vnfd:
+ - id: linux_2VMs_v02
+ name: linux_2VMs_v02
+ description: "Example of a linux VNF consisting of two VMs with one internal network"
+ connection-point:
+ - id: eth0
+ name: eth0
+ short-name: eth0
+ type: VPORT
+ - id: xe1
+ name: xe1
+ short-name: xe1
+ type: VPORT
+ internal-vld:
+ - id: internalnet
+ name: internalnet
+ short-name: internalnet
+ ip-profile-ref: ip-prof1
+ type: ELAN
+ internal-connection-point:
+ - id-ref: VM1-xe0
+ - id-ref: VM2-xe0
+ ip-profiles:
+ - name: ip-prof1
+ description: IP profile
+ gateway-address: 192.168.1.1
+ dns-address: 8.8.8.8
+ #- address: 8.8.8.8
+ ip-profile-params:
+ ip-version: ipv4
+ subnet-address: 192.168.1.0/24
+ dhcp-params:
+ enabled: true
+ start-address: 192.168.1.100
+ count: 100
+ vdu:
+ - id: linux_2VMs-VM1
+ name: linux_2VMs-VM1
+ description: Generic Linux Virtual Machine
+ #Copy the image to a compute path and edit this path
+ image: TestVM
+ vm-flavor:
+ memory-mb: '2048'
+ storage-gb: '10'
+ vcpu-count: '4'
+ interface:
+ - name: eth0
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ vpci: "0000:00:09.0"
+ external-connection-point-ref: eth0
+ - name: xe0
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ vpci: "0000:00:11.0"
+ internal-connection-point-ref: VM1-xe0
+ - name: xe1
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ vpci: "0000:00:12.0"
+ external-connection-point-ref: xe1
+ internal-connection-point:
+ - id: VM1-xe0
+ name: VM1-xe0
+ short-name: VM1-xe0
+ type: VPORT
+ - id: linux_2VMs-VM2
+ name: linux_2VMs-VM2
+ description: Generic Linux Virtual Machine
+ #Copy the image to a compute path and edit this path
+ image: TestVM
+ vm-flavor:
+ memory-mb: '2048'
+ storage-gb: '10'
+ vcpu-count: '4'
+ interface:
+ - name: eth0
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ vpci: "0000:00:09.0"
+ external-connection-point-ref: eth0
+ - name: xe0
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ vpci: "0000:00:11.0"
+ internal-connection-point-ref: VM2-xe0
+ - name: xe1
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ vpci: "0000:00:12.0"
+ external-connection-point-ref: xe1
+ internal-connection-point:
+ - id: VM2-xe0
+ name: VM2-xe0
+ short-name: VM2-xe0
+ type: VPORT
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: p2p_sriov
+ description: Network scenario consisting of two machines interconnected through an SR-IOV interface
+ vnfs:
+ sriov1: # vnf/net name in the scenario
+ vnf_name: sriov # VNF name as introduced in OPENMANO DB
+ sriov2: # vnf/net name in the scenario
+ vnf_name: sriov # VNF name as introduced in OPENMANO DB
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - sriov1: eth0 # Node and its interface
+ - sriov2: eth0 # Node and its interface
+ dataplane: # provide a name for this net or connection
+ interfaces:
+ - sriov1: xe0 # Node and its interface
+ - sriov2: xe0 # Node and its interface
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: sriov
+ description: Machine with EPA and a SR-IOV interface
+ external-connections:
+ - name: eth0
+ type: bridge
+ VNFC: sriov-VM
+ local_iface_name: eth0
+ description: management interface
+ - name: xe0
+ type: data
+ VNFC: sriov-VM
+ local_iface_name: xe0
+ description: Dataplane interface
+ VNFC:
+ - name: sriov-VM
+ description: Machine with EPA and a SR-IOV interface
+ image name: centos
+ disk: 20
+ numas:
+ - threads: 1 # "cores", "paired-threads", "threads"
+ memory: 1 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:0a.0"
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: p2p_sriov_passthrough
+ description: Network scenario consisting of two machines, one with SR-IOV and one with PCI passthrough, interconnected between them
+ vnfs:
+ sriov: # vnf/net name in the scenario
+ vnf_name: sriov # VNF name as introduced in OPENMANO DB
+ passthrough: # vnf/net name in the scenario
+ vnf_name: passthrough # VNF name as introduced in OPENMANO DB
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - sriov: eth0 # Node and its interface
+ - passthrough: eth0 # Node and its interface
+ dataplane: # provide a name for this net or connection
+ interfaces:
+ - sriov: xe0 # Node and its interface
+ - passthrough: xe0 # Node and its interface
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: passthrough
+ description: Machine with EPA and a SR-IOV interface
+ external-connections:
+ - name: eth0
+ type: bridge
+ VNFC: passthrough-VM
+ local_iface_name: eth0
+ description: management interface
+ - name: xe0
+ type: data
+ VNFC: passthrough-VM
+ local_iface_name: xe0
+ description: Dataplane interface
+ VNFC:
+ - name: passthrough-VM
+ description: Machine with EPA and a SR-IOV interface
+ image name: centos
+ disk: 20
+ numas:
+ - threads: 1 # "cores", "paired-threads", "threads"
+ memory: 1 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:0a.0"
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: sriov
+ description: Machine with EPA and a SR-IOV interface
+ external-connections:
+ - name: eth0
+ type: bridge
+ VNFC: sriov-VM
+ local_iface_name: eth0
+ description: management interface
+ - name: xe0
+ type: data
+ VNFC: sriov-VM
+ local_iface_name: xe0
+ description: Dataplane interface
+ VNFC:
+ - name: sriov-VM
+ description: Machine with EPA and a SR-IOV interface
+ image name: centos
+ disk: 20
+ numas:
+ - threads: 1 # "cores", "paired-threads", "threads"
+ memory: 1 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:0a.0"
+
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+nsd:nsd-catalog:
+ nsd:
+ - id: test_2vdu_nsd
+ name: test_2vdu_nsd_name
+ short-name: test_2vdu_nsd_sname
+ description: 2 vnfs, each one with 3 cirros vdu
+ vendor: OSM
+ version: '1.0'
+
+ # Place the logo as png in icons directory and provide the name here
+ logo: osm_2x.png
+
+ # Specify the VNFDs that are part of this NSD
+ constituent-vnfd:
+ # The member-vnf-index needs to be unique, starting from 1
+ # vnfd-id-ref is the id of the VNFD
+ # Multiple constituent VNFDs can be specified
+ - member-vnf-index: 1
+ vnfd-id-ref: test_2vdu
+ - member-vnf-index: 2
+ vnfd-id-ref: test_2vdu2
+
+ ip-profiles:
+ - description: Inter VNF Link
+ ip-profile-params:
+ gateway-address: 10.31.31.254
+ ip-version: ipv4
+ subnet-address: 10.31.31.0/24
+ dns-server:
+ - address: 8.8.8.8
+ - address: 8.8.8.9
+ dhcp-params:
+ count: 200
+ start-address: 10.31.31.20
+ name: ipprofileA
+ - description: IP profile that disables dhcp server
+ ip-profile-params:
+ dhcp-params:
+ enabled: 'false'
+ ip-version: ipv4
+ name: no_dhcp
+
+ vld:
+ # Networks for the VNFs
+ - id: vld1
+ name: mgmt
+ short-name: vld1-sname
+ type: ELAN
+ mgmt-network: 'true'
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: test_2vdu
+ vnfd-connection-point-ref: eth0
+ - member-vnf-index-ref: 2
+ vnfd-id-ref: test_2vdu2
+ vnfd-connection-point-ref: eth0
+
+ - id: vld2
+ name: nsd-vld2
+ short-name: vld2-sname
+ type: ELAN
+ ip-profile-ref: ipprofileA
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: test_2vdu
+ vnfd-connection-point-ref: eth1
+ ip-address: 10.31.31.4
+ - member-vnf-index-ref: 2
+ vnfd-id-ref: test_2vdu2
+ vnfd-connection-point-ref: eth1
+ ip-address: 10.31.31.5
+
+ - id: vld3
+ name: nsd-vld3
+ short-name: vld3-sname
+ type: ELAN
+ ip-profile-ref: no_dhcp
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: test_2vdu
+ vnfd-connection-point-ref: eth4
+ - member-vnf-index-ref: 2
+ vnfd-id-ref: test_2vdu2
+ vnfd-connection-point-ref: eth4
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+vnfd-catalog:
+ vnfd:
+ - connection-point:
+ - name: eth0
+ type: VPORT
+ - name: eth1
+ type: VPORT
+ - name: eth4
+ type: VPORT
+ description: VNF with internal VLD and set IP and mac
+ id: test_2vdu
+ name: test_2vdu_name
+ short-name: test_2vdu_sname
+ mgmt-interface:
+ cp: eth0
+ internal-vld:
+ - description: Internal VL
+ id: net_internal
+ name: internal_vld1
+ short-name: net_internal_sname
+ type: ELAN
+ internal-connection-point:
+ - id-ref: eth2
+ ip-address: 10.10.135.4
+ - id-ref: eth3
+ ip-address: 10.10.135.5
+ ip-profile-ref: ip-profile1
+ ip-profiles:
+ - description: Inter VNF Link
+ ip-profile-params:
+ gateway-address: null
+ ip-version: ipv4
+ subnet-address: 10.10.135.0/24
+ dhcp-params:
+ count: 100
+ start-address: 10.10.135.20
+ name: ip-profile1
+ vdu:
+ - id: VM1
+ name: VM11
+ image: US1604
+ interface:
+ - name: iface11
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: eth0
+ mac-address: "52:33:44:55:66:77"
+ - name: iface12
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: eth2
+ mac-address: "52:33:44:55:66:78"
+ - name: iface13
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: eth4
+ internal-connection-point:
+ - name: eth2-icp
+ id: eth2
+ type: VPORT
+ vm-flavor:
+ memory-mb: '2048'
+ storage-gb: '8'
+ vcpu-count: '1'
+ - id: VM2
+ image: US1604
+ name: VM12
+ interface:
+ - name: iface21
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: eth1
+ mac-address: 52:33:44:55:66:79
+ - name: iface22
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: eth3
+ mac-address: 52:33:44:55:66:80
+ internal-connection-point:
+ - name: eth3-icp
+ id: eth3
+ type: VPORT
+ vm-flavor:
+ memory-mb: '2048'
+ storage-gb: '8'
+ vcpu-count: '1'
+ vendor: ROtest
+ version: '1.0'
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+vnfd-catalog:
+ vnfd:
+ - connection-point:
+ - name: eth0
+ type: VPORT
+ - name: eth1
+ type: VPORT
+ - name: eth4
+ type: VPORT
+ description: VNF with internal VLD and set IP and mac
+ id: test_2vdu2
+ name: test_2vdu2_name
+ short-name: test_2vdu2_sname
+ mgmt-interface:
+ cp: eth0
+ internal-vld:
+ - description: Internal VL
+ id: net_internal
+ name: internal_vld2
+ short-name: net_internal_sname
+ type: ELAN
+ internal-connection-point:
+ - id-ref: eth2
+ ip-address: 10.10.133.4
+ - id-ref: eth3
+ ip-address: 10.10.133.5
+ ip-profile-ref: ip-profile1
+ ip-profiles:
+ - description: Inter VNF Link
+ ip-profile-params:
+ gateway-address: 10.10.133.1
+ ip-version: ipv4
+ subnet-address: 10.10.133.0/24
+ dhcp-params:
+ count: 200
+ start-address: 10.10.133.20
+ name: ip-profile1
+ vdu:
+ - id: VM1
+ name: VM21
+ image: US1604
+ interface:
+ - name: iface11
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: eth0
+ mac-address: "52:33:44:55:66:81"
+ - name: iface12
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: eth2
+ mac-address: "52:33:44:55:66:82"
+ - name: iface13
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: eth4
+ internal-connection-point:
+ - name: eth2-icp
+ id: eth2
+ type: VPORT
+ vm-flavor:
+ memory-mb: '2048'
+ storage-gb: '8'
+ vcpu-count: '1'
+ - id: VM2
+ image: US1604
+ name: VM22
+ interface:
+ - name: iface21
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: eth1
+ mac-address: 52:33:44:55:66:83
+ - name: iface22
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: eth3
+ mac-address: 52:33:44:55:66:84
+ internal-connection-point:
+ - name: eth3-icp
+ id: eth3
+ type: VPORT
+ vm-flavor:
+ memory-mb: '2048'
+ storage-gb: '8'
+ vcpu-count: '1'
+ vendor: ROtest
+ version: '1.0'
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#This script can be used as a basic test of openmano.
+#WARNING: It destroy the database content
+
+
+# Print command-line help: supported <action> keywords and OPTIONS for this
+# basic openmano test script (uses openvim as the VIM).
+function usage(){
+ echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] <action>\n test openmano using openvim as a VIM"
+ echo -e " the OPENVIM_HOST, OPENVIM_PORT shell variables indicate openvim location"
+ echo -e " by default localhost:9080"
+ echo -e " <action> is a list of the following items (by default 'reset add-openvim create delete del-openvim')"
+ echo -e " reset resets the openmano database content and creates osm tenant"
+ echo -e " add-openvim adds and attaches a local openvim datacenter"
+ echo -e " del-openvim detaches and deletes the local openvim datacenter"
+ echo -e " create creates VNFs, scenarios and instances"
+ echo -e " delete deletes the created instances, scenarios and VNFs"
+ echo -e " delete-all deletes ALL the existing instances, scenarios and vnf at the current tenant"
+ echo -e " OPTIONS:"
+ echo -e " -f --force does not prompt for confirmation"
+ echo -e " -h --help shows this help"
+ echo -e " --screen forces to run openmano (and openvim) service in a screen"
+ echo -e " --insert-bashrc insert the created tenant,datacenter variables at"
+ echo -e " ~/.bashrc to be available by openmano CLI"
+ echo -e " --install-openvim installs openvim in test mode"
+ echo -e " --init-openvim --initopenvim if openvim runs locally, initopenvim is called to clean openvim"\
+ "database, create osm tenant and add fake hosts"
+}
+
+# Succeed (return 0) iff $1 is a canonical lowercase hyphenated UUID
+# (8-4-4-4-12 hex digits); return 1 otherwise.
+function is_valid_uuid(){
+ if echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$'
+ then
+ return 0
+ fi
+ return 1
+}
+
+#detect if is called with a source to use the 'exit'/'return' command for exiting
+DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+DIRmano=$(dirname $DIRNAME)
+DIRscript=${DIRmano}/scripts
+
+#detect paths of executables, preceding the relative paths
+openmano=openmano && [[ -x "${DIRmano}/openmano" ]] && openmano="${DIRmano}/openmano"
+service_openmano=service-openmano && [[ -x "$DIRscript/service-openmano" ]] &&
+ service_openmano="$DIRscript/service-openmano"
+initopenvim="initopenvim"
+openvim="openvim"
+
+# when sourced, use 'return' instead of 'exit' so the caller's shell survives
+[[ ${BASH_SOURCE[0]} != $0 ]] && _exit="return" || _exit="exit"
+
+
+#process options (get-options.sh populates option_* and $params)
+source ${DIRscript}/get-options.sh "force:f help:h insert-bashrc init-openvim:initopenvim install-openvim screen" \
+ $* || $_exit 1
+
+#help
+[ -n "$option_help" ] && usage && $_exit 0
+
+#check correct arguments
+force_param="" && [[ -n "$option_force" ]] && force_param=" -f"
+insert_bashrc_param="" && [[ -n "$option_insert_bashrc" ]] && insert_bashrc_param=" --insert-bashrc"
+screen_mano_param="" && [[ -n "$option_screen" ]] && screen_mano_param=" --screen-name=mano"
+screen_vim_param="" && [[ -n "$option_screen" ]] && screen_vim_param=" --screen-name=vim"
+
+action_list=""
+
+# validate positional arguments: only known action keywords are accepted
+for argument in $params
+do
+ if [[ $argument == reset ]] || [[ $argument == create ]] || [[ $argument == delete ]] ||
+ [[ $argument == add-openvim ]] || [[ $argument == del-openvim ]] || [[ $argument == delete-all ]] ||
+ [[ -z "$argument" ]]
+ then
+ action_list="$action_list $argument"
+ continue
+ fi
+ echo "invalid argument '$argument'? Type -h for help" >&2 && $_exit 1
+done
+
+export OPENMANO_HOST=localhost
+export OPENMANO_PORT=9090
+[[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_HOST=localhost" >> ~/.bashrc
+[[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_PORT=9090" >> ~/.bashrc
+
+
+#by default action should be reset and create
+[[ -z $action_list ]] && action_list="reset add-openvim create delete del-openvim"
+
+if [[ -n "$option_install_openvim" ]]
+then
+ echo
+ echo "action: install openvim"
+ echo "################################"
+ mkdir -p ${DIRNAME}/local
+ pushd ${DIRNAME}/local
+ echo "installing openvim at ${DIRNAME}/openvim ... "
+ wget -O install-openvim.sh "https://osm.etsi.org/gitweb/?p=osm/openvim.git;a=blob_plain;f=scripts/install-openvim.sh"
+ chmod +x install-openvim.sh
+ sudo ./install-openvim.sh --no-install-packages --force --quiet --develop
+ openvim="${DIRNAME}/local/openvim/openvim"
+ #force init-openvim: a freshly installed openvim must always be initialized
+ option_init_openvim="-"
+ initopenvim="${DIRNAME}/local/openvim/scripts/initopenvim"
+ popd
+fi
+
+if [[ -n "$option_init_openvim" ]]
+then
+ echo
+ echo "action: init openvim"
+ echo "################################"
+ ${initopenvim} ${force_param}${insert_bashrc_param}${screen_vim_param} || \
+ echo "WARNING openvim cannot be initialized. The rest of test can fail!"
+fi
+
+#check openvim client variables are set
+#fail=""
+#[[ -z $OPENVIM_HOST ]] && echo "OPENVIM_HOST variable not defined" >&2 && fail=1
+#[[ -z $OPENVIM_PORT ]] && echo "OPENVIM_PORT variable not defined" >&2 && fail=1
+#[[ -n $fail ]] && $_exit 1
+
+
+for action in $action_list
+do
+ echo
+ echo "action: $action"
+ echo "################################"
+#if [[ $action == "install-openvim" ]]
+ #echo "Installing and starting openvim"
+ #mkdir -p temp
+ #pushd temp
+ #wget https://github.com/nfvlabs/openvim/raw/v0.4/scripts/install-openvim.sh
+ #chmod -x install-openvim.sh
+#fi
+
+if [[ $action == "reset" ]]
+then
+
+ #ask for confirmation if argument is not -f --force
+ force_=y
+ [[ -z "$option_force" ]] && read -e -p "WARNING: reset openmano database, content will be lost!!! Continue(y/N) " force_
+ [[ $force_ != y ]] && [[ $force_ != yes ]] && echo "aborted!" && $_exit
+
+ echo "Stopping openmano"
+ $service_openmano mano stop${screen_mano_param}
+ echo "Initializing openmano database"
+ $DIRmano/database_utils/init_mano_db.sh -u mano -p manopw
+ echo "Starting openmano"
+ $service_openmano mano start${screen_mano_param}
+ echo
+ printf "%-50s" "Creating openmano tenant 'osm': "
+ result=`$openmano tenant-create osm --description="created by basictest.sh"`
+ nfvotenant=`echo $result |gawk '{print $1}'`
+ #check a valid uuid is obtained
+ ! is_valid_uuid $nfvotenant && echo "FAIL" && echo " $result" && $_exit 1
+ export OPENMANO_TENANT=osm
+ [[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_TENANT=osm" >> ~/.bashrc
+ echo $nfvotenant
+
+elif [[ $action == "delete" ]]
+then
+ result=`openmano tenant-list osm`
+ nfvotenant=`echo $result |gawk '{print $1}'`
+ #check a valid uuid is obtained
+ is_valid_uuid $nfvotenant || ! echo "Tenant osm not found. Already delete?" >&2 || $_exit 1
+ export OPENMANO_TENANT=$nfvotenant
+ $openmano instance-scenario-delete -f simple-instance || echo "fail"
+ $openmano instance-scenario-delete -f complex-instance || echo "fail"
+ $openmano instance-scenario-delete -f complex2-instance || echo "fail"
+ $openmano instance-scenario-delete -f complex3-instance || echo "fail"
+ $openmano instance-scenario-delete -f complex4-instance || echo "fail"
+ $openmano instance-scenario-delete -f complex5-instance || echo "fail"
+ $openmano instance-scenario-delete -f 3vdu_2vnf_nsd-instance || echo "fail"
+ $openmano scenario-delete -f simple || echo "fail"
+ $openmano scenario-delete -f complex || echo "fail"
+ $openmano scenario-delete -f complex2 || echo "fail"
+ $openmano scenario-delete -f complex3 || echo "fail"
+ $openmano scenario-delete -f complex4 || echo "fail"
+ $openmano scenario-delete -f complex5 || echo "fail"
+ $openmano scenario-delete -f osm_id=3vdu_2vnf_nsd || echo "fail"
+ $openmano vnf-delete -f linux || echo "fail"
+ $openmano vnf-delete -f linux_2VMs_v02 || echo "fail"
+ $openmano vnf-delete -f dataplaneVNF_2VMs || echo "fail"
+ $openmano vnf-delete -f dataplaneVNF_2VMs_v02 || echo "fail"
+ $openmano vnf-delete -f dataplaneVNF1 || echo "fail"
+ $openmano vnf-delete -f dataplaneVNF2 || echo "fail"
+ $openmano vnf-delete -f dataplaneVNF3 || echo "fail"
+ $openmano vnf-delete -f dataplaneVNF4 || echo "fail"
+ $openmano vnf-delete -f osm_id=3vdu_vnfd || echo "fail"
+
+elif [[ $action == "delete-all" ]]
+then
+ for i in instance-scenario scenario vnf
+ do
+ for f in `$openmano $i-list | awk '{print $1}'`
+ do
+ [[ -n "$f" ]] && [[ "$f" != No ]] && $openmano ${i}-delete -f ${f}
+ done
+ done
+
+elif [[ $action == "del-openvim" ]]
+then
+ $openmano datacenter-detach local-openvim || echo "fail"
+ $openmano datacenter-delete -f local-openvim || echo "fail"
+
+elif [[ $action == "add-openvim" ]]
+then
+
+ printf "%-50s" "Creating datacenter 'local-openvim' at openmano:"
+ [[ -z $OPENVIM_HOST ]] && OPENVIM_HOST=localhost
+ [[ -z $OPENVIM_PORT ]] && OPENVIM_PORT=9080
+ URL_ADMIN_PARAM=""
+ [[ -n $OPENVIM_ADMIN_PORT ]] && URL_ADMIN_PARAM=" --url_admin=http://${OPENVIM_HOST}:${OPENVIM_ADMIN_PORT}/openvim"
+ result=`$openmano datacenter-create local-openvim "http://${OPENVIM_HOST}:${OPENVIM_PORT}/openvim" \
+ --type=openvim${URL_ADMIN_PARAM} --config="{test: no use just for test}"`
+ datacenter=`echo $result |gawk '{print $1}'`
+ #check a valid uuid is obtained
+ ! is_valid_uuid $datacenter && echo "FAIL" && echo " $result" && $_exit 1
+ echo $datacenter
+ export OPENMANO_DATACENTER=local-openvim
+ [[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_DATACENTER=local-openvim" >> ~/.bashrc
+
+ printf "%-50s" "Attaching openmano tenant to the datacenter:"
+ result=`$openmano datacenter-attach local-openvim --vim-tenant-name=osm --config="{test: no use just for test}"`
+ [[ $? != 0 ]] && echo "FAIL" && echo " $result" && $_exit 1
+ echo OK
+
+ printf "%-50s" "Updating external nets in openmano: "
+ result=`$openmano datacenter-netmap-delete -f --all`
+ [[ $? != 0 ]] && echo "FAIL" && echo " $result" && $_exit 1
+ result=`$openmano datacenter-netmap-import -f`
+ [[ $? != 0 ]] && echo "FAIL" && echo " $result" && $_exit 1
+ echo OK
+ result=`$openmano datacenter-netmap-create --name=default --vim-name=mgmt`
+ [[ $? != 0 ]] && echo "FAIL" && echo " $result" && $_exit 1
+ echo OK
+
+elif [[ $action == "create" ]]
+then
+ for VNF in linux dataplaneVNF1 dataplaneVNF2 dataplaneVNF_2VMs dataplaneVNF_2VMs_v02 dataplaneVNF3 linux_2VMs_v02 dataplaneVNF4
+ do
+ printf "%-50s" "Creating VNF '${VNF}': "
+ result=`$openmano vnf-create $DIRmano/vnfs/examples/${VNF}.yaml`
+ vnf=`echo $result |gawk '{print $1}'`
+ #check a valid uuid is obtained
+ ! is_valid_uuid $vnf && echo FAIL && echo " $result" && $_exit 1
+ echo $vnf
+ done
+
+ printf "%-50s" "Creating VNF '${VNF}': "
+ result=`$openmano vnf-create $DIRmano/vnfs/examples/v3_3vdu_vnfd.yaml --image-name=cirros034`
+ vnf=`echo $result |gawk '{print $1}'`
+ #check a valid uuid is obtained
+ ! is_valid_uuid $vnf && echo FAIL && echo " $result" && $_exit 1
+ echo $vnf
+
+ for NS in simple complex complex2 complex3 complex4 complex5 v3_3vdu_2vnf_nsd
+ do
+ printf "%-50s" "Creating scenario '${NS}':"
+ result=`$openmano scenario-create $DIRmano/scenarios/examples/${NS}.yaml`
+ scenario=`echo $result |gawk '{print $1}'`
+ ! is_valid_uuid $scenario && echo FAIL && echo " $result" && $_exit 1
+ echo $scenario
+ done
+
+ for IS in simple complex complex2 complex3 complex5 osm_id=3vdu_2vnf_nsd
+ do
+ printf "%-50s" "Creating instance-scenario '${IS}':"
+ result=`$openmano instance-scenario-create --scenario ${IS} --name ${IS#osm_id=}-instance`
+ instance=`echo $result |gawk '{print $1}'`
+ ! is_valid_uuid $instance && echo FAIL && echo " $result" && $_exit 1
+ echo $instance
+ done
+
+ printf "%-50s" "Creating instance-scenario 'complex4':"
+ result=`$openmano instance-scenario-create $DIRmano/instance-scenarios/examples/instance-creation-complex4.yaml`
+ instance=`echo $result |gawk '{print $1}'`
+ ! is_valid_uuid $instance && echo FAIL && echo " $result" && $_exit 1
+ echo $instance
+
+ echo
+ #echo "Check virtual machines are deployed"
+ #vms_error=`openvim vm-list | grep ERROR | wc -l`
+ #vms=`openvim vm-list | wc -l`
+ #[[ $vms -ne 8 ]] && echo "WARNING: $vms VMs created, must be 8 VMs" >&2 && $_exit 1
+ #[[ $vms_error -gt 0 ]] && echo "WARNING: $vms_error VMs with ERROR" >&2 && $_exit 1
+fi
+done
+
+echo
+echo DONE
+
+
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#This script is a basic test for openmano, that deals with two openvim
+#stopping on an error
+#WARNING: It destroy the database content
+
+
# Print CLI help on stdout: what the script does (wipes openvim/openmano
# content and replays the wiki "getting started" steps) plus the options.
function usage(){
    echo -e "usage: ${BASH_SOURCE[0]} [-f]\n Deletes openvim/openmano content and make automatically the wiki steps"
    echo -e " at 'https://github.com/nfvlabs/openmano/wiki/Getting-started#how-to-use-it'"
    echo -e " OPTIONS:"
    echo -e " -f --force : does not prompt for confirmation"
    echo -e " -h --help : shows this help"
}
+
# is_valid_uuid STRING
# Succeed (status 0) iff STRING is a lowercase hyphenated hex UUID in the
# canonical 8-4-4-4-12 form; any other input yields a non-zero status.
function is_valid_uuid(){
    grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' <<< "$1"
}
+
+
#detect if is called with a source to use the 'exit'/'return' command for exiting
# (a sourced script must 'return' instead of 'exit', otherwise it would
# terminate the caller's interactive shell)
[[ ${BASH_SOURCE[0]} != $0 ]] && _exit="return" || _exit="exit"

#check correct arguments
[[ -n $1 ]] && [[ $1 != -h ]] && [[ $1 != --help ]] && [[ $1 != -f ]] && [[ $1 != --force ]] && \
    echo "invalid argument '$1'?" && usage >&2 && $_exit 1
[[ $1 == -h ]] || [[ $1 == --help ]] && usage && $_exit 0

#ask for confirmation if argument is not -f --force
force=""
[[ $1 == -f ]] || [[ $1 == --force ]] && force=y
[[ $force != y ]] && read -e -p "WARNING: openmano and openvim database content will be lost!!! Continue(y/N)" force
# NOTE(review): this '$_exit' carries no status code, so an aborted run exits
# with the status of the preceding echo (0) — confirm that is intended.
[[ $force != y ]] && [[ $force != yes ]] && echo "aborted!" && $_exit

# Repo layout assumed from the paths below: <base>/openvim, <base>/openmano
# and <base>/scripts are siblings two levels above this script's directory.
DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
DIR_BASE=$(dirname $DIRNAME)
DIR_BASE=$(dirname $DIR_BASE)
DIRvim=$DIR_BASE/openvim
DIRmano=$DIR_BASE/openmano
DIRscripts=$DIR_BASE/scripts
+
# Rebuild both databases from scratch and walk the wiki steps against a
# local openvim. Error idiom used throughout:
#   cmd || ! echo "fail" >&2 || $_exit 1
# prints "fail" on stderr and aborts only when cmd returns non-zero.
echo "deleting deployed vm"
openvim vm-delete -f | grep -q deleted && sleep 10 #give some time to get virtual machines deleted

echo "Stopping openmano"
$DIRscripts/service-openmano stop

echo "Initializing databases"
$DIRvim/database_utils/init_vim_db.sh -u vim -p vimpw
$DIRmano/database_utils/init_mano_db.sh -u mano -p manopw

echo "Starting openmano"
$DIRscripts/service-openmano start

echo "Creating openmano tenant 'mytenant'"
nfvotenant=`openmano tenant-create mytenant --description=mytenant |gawk '{print $1}'`
#check a valid uuid is obtained
is_valid_uuid $nfvotenant || ! echo "fail" >&2 || $_exit 1
export OPENMANO_TENANT=$nfvotenant
echo " $nfvotenant"

echo "Adding example hosts"
openvim host-add $DIRvim/test/hosts/host-example0.json || ! echo "fail" >&2 || $_exit 1
openvim host-add $DIRvim/test/hosts/host-example1.json || ! echo "fail" >&2 || $_exit 1
openvim host-add $DIRvim/test/hosts/host-example2.json || ! echo "fail" >&2 || $_exit 1
openvim host-add $DIRvim/test/hosts/host-example3.json || ! echo "fail" >&2 || $_exit 1
echo "Adding example nets"
openvim net-create $DIRvim/test/networks/net-example0.yaml || ! echo "fail" >&2 || $_exit 1
openvim net-create $DIRvim/test/networks/net-example1.yaml || ! echo "fail" >&2 || $_exit 1
openvim net-create $DIRvim/test/networks/net-example2.yaml || ! echo "fail" >&2 || $_exit 1
openvim net-create $DIRvim/test/networks/net-example3.yaml || ! echo "fail" >&2 || $_exit 1

echo "Creating openvim tenant 'admin'"
vimtenant=`openvim tenant-create '{"tenant": {"name":"admin", "description":"admin"}}' |gawk '{print $1}'`
#check a valid uuid is obtained
is_valid_uuid $vimtenant || ! echo "fail" >&2 || $_exit 1
echo " $vimtenant"
# keep the first openvim tenant id; it is also attached to mydc2 below
OPENVIM_TENANT_1=$vimtenant && export OPENVIM_TENANT=$vimtenant

echo "Creating datacenter 'mydc1' in openmano"
datacenter=`openmano datacenter-create mydc1 http://localhost:9080/openvim |gawk '{print $1}'`
#check a valid uuid is obtained
is_valid_uuid $datacenter || ! echo "fail" >&2 || $_exit 1
echo " $datacenter"
OPENMANO_DATACENTER_1=$datacenter && export OPENMANO_DATACENTER=$datacenter

echo "Attaching openmano tenant to the datacenter and the openvim tenant"
openmano datacenter-attach mydc1 --vim-tenant-id $vimtenant || ! echo "fail" >&2 || $_exit 1

echo "Updating external nets in openmano"
openmano datacenter-net-update -f mydc1 || ! echo "fail" >&2 || $_exit 1
+
# Create a second datacenter pointing at a port (9082) where nothing listens,
# to exercise openmano behaviour against an unreachable VIM.
echo "Creating a second fake datacenter 'mydc2' in openmano"
datacenter2=`openmano datacenter-create mydc2 http://localhost:9082/openvim |gawk '{print $1}'`
#check a valid uuid is obtained
# Bug fix: validate the uuid of the datacenter just created ($datacenter2);
# previously this re-checked $datacenter from the mydc1 step, so a failed
# mydc2 creation went undetected.
is_valid_uuid $datacenter2 || ! echo "fail" >&2 || $_exit 1
echo " $datacenter2"
OPENMANO_DATACENTER_2=$datacenter2
echo "Attaching a second fake openvim 'mydc2'"
openmano datacenter-attach mydc2 --vim-tenant-id $vimtenant || ! echo "fail" >&2 || $_exit 1
+
# Create the sample VNFs; per the message below, image/flavor propagation is
# expected to fail on the second (fake, unreachable) openvim.
echo "Creating VNFs, must fail in second openvim"
openmano vnf-create $DIRmano/vnfs/examples/linux.yaml || ! echo "fail" >&2 || $_exit 1
openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF1.yaml || ! echo "fail" >&2 || $_exit 1
openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF2.yaml || ! echo "fail" >&2 || $_exit 1
+
# Verify that the three VNFs loaded exactly 3 images and 3 flavors into openvim.
echo "Checking images and flavors created at openvim"
nb=`openvim image-list | wc -l`
echo -n " $nb images "
[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
# Bug fix: recount using flavor-list; previously the image count was checked
# twice and the flavor count was never verified.
nb=`openvim flavor-list | wc -l`
echo " $nb flavors "
[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
+
echo "Creating Scenarios"
openmano scenario-create $DIRmano/scenarios/examples/simple.yaml || ! echo "fail" >&2 || $_exit 1
openmano scenario-create $DIRmano/scenarios/examples/complex.yaml || ! echo "fail" >&2 || $_exit 1

# Wipe the images/flavors so the deploy below has to re-upload them.
echo "Deleting openvim images and flavors to force reload again"
openvim image-delete -f
openvim flavor-delete -f

echo "Launching scenarios"
openmano scenario-deploy simple simple-instance || ! echo "fail" >&2 || $_exit 1
openmano scenario-deploy complex complex-instance || ! echo "fail" >&2 || $_exit 1

echo "Checking that openvim has 5 VM running"
nb=`openvim vm-list | wc -l`
[[ $nb -eq 5 ]] || ! echo "fail" >&2 || $_exit 1
# poll until no VM is still being created
while openvim vm-list | grep -q CREATING ; do sleep 1; done
# NOTE(review): 'grep -v -q ERROR' succeeds when at least one NON-error line
# exists, so this does not guarantee that no VM is in ERROR — confirm intent.
openvim vm-list | grep -v -q ERROR || ! echo "fail: VM with error" >&2 || $_exit 1

echo "Removing scenarios"
for scenario in `openmano instance-scenario-list | awk '{print $2}'`
do
    openmano instance-scenario-delete -f $scenario
done

# Swap the vim_url of both datacenters: mydc1 now points to an unused port
# (9083) while mydc2 takes over the real openvim at 9080; subsequent work
# happens through mydc2.
echo "Editing datacenters so that Changing openvim Working with the second openvim"
openmano datacenter-edit -f mydc1 'vim_url: http://localhost:9083/openvim'
openmano datacenter-edit -f mydc2 'vim_url: http://localhost:9080/openvim'
export OPENMANO_DATACENTER=$OPENMANO_DATACENTER_2

echo "Updating external nets in openmano for second datacenter"
openmano datacenter-net-update -f mydc2 || ! echo "fail" >&2 || $_exit 1
+
# Re-check that the redeploy through mydc2 reloaded 3 images and 3 flavors.
echo "Checking images and flavors created at openvim"
nb=`openvim image-list | wc -l`
echo -n " $nb images "
[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
# Bug fix: recount using flavor-list; previously the image count was checked
# twice and the flavor count was never verified.
nb=`openvim flavor-list | wc -l`
echo " $nb flavors "
[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
+
echo "Checking that openvim has 5 VM running"
nb=`openvim vm-list | wc -l`
[[ $nb -eq 5 ]] || ! echo "fail" >&2 || $_exit 1
# poll until no VM is still being created
while openvim vm-list | grep -q CREATING ; do sleep 1; done
# NOTE(review): same weak check as above — 'grep -v -q ERROR' only proves a
# non-error line exists, not the absence of ERROR lines.
openvim vm-list | grep -v -q ERROR || ! echo "fail: VM with error" >&2 || $_exit 1


echo
echo DONE
#echo "Listing VNFs"
#openmano vnf-list
#echo "Listing scenarios"
#openmano scenario-list
#echo "Listing scenario instances"
#openmano instance-scenario-list
+
+
--- /dev/null
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2017
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+
# DEBUG WITH PDB
# Opt-in debugging hook: when the OSMRO_PDB_DEBUG environment variable is
# set, dump sys.path and drop into an interactive pdb session before the
# rest of the module is imported.
from os import getenv
if getenv('OSMRO_PDB_DEBUG'):
    import sys
    print(sys.path)
    import pdb
    pdb.set_trace()
+
+
+"""
+Module for testing openmano functionality. It uses openmanoclient.py for invoking openmano
+"""
+
+import logging
+import os
+import argcomplete
+import unittest
+import string
+import inspect
+import random
+# import traceback
+import glob
+import yaml
+import sys
+import time
+import uuid
+from argparse import ArgumentParser
+
+__author__ = "Pablo Montes, Alfonso Tierno"
+__date__ = "$16-Feb-2017 17:08:16$"
+__version__ = "0.1.0"
+version_date = "Oct 2017"
+
+test_config = {} # used for global variables with the test configuration
+
+
class test_base(unittest.TestCase):
    """Common scaffolding for every test class of this module.

    Tracks a per-class test counter and the label of the test being run,
    logs the class header when a class starts, bumps the global test number
    when it finishes, and logs OK/NOK after each individual test.
    """
    test_index = 1    # sequence number of the next test within the class
    test_text = None  # label of the currently running test

    @classmethod
    def setUpClass(cls):
        logger.info("{}. {}".format(test_config["test_number"], cls.__name__))

    @classmethod
    def tearDownClass(cls):
        test_config["test_number"] += 1

    def tearDown(self):
        # sys.exc_info() is (None, None, None) when the test finished cleanly
        if sys.exc_info() == (None, None, None):
            logger.info(self.__class__.test_text + " -> TEST OK")
            return
        logger.warning(self.__class__.test_text + " -> TEST NOK")
        logger.critical("Traceback error", exc_info=True)
+
+
def check_instance_scenario_active(uuid):
    """Return (True, None) when every net and VM of the instance is ACTIVE.

    Otherwise return (False, status) with the status of the first
    non-ACTIVE component found; nets are inspected before VNF VMs.
    """
    instance = test_config["client"].get_instance(uuid=uuid)

    # Gather all components in checking order: nets first, then every VM
    # of every VNF.
    components = list(instance['nets'])
    for vnf in instance['vnfs']:
        components.extend(vnf['vms'])

    for component in components:
        if component['status'] != 'ACTIVE':
            return (False, component['status'])
    return (True, None)
+
+
+'''
+IMPORTANT NOTE
+All unittest classes for code based tests must have prefix 'test_' in order to be taken into account for tests
+'''
class test_VIM_datacenter_tenant_operations(test_base):
    """RO tenant lifecycle: create, read back by name, delete."""
    tenant_name = None  # random name shared by the three ordered tests

    def test_000_create_RO_tenant(self):
        cls = self.__class__
        cls.tenant_name = _get_random_string(20)
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        logger.debug("Test create tenant")
        tenant = test_config["client"].create_tenant(name=cls.tenant_name,
                                                     description=cls.tenant_name)
        logger.debug("{}".format(tenant))
        self.assertEqual(tenant.get('tenant', {}).get('name', ''), cls.tenant_name)

    def test_010_list_RO_tenant(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        tenant = test_config["client"].get_tenant(name=cls.tenant_name)
        logger.debug("{}".format(tenant))
        self.assertEqual(tenant.get('tenant', {}).get('name', ''), cls.tenant_name)

    def test_020_delete_RO_tenant(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        tenant = test_config["client"].delete_tenant(name=cls.tenant_name)
        logger.debug("{}".format(tenant))
        assert 'deleted' in tenant.get('result', "")
+
+
class test_VIM_datacenter_operations(test_base):
    """Datacenter lifecycle: create, list, attach, list attached, detach, delete."""
    datacenter_name = None  # random name shared by the ordered tests below

    def test_000_create_datacenter(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.datacenter_name = _get_random_string(20)
        cls.test_index += 1
        self.datacenter = test_config["client"].create_datacenter(name=cls.datacenter_name,
                                                                  vim_url="http://fakeurl/fake")
        logger.debug("{}".format(self.datacenter))
        self.assertEqual(self.datacenter.get('datacenter', {}).get('name', ''), cls.datacenter_name)

    def test_010_list_datacenter(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        self.datacenter = test_config["client"].get_datacenter(all_tenants=True,
                                                               name=cls.datacenter_name)
        logger.debug("{}".format(self.datacenter))
        self.assertEqual(self.datacenter.get('datacenter', {}).get('name', ''), cls.datacenter_name)

    def test_020_attach_datacenter(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        self.datacenter = test_config["client"].attach_datacenter(name=cls.datacenter_name,
                                                                  vim_tenant_name='fake')
        logger.debug("{}".format(self.datacenter))
        assert 'uuid' in self.datacenter.get('datacenter', {})

    def test_030_list_attached_datacenter(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        self.datacenter = test_config["client"].get_datacenter(all_tenants=False,
                                                               name=cls.datacenter_name)
        logger.debug("{}".format(self.datacenter))
        self.assertEqual(self.datacenter.get('datacenter', {}).get('name', ''), cls.datacenter_name)

    def test_040_detach_datacenter(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        self.datacenter = test_config["client"].detach_datacenter(name=cls.datacenter_name)
        logger.debug("{}".format(self.datacenter))
        assert 'detached' in self.datacenter.get('result', "")

    def test_050_delete_datacenter(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        self.datacenter = test_config["client"].delete_datacenter(name=cls.datacenter_name)
        logger.debug("{}".format(self.datacenter))
        assert 'deleted' in self.datacenter.get('result', "")
+
+
class test_VIM_network_operations(test_base):
    """VIM network CRUD through the RO client's vim_action passthrough."""
    vim_network_name = None  # random name of the network created by test_000
    vim_network_uuid = None  # VIM id of that network, reused by later tests

    def test_000_create_VIM_network(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.vim_network_name = _get_random_string(20)
        cls.test_index += 1
        network = test_config["client"].vim_action("create", "networks", name=cls.vim_network_name)
        logger.debug("{}".format(network))
        cls.vim_network_uuid = network["network"]["id"]
        self.assertEqual(network.get('network', {}).get('name', ''), cls.vim_network_name)

    def test_010_list_VIM_networks(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        networks = test_config["client"].vim_action("list", "networks")
        logger.debug("{}".format(networks))

    def test_020_get_VIM_network_by_uuid(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        network = test_config["client"].vim_action("show", "networks", uuid=cls.vim_network_uuid)
        logger.debug("{}".format(network))
        self.assertEqual(network.get('network', {}).get('name', ''), cls.vim_network_name)

    def test_030_delete_VIM_network_by_uuid(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        network = test_config["client"].vim_action("delete", "networks", uuid=cls.vim_network_uuid)
        logger.debug("{}".format(network))
        assert 'deleted' in network.get('result', "")
+
+
class test_VIM_image_operations(test_base):
    """Read-only check: list the VIM images through the RO client."""

    def test_000_list_VIM_images(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1
        images = test_config["client"].vim_action("list", "images")
        logger.debug("{}".format(images))
+
+'''
+The following is a non critical test that will fail most of the times.
+In case of OpenStack datacenter these tests will only success if RO has access to the admin endpoint
+This test will only be executed in case it is specifically requested by the user
+'''
class test_VIM_tenant_operations(test_base):
    """VIM tenant CRUD through the RO client's vim_action passthrough.

    Non-critical: for an OpenStack datacenter these tests only succeed when
    RO has access to the admin endpoint, so they are executed only when
    specifically requested by the user.
    """
    vim_tenant_name = None  # random name of the tenant created by test_000
    vim_tenant_uuid = None  # VIM id of that tenant, reused by later tests

    @classmethod
    def setUpClass(cls):
        # Bug fix: 'test_base.setUpClass(cls)' raised TypeError, because a
        # classmethod accessed through the class is already bound (cls =
        # test_base), so the explicit 'cls' arrived as an unexpected extra
        # positional argument. super() performs the intended chained call.
        super(test_VIM_tenant_operations, cls).setUpClass()
        logger.warning("In case of OpenStack datacenter these tests will only success "
                       "if RO has access to the admin endpoint")

    def test_000_create_VIM_tenant(self):
        """Create a tenant with a random name and verify the echoed name."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.vim_tenant_name = _get_random_string(20)
        self.__class__.test_index += 1
        tenant = test_config["client"].vim_action("create", "tenants", name=self.__class__.vim_tenant_name)
        logger.debug("{}".format(tenant))
        self.__class__.vim_tenant_uuid = tenant["tenant"]["id"]
        self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.vim_tenant_name)

    def test_010_list_VIM_tenants(self):
        """List all VIM tenants (result only logged, not asserted)."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        tenants = test_config["client"].vim_action("list", "tenants")
        logger.debug("{}".format(tenants))

    def test_020_get_VIM_tenant_by_uuid(self):
        """Fetch the tenant created by test_000 by uuid and check its name."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        tenant = test_config["client"].vim_action("show", "tenants", uuid=self.__class__.vim_tenant_uuid)
        logger.debug("{}".format(tenant))
        self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.vim_tenant_name)

    def test_030_delete_VIM_tenant_by_uuid(self):
        """Delete the tenant created by test_000 and check the result text."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        tenant = test_config["client"].vim_action("delete", "tenants", uuid=self.__class__.vim_tenant_uuid)
        logger.debug("{}".format(tenant))
        assert 'deleted' in tenant.get('result', "")
+
+
class test_vimconn_connect(test_base):
    """Smoke-test the VIM connector: establishing a session must succeed."""

    def test_000_connect(self):
        cls = self.__class__
        cls.test_text = "{}.{}. TEST {}".format(
            test_config["test_number"], cls.test_index, inspect.currentframe().f_code.co_name)
        cls.test_index += 1

        vimtype = test_config['vimtype']
        if vimtype == 'vmware':
            # vmware exposes an explicit connect() returning a session object
            vca_object = test_config["vim_conn"].connect()
            logger.debug("{}".format(vca_object))
            self.assertIsNotNone(vca_object)
        elif vimtype in ('openstack', 'azure'):
            # these connectors are probed by reloading and listing networks
            test_config["vim_conn"]._reload_connection()
            network_list = test_config["vim_conn"].get_network_list()
            logger.debug("{}".format(network_list))
            self.assertIsNotNone(network_list)
+
+
class test_vimconn_new_network(test_base):
    """Exercise vimconnector.new_network() with several parameter
    combinations; every test deletes the network(s) it created."""
    network_name = None  # random name of the network under test

    def test_000_new_network(self):
        """Create a bridge network, verify it is listed with the right type, delete it."""
        self.__class__.network_name = _get_random_string(20)
        network_type = 'bridge'

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
                                                         net_type=network_type)
        self.__class__.network_id = network
        logger.debug("Created network {}".format(network))

        network_list = test_config["vim_conn"].get_network_list()
        logger.debug("Network list {}".format(network_list))
        for net in network_list:
            if self.__class__.network_name in net.get('name'):
                self.assertIn(self.__class__.network_name, net.get('name'))
                self.assertEqual(net.get('type'), network_type)

        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.__class__.network_id))

        network_list = test_config["vim_conn"].get_network_list()
        logger.debug("Network list after deletion {}".format(network_list))

    def test_010_new_network_by_types(self):
        """Create one network per type ('data', 'bridge', 'mgmt') and check the reported type."""
        delete_net_ids = []
        network_types = ['data','bridge','mgmt']
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        network_list = test_config["vim_conn"].get_network_list()
        logger.debug("Network list at start {}".format(network_list))
        self.__class__.test_index += 1
        for net_type in network_types:
            self.__class__.network_name = _get_random_string(20)
            network_id, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
                                                                net_type=net_type)

            delete_net_ids.append(network_id)
            logger.debug("{}".format(network_id))

            network_list = test_config["vim_conn"].get_network_list()
            for net in network_list:
                if self.__class__.network_name in net.get('name'):
                    self.assertIn(self.__class__.network_name, net.get('name'))
                    # the VIM may legitimately map the requested type to another one
                    if net_type in net.get('type'):
                        self.assertEqual(net.get('type'), net_type)
                    else:
                        self.assertNotEqual(net.get('type'), net_type)

        # Deleting created network
        for net_id in delete_net_ids:
            result = test_config["vim_conn"].delete_network(net_id)
            if result:
                logger.info("Network id {} sucessfully deleted".format(net_id))
            else:
                logger.info("Failed to delete network id {}".format(net_id))
        network_list = test_config["vim_conn"].get_network_list()
        logger.debug("Network list after test {}".format(network_list))

    def test_020_new_network_by_ipprofile(self):
        """Create a mgmt network from each ip-profile found in the sample VNFDs."""
        test_directory_content = os.listdir(test_config["test_directory"])

        # NOTE(review): if 'simple_multi_vnfc' is missing from the test
        # directory, vnfd_files is never assigned and the loop below raises
        # NameError — confirm the fixture directory is always present.
        for dir_name in test_directory_content:
            if dir_name == 'simple_multi_vnfc':
                self.__class__.scenario_test_path = test_config["test_directory"] + '/'+ dir_name
                vnfd_files = glob.glob(self.__class__.scenario_test_path+'/vnfd_*.yaml')
                break

        for vnfd in vnfd_files:
            with open(vnfd, 'r') as stream:
                # NOTE(review): yaml.Loader can construct arbitrary Python
                # objects; fine for local fixtures, but yaml.safe_load would
                # be the safer choice.
                vnf_descriptor = yaml.load(stream, Loader=yaml.Loader)

            #internal_connections_list = vnf_descriptor['vnf']['internal-connections']
            internal_connections_list = vnf_descriptor['vnfd-catalog']['vnfd'][0]['ip-profiles']
            for item in internal_connections_list:
                version = item['ip-version']
                dhcp_count = item['dhcp-params']['count']
                dhcp_enabled = item['dhcp-params']['enabled']
                dhcp_start_address = item['dhcp-params']['start-address']
                subnet_address = item['subnet-address']

                self.__class__.network_name = _get_random_string(20)
                # keys follow the vimconn ip_profile convention expected by new_network()
                ip_profile = {'dhcp_count': dhcp_count,
                              'dhcp_enabled': dhcp_enabled,
                              'dhcp_start_address': dhcp_start_address,
                              'ip_version': version,
                              'subnet_address': subnet_address
                              }
                self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                                   self.__class__.test_index,
                                                                   inspect.currentframe().f_code.co_name)
                self.__class__.test_index += 1
                network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
                                                                 net_type='mgmt',
                                                                 ip_profile=ip_profile)
                self.__class__.network_id = network
                logger.debug("{}".format(network))

                network_list = test_config["vim_conn"].get_network_list()
                logger.debug("Created network by ip_profile {}".format(network_list))
                for net in network_list:
                    if self.__class__.network_name in net.get('name'):
                        self.assertIn(self.__class__.network_name, net.get('name'))

                # Deleting created network
                result = test_config["vim_conn"].delete_network(self.__class__.network_id)
                if result:
                    logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
                else:
                    logger.info("Failed to delete network id {}".format(self.__class__.network_id))

    def test_030_new_network_by_isshared(self):
        """Create a shared bridge network and verify the 'shared' flag is reported."""
        self.__class__.network_name = _get_random_string(20)
        shared = True
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
                                                         net_type='bridge',
                                                         shared=shared)
        self.__class__.network_id = network
        logger.debug("{}".format(network))

        network_list = test_config["vim_conn"].get_network_list()
        for net in network_list:
            if self.__class__.network_name in net.get('name'):
                self.assertIn(self.__class__.network_name, net.get('name'))
                self.assertEqual(net.get('shared'), shared)

        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.__class__.network_id))

    def test_040_new_network_by_negative(self):
        """Create a network with an unknown net_type; creation itself is expected to succeed."""
        self.__class__.network_name = _get_random_string(20)
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
                                                         net_type='unknowntype')
        self.__class__.network_id = network
        logger.debug("{}".format(network))
        network_list = test_config["vim_conn"].get_network_list()
        for net in network_list:
            if self.__class__.network_name in net.get('name'):
                self.assertIn(self.__class__.network_name, net.get('name'))

        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.__class__.network_id))

    def test_050_refresh_nets_status(self):
        """refresh_nets_status() must report status ACTIVE for a freshly created network."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        # creating new network
        network_name = _get_random_string(20)
        net_type = 'bridge'
        network_id, _ = test_config["vim_conn"].new_network(net_name=network_name,
                                                            net_type=net_type)
        # refresh net status
        net_dict = test_config["vim_conn"].refresh_nets_status([network_id])
        for attr in net_dict[network_id]:
            if attr == 'status':
                self.assertEqual(net_dict[network_id][attr], 'ACTIVE')

        # Deleting created network
        result = test_config["vim_conn"].delete_network(network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(network_id))
        else:
            logger.info("Failed to delete network id {}".format(network_id))

    def test_060_refresh_nets_status_negative(self):
        """refresh_nets_status() on an unknown id must report DELETED (or {} on legacy connectors)."""
        unknown_net_id = str(uuid.uuid4())
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # refresh net status
        # if azure network name must have the following format
        if test_config['vimtype'] == 'azure':
            unknown_net_id = "/" + "/".join(["subscriptions", test_config["vim_conn"].subscription_id,
                                             "resourceGroups", test_config["vim_conn"].resource_group,
                                             "providers", "Microsoft.Network",
                                             "virtualNetworks", test_config["vim_conn"].vnet_name,
                                             "subnets", unknown_net_id])
            #unknown_net_id = "/subscriptions/ca3d18ab-d373-4afb-a5d6-7c44f098d16a/resourceGroups/osmRG/providers/Microsoft.Network/virtualNetworks/osm_vnet/subnets/unnkown_net"

        net_dict = test_config["vim_conn"].refresh_nets_status([unknown_net_id])
        if test_config['vimtype'] in ('openstack', 'azure', 'vmware'):
            self.assertEqual(net_dict[unknown_net_id]['status'], 'DELETED')
        else:
            # TODO : Fix vmware connector to return status DELETED as per vimconn.py
            self.assertEqual(net_dict, {})
+
class test_vimconn_get_network_list(test_base):
    """Exercise vimconn.get_network_list() with and without filter dicts.

    setUp creates one bridge network with a random name; each test then
    tries to locate it again through a different filter (name, id, shared
    flag, tenant_id, status) and checks the returned attributes.
    tearDown deletes the network.  State is shared between setUp and the
    tests through class attributes, as in the rest of this module.
    """
    # Random name of the network created in setUp (set per test run).
    network_name = None

    def setUp(self):
        """Create the bridge network the tests will look up."""
        # creating new network
        self.__class__.network_name = _get_random_string(20)
        self.__class__.net_type = 'bridge'
        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
                                                         net_type=self.__class__.net_type)
        self.__class__.network_id = network
        logger.debug("{}".format(network))

    def tearDown(self):
        """Delete the network created in setUp (outcome is only logged)."""
        test_base.tearDown(self)

        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.__class__.network_id))

    def test_000_get_network_list(self):
        """Unfiltered listing must contain our network as ACTIVE (and, except on azure, not shared)."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        network_list = test_config["vim_conn"].get_network_list()
        for net in network_list:
            # only assert on the entry belonging to the network we created
            if self.__class__.network_name in net.get('name'):
                self.assertIn(self.__class__.network_name, net.get('name'))
                self.assertEqual(net.get('type'), self.__class__.net_type)
                self.assertEqual(net.get('status'), 'ACTIVE')
                if test_config['vimtype'] != 'azure':
                    self.assertEqual(net.get('shared'), False)

    def test_010_get_network_list_by_name(self):
        """Filtering by name must return our network with matching attributes."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # openstack/azure resolve the name through get_network(); other
        # connectors expose a dedicated get_network_name_by_id() helper
        if test_config['vimtype'] in ('openstack', 'azure'):
            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
        else:
            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)

        # find network from list by it's name
        new_network_list = test_config["vim_conn"].get_network_list({'name': network_name})
        for list_item in new_network_list:
            if self.__class__.network_name in list_item.get('name'):
                self.assertEqual(network_name, list_item.get('name'))
                self.assertEqual(list_item.get('type'), self.__class__.net_type)
                self.assertEqual(list_item.get('status'), 'ACTIVE')

    def test_020_get_network_list_by_id(self):
        """Filtering by id must return entries with matching id/type/status."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # find network from list by it's id
        new_network_list = test_config["vim_conn"].get_network_list({'id':self.__class__.network_id})
        for list_item in new_network_list:
            if self.__class__.network_id in list_item.get('id'):
                self.assertEqual(self.__class__.network_id, list_item.get('id'))
                self.assertEqual(list_item.get('type'), self.__class__.net_type)
                self.assertEqual(list_item.get('status'), 'ACTIVE')

    def test_030_get_network_list_by_shared(self):
        """Filtering by shared=False plus name must return our private network."""
        Shared = False
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        if test_config['vimtype'] in ('openstack', 'azure'):
            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
        else:
            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
        # find network from list by it's shared value
        new_network_list = test_config["vim_conn"].get_network_list({'shared':Shared,
                                                                    'name':network_name})
        for list_item in new_network_list:
            if list_item.get('shared') == Shared:
                self.assertEqual(list_item.get('shared'), Shared)
                self.assertEqual(list_item.get('type'), self.__class__.net_type)
                self.assertEqual(network_name, list_item.get('name'))

    def test_040_get_network_list_by_tenant_id(self):
        """Filtering by the configured tenant's id plus name must match our network."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        tenant_list = test_config["vim_conn"].get_tenant_list()
        if test_config['vimtype'] in ('openstack', 'azure'):
            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
        else:
            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)

        for tenant_item in tenant_list:
            if test_config['tenant'] == tenant_item.get('name'):
                # find network from list by it's tenant id
                tenant_id = tenant_item.get('id')
                new_network_list = test_config["vim_conn"].get_network_list({'tenant_id':tenant_id,
                                                                            'name':network_name})
                for list_item in new_network_list:
                    self.assertEqual(tenant_id, list_item.get('tenant_id'))
                    self.assertEqual(network_name, list_item.get('name'))
                    self.assertEqual(list_item.get('type'), self.__class__.net_type)
                    self.assertEqual(list_item.get('status'), 'ACTIVE')

    def test_050_get_network_list_by_status(self):
        """Filtering by status=ACTIVE plus name must return our network."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        status = 'ACTIVE'

        if test_config['vimtype'] in ('openstack', 'azure'):
            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
        else:
            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)

        # find network from list by it's status
        new_network_list = test_config["vim_conn"].get_network_list({'status':status,
                                                                    'name': network_name})
        for list_item in new_network_list:
            self.assertIn(self.__class__.network_name, list_item.get('name'))
            self.assertEqual(list_item.get('type'), self.__class__.net_type)
            self.assertEqual(list_item.get('status'), status)

    def test_060_get_network_list_by_negative(self):
        """Filtering by a name that does not exist must return an empty list."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        network_list = test_config["vim_conn"].get_network_list({'name': 'unknown_name'})
        self.assertEqual(network_list, [])
+
class test_vimconn_get_network(test_base):
    """Covers vimconn.get_network() for an existing and a non-existent id."""
    network_name = None

    def setUp(self):
        """Create the bridge network that test_000 retrieves."""
        # creating new network
        cls = self.__class__
        cls.network_name = _get_random_string(20)
        cls.net_type = 'bridge'
        cls.network_id, _ = test_config["vim_conn"].new_network(net_name=cls.network_name,
                                                                net_type=cls.net_type)
        logger.debug("{}".format(cls.network_id))

    def tearDown(self):
        """Remove the network created in setUp, logging the outcome."""
        test_base.tearDown(self)

        # Deleting created network
        net_id = self.__class__.network_id
        if test_config["vim_conn"].delete_network(net_id):
            logger.info("Network id {} sucessfully deleted".format(net_id))
        else:
            logger.info("Failed to delete network id {}".format(net_id))

    def test_000_get_network(self):
        """get_network() must return matching id/name/type and ACTIVE status."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        info = test_config["vim_conn"].get_network(self.__class__.network_id)
        self.assertEqual(info.get('status'), 'ACTIVE')
        self.assertIn(self.__class__.network_name, info.get('name'))
        self.assertEqual(info.get('type'), self.__class__.net_type)
        self.assertEqual(info.get('id'), self.__class__.network_id)

    def test_010_get_network_negative(self):
        """get_network() with a random unknown id must raise with http_code 404."""
        missing_id = str(uuid.uuid4())
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].get_network(missing_id)

        self.assertEqual(context.exception.http_code, 404)
+
class test_vimconn_delete_network(test_base):
    """Covers vimconn.delete_network() for existing and unknown network ids."""
    network_name = None

    def test_000_delete_network(self):
        """Create a bridge network, delete it, and verify it is no longer listed."""
        # Creating network
        cls = self.__class__
        cls.network_name = _get_random_string(20)
        cls.net_type = 'bridge'
        cls.network_id, _ = test_config["vim_conn"].new_network(net_name=cls.network_name,
                                                                net_type=cls.net_type)
        logger.debug("{}".format(cls.network_id))

        cls.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                cls.test_index,
                                                inspect.currentframe().f_code.co_name)
        cls.test_index += 1

        if test_config["vim_conn"].delete_network(cls.network_id):
            logger.info("Network id {} sucessfully deleted".format(cls.network_id))
        else:
            logger.info("Failed to delete network id {}".format(cls.network_id))
        # give the VIM a moment to propagate the deletion
        time.sleep(5)
        # after deleting network we check in network list
        remaining = test_config["vim_conn"].get_network_list({'id': cls.network_id})
        self.assertEqual(remaining, [])

    def test_010_delete_network_negative(self):
        """delete_network() with a random unknown id must raise with http_code 404."""
        missing_id = str(uuid.uuid4())

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].delete_network(missing_id)

        self.assertEqual(context.exception.http_code, 404)
+
class test_vimconn_get_flavor(test_base):
    """Covers vimconn.get_flavor() for existing and unknown flavor ids."""

    def test_000_get_flavor(self):
        """Create a flavor from each simple_linux VNFD and read it back by id.

        For every vnfd_*.yaml descriptor found under the 'simple_linux'
        scenario directory, take the ram/vcpus/disk of the last VNFC that
        declares all three, create a flavor with those values, fetch it with
        get_flavor(), compare, and delete the flavor again.
        """
        test_directory_content = os.listdir(test_config["test_directory"])

        # Locate the simple_linux scenario first.  Initializing vnfd_files
        # avoids the NameError the old code hit when the directory (or any
        # matching descriptor) was missing.
        vnfd_files = []
        for dir_name in test_directory_content:
            if dir_name == 'simple_linux':
                self.__class__.scenario_test_path = test_config["test_directory"] + '/' + dir_name
                vnfd_files = glob.glob(self.__class__.scenario_test_path + '/vnfd_*.yaml')
                break

        if not vnfd_files:
            self.skipTest("No vnfd_*.yaml descriptors found for the simple_linux scenario")

        for vnfd in vnfd_files:
            with open(vnfd, 'r') as stream:
                vnf_descriptor = yaml.load(stream, Loader=yaml.Loader)

            # Preserve the original semantics: the values of the LAST VNFC
            # that defines ram, vcpus and disk win for this descriptor.
            specs = None
            for item in vnf_descriptor['vnf']['VNFC']:
                if 'ram' in item and 'vcpus' in item and 'disk' in item:
                    specs = (item['ram'], item['vcpus'], item['disk'])
            if specs is None:
                # no fully-specified VNFC in this descriptor: nothing to create
                # (previously this raised NameError on ram/vcpus/disk)
                continue
            ram, vcpus, disk = specs

            flavor_data = {
                'name' : _get_random_string(20),
                'ram': ram,
                'vcpus': vcpus,
                'disk': disk
            }

            self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                               self.__class__.test_index,
                                                               inspect.currentframe().f_code.co_name)
            self.__class__.test_index += 1
            # create new flavor
            flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
            # get flavor by id
            result = test_config["vim_conn"].get_flavor(flavor_id)
            self.assertEqual(ram, result['ram'])
            self.assertEqual(vcpus, result['vcpus'])
            self.assertEqual(disk, result['disk'])

            # delete flavor
            result = test_config["vim_conn"].delete_flavor(flavor_id)
            if result:
                logger.info("Flavor id {} sucessfully deleted".format(result))
            else:
                logger.info("Failed to delete flavor id {}".format(result))

    def test_010_get_flavor_negative(self):
        """get_flavor() with an unknown id must raise with http_code 404."""
        Non_exist_flavor_id = str(uuid.uuid4())

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].get_flavor(Non_exist_flavor_id)

        self.assertEqual((context.exception).http_code, 404)
+
class test_vimconn_new_flavor(test_base):
    """Covers new_flavor()/delete_flavor(), including azure's lack of support.

    The azure connector does not support flavor management: every flavor
    call is expected to raise with http_code 401 there.  Test ordering
    matters: test_010 deletes the flavor created by test_000 through the
    shared class attribute.
    """
    # id of the flavor created in test_000 (stays None on azure)
    flavor_id = None

    def test_000_new_flavor(self):
        """Create a flavor; the returned id must be a UUID string (non-azure)."""
        flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        if test_config['vimtype'] == 'azure':
            with self.assertRaises(Exception) as context:
                test_config["vim_conn"].new_flavor(flavor_data)

            self.assertEqual((context.exception).http_code, 401)
        else:
            # create new flavor
            self.__class__.flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
            self.assertIsInstance(self.__class__.flavor_id, str)
            self.assertIsInstance(uuid.UUID(self.__class__.flavor_id), uuid.UUID)

    def test_010_delete_flavor(self):
        """Delete the flavor created by test_000; raise if the VIM reports failure."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # delete flavor
        if test_config['vimtype'] == 'azure':
            with self.assertRaises(Exception) as context:
                test_config["vim_conn"].delete_flavor(self.__class__.flavor_id)

            self.assertEqual((context.exception).http_code, 401)
        else:
            result = test_config["vim_conn"].delete_flavor(self.__class__.flavor_id)
            if result:
                logger.info("Flavor id {} sucessfully deleted".format(result))
            else:
                # unlike the network tests, a failed flavor delete is fatal
                logger.error("Failed to delete flavor id {}".format(result))
                raise Exception ("Failed to delete created flavor")

    def test_020_new_flavor_negative(self):
        """new_flavor() with malformed data must raise (400; 401 on azure)."""
        Invalid_flavor_data = {'ram': '1024', 'vcpus': 2.0, 'disk': 2.0}

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].new_flavor(Invalid_flavor_data)
        if test_config['vimtype'] != 'azure':
            self.assertEqual((context.exception).http_code, 400)
        else:
            self.assertEqual((context.exception).http_code, 401)

    def test_030_delete_flavor_negative(self):
        """delete_flavor() with an unknown id must raise (404; 401 on azure)."""
        Non_exist_flavor_id = str(uuid.uuid4())

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].delete_flavor(Non_exist_flavor_id)

        if test_config['vimtype'] != 'azure':
            self.assertEqual((context.exception).http_code, 404)
        else:
            self.assertEqual((context.exception).http_code, 401)
+
+# class test_vimconn_new_image(test_base):
+#
+# def test_000_new_image(self):
+# self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+# self.__class__.test_index,
+# inspect.currentframe().f_code.co_name)
+# self.__class__.test_index += 1
+#
+# image_path = test_config['image_path']
+# if image_path:
+# self.__class__.image_id = test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : image_path, 'metadata': {'upload_location':None} })
+# time.sleep(20)
+#
+# self.assertIsInstance(self.__class__.image_id, (str, unicode))
+# self.assertIsInstance(uuid.UUID(self.__class__.image_id), uuid.UUID)
+# else:
+# self.skipTest("Skipping test as image file not present at RO container")
+#
+# def test_010_new_image_negative(self):
+# Non_exist_image_path = '/temp1/cirros.ovf'
+#
+# self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+# self.__class__.test_index,
+# inspect.currentframe().f_code.co_name)
+# self.__class__.test_index += 1
+#
+# with self.assertRaises(Exception) as context:
+# test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path})
+#
+# self.assertEqual((context.exception).http_code, 400)
+#
+# def test_020_delete_image(self):
+# self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+# self.__class__.test_index,
+# inspect.currentframe().f_code.co_name)
+# self.__class__.test_index += 1
+#
+# image_id = test_config["vim_conn"].delete_image(self.__class__.image_id)
+#
+# self.assertIsInstance(image_id, (str, unicode))
+#
+# def test_030_delete_image_negative(self):
+# Non_exist_image_id = str(uuid.uuid4())
+#
+# self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+# self.__class__.test_index,
+# inspect.currentframe().f_code.co_name)
+# self.__class__.test_index += 1
+#
+# with self.assertRaises(Exception) as context:
+# test_config["vim_conn"].delete_image(Non_exist_image_id)
+#
+# self.assertEqual((context.exception).http_code, 404)
+
+# class test_vimconn_get_image_id_from_path(test_base):
+#
+# def test_000_get_image_id_from_path(self):
+# self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+# self.__class__.test_index,
+# inspect.currentframe().f_code.co_name)
+# self.__class__.test_index += 1
+#
+# image_path = test_config['image_path']
+# if image_path:
+# image_id = test_config["vim_conn"].get_image_id_from_path( image_path )
+# self.assertEqual(type(image_id),str)
+# else:
+# self.skipTest("Skipping test as image file not present at RO container")
+#
+# def test_010_get_image_id_from_path_negative(self):
+# Non_exist_image_path = '/temp1/cirros.ovf'
+#
+# self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+# self.__class__.test_index,
+# inspect.currentframe().f_code.co_name)
+# self.__class__.test_index += 1
+#
+# with self.assertRaises(Exception) as context:
+# test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path })
+#
+# self.assertEqual((context.exception).http_code, 400)
+
class test_vimconn_get_image_list(test_base):
    """Covers vimconn.get_image_list() with name/id filters and a negative case.

    Test ordering matters: test_000 (non-azure) records the name/id of the
    last named image in the class attributes, which test_020 then reuses.
    """
    # name/id of an image discovered in test_000 and reused by later tests
    image_name = None
    image_id = None

    def test_000_get_image_list(self):
        """List all images; on azure the listing itself must fail with 401."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        if test_config['vimtype'] != 'azure':
            image_list = test_config["vim_conn"].get_image_list()
            logger.debug("{}: Result image list: {}".format(self.__class__.test_text, image_list))

            for item in image_list:
                if 'name' in item:
                    self.__class__.image_name = item['name']
                    self.__class__.image_id = item['id']
                    self.assertIsInstance(self.__class__.image_name, str)
                    self.assertIsInstance(self.__class__.image_id, str)
        else:
            with self.assertRaises(Exception) as context:
                image_list = test_config["vim_conn"].get_image_list()
            self.assertEqual((context.exception).http_code, 401)
            logger.debug(self.__class__.test_text + "Exception unauthorized: " + str(context.exception))

    def test_010_get_image_list_by_name(self):
        """Filtering by the configured image name must return only matching entries."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        self.__class__.image_name = test_config['image_name']
        logger.debug("{}: Image name: {}".format(self.__class__.test_text, self.__class__.image_name))

        image_list = test_config["vim_conn"].get_image_list({'name': self.__class__.image_name})
        logger.debug("{}: Result image list: {}".format(self.__class__.test_text, image_list))

        for item in image_list:
            self.assertIsInstance(item['id'], str)
            self.assertIsInstance(item['name'], str)
            #self.assertEqual(item['id'], self.__class__.image_id)
            self.assertEqual(item['name'], self.__class__.image_name)

    def test_020_get_image_list_by_id(self):
        """Filtering by the id recorded in test_000 must return that image."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        filter_image_list = test_config["vim_conn"].get_image_list({'id': self.__class__.image_id})

        for item1 in filter_image_list:
            self.assertIsInstance(item1['id'], str)
            self.assertIsInstance(item1['name'], str)
            self.assertEqual(item1['id'], self.__class__.image_id)
            self.assertEqual(item1['name'], self.__class__.image_name)

    def test_030_get_image_list_negative(self):
        """Filtering by an unknown name/id pair must return an empty list."""
        # pass the id as a string for consistency with the other negative
        # tests in this module, which all use str(uuid.uuid4()) filter values
        Non_exist_image_id = str(uuid.uuid4())
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        image_list = test_config["vim_conn"].get_image_list({'name': 'Unknown_name', 'id': Non_exist_image_id})

        self.assertIsNotNone(image_list, None)
        self.assertEqual(image_list, [])
+
+class test_vimconn_new_vminstance(test_base):
+ network_name = None
+ net_type = None
+ network_id = None
+ image_id = None
+ instance_id = None
+
+ def setUp(self):
+ # create network
+ self.__class__.network_name = _get_random_string(20)
+ self.__class__.net_type = 'bridge'
+
+ self.__class__.network_id, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+ net_type=self.__class__.net_type)
+ # find image name and image id
+ if test_config['image_name']:
+ image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
+ if len(image_list) == 0:
+ raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
+ else:
+ self.__class__.image_id = image_list[0]['id']
+ else:
+ image_list = test_config['vim_conn'].get_image_list()
+ if len(image_list) == 0:
+ raise Exception("Not found any image at VIM")
+ else:
+ self.__class__.image_id = image_list[0]['id']
+
+ def tearDown(self):
+ test_base.tearDown(self)
+ # Deleting created network
+ result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+ if result:
+ logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+ else:
+ logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+
    def test_000_new_vminstance(self):
        """Boot a VM on the setUp network and keep its id for the later tests.

        The instance id is stored as a class attribute so that
        test_070/test_090/test_110 can operate on the same VM.
        NOTE(review): neither the flavor nor the instance created here is
        deleted in this method.
        """
        vpci = "0000:00:11.0"
        name = "eth0"

        flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)


        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # single virtual NIC on the network created in setUp
        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci,
                     'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]

        self.__class__.instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='',
                                                                               start=False,
                                                                               image_id=self.__class__.image_id,
                                                                               flavor_id=flavor_id, net_list=net_list)

        self.assertIsInstance(self.__class__.instance_id, str)
+
+ def test_010_new_vminstance_by_model(self):
+ flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+ model_name = 'e1000'
+ name = 'eth0'
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+ 'model': model_name, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
+ image_id=self.__class__.image_id,
+ flavor_id=flavor_id,net_list=net_list)
+
+ self.assertIsInstance(instance_id, str)
+
+ # Deleting created vm instance
+ logger.info("Deleting created vm intance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+ def test_020_new_vminstance_by_net_use(self):
+ flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+ net_use = 'data'
+ name = 'eth0'
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': net_use, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
+ 'net_id': self.__class__.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
+ image_id=self.__class__.image_id,disk_list=None,
+ flavor_id=flavor_id, net_list=net_list)
+ self.assertIsInstance(instance_id, str)
+
+ # Deleting created vm instance
+ logger.info("Deleting created vm intance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+ def test_030_new_vminstance_by_net_type(self):
+ flavor_data = {'name':_get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+ _type = 'VF'
+ name = 'eth0'
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ if test_config['vimtype'] == 'vmware':
+ net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': _type, 'net_id': self.__class__.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
+ flavor_id=flavor_id,
+ net_list=net_list)
+ self.assertEqual(type(instance_id),str)
+
+ if test_config['vimtype'] in ('openstack', 'azure'):
+ # create network of type data
+ network_name = _get_random_string(20)
+ net_type = 'data'
+
+ network_id, _ = test_config["vim_conn"].new_network(net_name=network_name,
+ net_type=net_type)
+ net_list = [{'use': net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': _type, 'net_id': network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
+ image_id=self.__class__.image_id, disk_list=None,
+ flavor_id=flavor_id,
+ net_list=net_list)
+
+ self.assertEqual(type(instance_id), unicode)
+
+ # delete created network
+ result = test_config["vim_conn"].delete_network(network_id)
+ if result:
+ logger.info("Network id {} sucessfully deleted".format(network_id))
+ else:
+ logger.info("Failed to delete network id {}".format(network_id))
+
+ # Deleting created vm instance
+ logger.info("Deleting created vm intance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
    def test_040_new_vminstance_by_cloud_config(self):
        """Boot a VM with a full cloud-init payload and then delete it.

        The cloud_config injects three files, requests a boot-data drive,
        and adds an SSH key pair plus a 'cloudinit' user.
        """
        flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
        name = 'eth0'
        user_name = 'test_user'

        # public key material injected at instance level
        key_pairs = ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDtAjl5R+GSKP3gFrdFxgizKEUzhXKQbyjaxJH9thsK 0/fDiYlaNEjvijgPgiVZkfwvqgWeLprPcpzL2j4jvmmSJ3+7C8ihCwObWP0VUiuewmbIINBPAR0RqusjMRyPsa+q0asFBPOoZLx3Cv3vzmC1AA3mKuCNeT EuA0rlWhDIOVwMcU5sP1grnmuexQB8HcR7BdKcA9y08pTwnCQR8vmtW77SRkaxEGXm4Gnw5qw8Z27mHdk2wWS2SnbVH7aFwWvDXc6jjf5TpEWypdr/EAPC +eJipeS2Oa4FsntEqAu3Fz6gp/9ub8uNqgCgHfMzs6FhYpZpipwS0hXYyF6eVsSx osm@osm']

        # extra user created by cloud-init with its own key
        users_data = [{'key-pairs': ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDtAjl5R+GSKP3gFrdFxgizKEUzhXKQbyjaxJH9thsK0/fDiYlaNEjvijgPgiVZkfwvqgWeLprPcpzL2j4jvmmSJ3+7C8ihCwObWP0VUiuewmbIINBPAR0RqusjMRyPsa+q0asFBPOoZLx3Cv3vzmC1AA3mKuCNeTEuA0rlWhDIOVwMcU5sP1grnmuexQB8HcR7BdKcA9y08pTwnCQR8vmtW77SRkaxEGXm4Gnw5qw8Z27mHdk2wWS2SnbVH7aFwWvDXc6jjf5TpEWypdr/EAPC+eJipeS2Oa4FsntEqAu3Fz6gp/9ub8uNqgCgHfMzs6FhYpZpipwS0hXYyF6eVsSx osm@osm'], 'name': 'cloudinit'}]

        # network config, rc.local hook and a marker file, all with owners/permissions
        cloud_data = {'config-files': [{'content': 'auto enp0s3\niface enp0s3 inet dhcp\n', 'dest': '/etc/network/interfaces.d/enp0s3.cfg', 'owner': 'root:root', 'permissions': '0644'}, {'content': '#! /bin/bash\nls -al >> /var/log/osm.log\n', 'dest': '/etc/rc.local', 'permissions': '0755'}, {'content': 'file content', 'dest': '/etc/test_delete'}], 'boot-data-drive': True, 'key-pairs': key_pairs, 'users': users_data }
        #cloud_data = {'users': users_data }

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.__class__.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Cloud_vm', description='', start=False,
                                                                image_id=self.__class__.image_id,
                                                                flavor_id=flavor_id,net_list=net_list,
                                                                cloud_config=cloud_data)

        self.assertIsInstance(instance_id, str)

        # Deleting created vm instance
        logger.info("Deleting created vm intance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
+
+ def test_050_new_vminstance_by_disk_list(self):
+ flavor_data = {'name':_get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+ name = 'eth0'
+
+ device_data = [{'image_id': self.__class__.image_id, 'size': '10'}]
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='VM_test1', description='', start=False,
+ image_id=self.__class__.image_id,
+ flavor_id=flavor_id, net_list=net_list,
+ disk_list=device_data)
+
+ self.assertIsInstance(instance_id, str)
+ # Deleting created vm instance
+ logger.info("Deleting created vm intance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+ def test_060_new_vminstance_negative(self):
+ unknown_flavor_id = str(uuid.uuid4())
+ unknown_image_id = str(uuid.uuid4())
+ name = 'eth2'
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+ with self.assertRaises(Exception) as context:
+ test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
+ image_id=unknown_image_id,
+ flavor_id=unknown_flavor_id,
+ net_list=net_list)
+
+ self.assertIn((context.exception).http_code, (400, 404))
+
+
+ def test_070_get_vminstance(self):
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ # Get instance by its id
+ vm_info = test_config["vim_conn"].get_vminstance(self.__class__.instance_id)
+
+ if test_config['vimtype'] == 'vmware':
+ for attr in vm_info:
+ if attr == 'status':
+ self.assertEqual(vm_info[attr], 'ACTIVE')
+ if attr == 'hostId':
+ self.assertEqual(type(vm_info[attr]), str)
+ if attr == 'interfaces':
+ self.assertEqual(type(vm_info[attr]), list)
+ self.assertEqual(vm_info[attr][0]['IsConnected'], 'true')
+ if attr == 'IsEnabled':
+ self.assertEqual(vm_info[attr], 'true')
+
+ def test_080_get_vminstance_negative(self):
+ unknown_instance_id = str(uuid.uuid4())
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ with self.assertRaises(Exception) as context:
+ test_config["vim_conn"].get_vminstance(unknown_instance_id)
+
+ self.assertEqual((context.exception).http_code, 404)
+
    def test_090_refresh_vms_status(self):
        """refresh_vms_status() must report ACTIVE and a list of interfaces.

        vmware: polls the instance created by test_000 (class attribute).
        openstack/azure: boots a dedicated instance (plus a new flavor),
        polls it after a 30s grace period, then deletes the instance.
        NOTE(review): the flavor created in the openstack/azure branch is
        never deleted here.
        """
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        if test_config['vimtype'] == 'vmware':
            vm_list = []
            vm_list.append(self.__class__.instance_id)

            # refresh vm status
            vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
            for attr in vm_info[self.__class__.instance_id]:
                if attr == 'status':
                    self.assertEqual(vm_info[self.__class__.instance_id][attr], 'ACTIVE')
                if attr == 'interfaces':
                    self.assertEqual(type(vm_info[self.__class__.instance_id][attr]), list)

        if test_config['vimtype'] in ('openstack', 'azure'):
            vpci = "0000:00:11.0"
            name = "eth0"

            flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}

            # create new flavor
            flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
            # create new vm instance
            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci,
                         'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]

            instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
                                                                    image_id=self.__class__.image_id,
                                                                    flavor_id=flavor_id, net_list=net_list)

            # give the VIM time to bring the instance up before polling
            time.sleep(30)
            vm_list = []
            vm_list.append(instance_id)

            # refresh vm status
            vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
            for attr in vm_info[instance_id]:
                if attr == 'status':
                    self.assertEqual(vm_info[instance_id][attr], 'ACTIVE')
                if attr == 'interfaces':
                    self.assertEqual(type(vm_info[instance_id][attr]), list)

            #Deleting created vm instance
            logger.info("Deleting created vm intance")
            test_config["vim_conn"].delete_vminstance(instance_id)
            time.sleep(10)
+
+
+ def test_100_refresh_vms_status_negative(self):
+ unknown_id = str(uuid.uuid4())
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ vm_dict = test_config["vim_conn"].refresh_vms_status([unknown_id])
+
+ if test_config['vimtype'] == 'vmware':
+ self.assertEqual(vm_dict,{})
+
+ if test_config['vimtype'] in ('openstack', 'azure'):
+ self.assertEqual(vm_dict[unknown_id]['status'], 'DELETED')
+
+ def test_110_action_vminstance(self):
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ if test_config['vimtype'] == 'vmware':
+ action_list = ['shutdown', 'start', 'shutoff', 'rebuild', 'pause', 'resume']
+ # various action on vminstace
+ for action in action_list:
+ instance_id = test_config["vim_conn"].action_vminstance(self.__class__.instance_id,
+ {action: None})
+ self.assertEqual(instance_id, self.__class__.instance_id)
+
+ if test_config['vimtype'] in ('openstack', 'azure'):
+ # create new vm instance
+ vpci = "0000:00:11.0"
+ name = "eth0"
+
+ flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci,
+ 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+ new_instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='',
+ start=False, image_id=self.__class__.image_id,
+ flavor_id=flavor_id, net_list=net_list)
+
+ if test_config['vimtype'] == 'openstack':
+ action_list = ['shutdown','start','shutoff','rebuild','start','pause','start']
+ else:
+ action_list = ['shutdown','start','stop','start','shutoff','start','reboot']
+
+ # various action on vminstace
+ for action in action_list:
+ # sleep for sometime till status is changed
+ time.sleep(25)
+ instance_id = test_config["vim_conn"].action_vminstance(new_instance_id,
+ { action: None})
+
+ self.assertTrue(instance_id is None)
+
+ # Deleting created vm instance
+ logger.info("Deleting created vm intance")
+ test_config["vim_conn"].delete_vminstance(new_instance_id)
+ time.sleep(10)
+
+ def test_120_action_vminstance_negative(self):
+ non_exist_id = str(uuid.uuid4())
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ action = 'start'
+ with self.assertRaises(Exception) as context:
+ test_config["vim_conn"].action_vminstance(non_exist_id, { action: None})
+
+ self.assertEqual((context.exception).http_code, 404)
+
+
+ def test_130_delete_vminstance(self):
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ # Deleting created vm instance
+ logger.info("Deleting created vm instance")
+ test_config["vim_conn"].delete_vminstance(self.__class__.instance_id)
+ time.sleep(10)
+
+ def test_140_new_vminstance_sriov(self):
+ logger.info("Testing creation of sriov vm instance using {}".format(test_config['sriov_net_name']))
+ flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+ name = 'eth0'
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ sriov_net_name = test_config['sriov_net_name']
+ new_network_list = test_config["vim_conn"].get_network_list({'name': sriov_net_name})
+ for list_item in new_network_list:
+ self.assertEqual(sriov_net_name, list_item.get('name'))
+ self.__class__.sriov_network_id = list_item.get('id')
+
+ net_list = [{'use': 'data', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'VF',
+ 'net_id': self.__class__.sriov_network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_sriov_vm', description='', start=False,
+ image_id=self.__class__.image_id, flavor_id=flavor_id,
+ net_list=net_list)
+
+ self.assertIsInstance(instance_id, str)
+
+ logger.info("Waiting for created sriov-vm intance")
+ time.sleep(10)
+ # Deleting created vm instance
+ logger.info("Deleting created sriov-vm intance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
class test_vimconn_get_tenant_list(test_base):
    """Exercises vimconnector.get_tenant_list() with and without filters."""
    tenant_id = None  # id of the configured tenant, captured by test_000

    def test_000_get_tenant_list(self):
        """List every tenant and remember the id of the configured one."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # Getting tenant list
        tenant_list = test_config["vim_conn"].get_tenant_list()
        logger.debug(self.__class__.test_text + "Tenant list: " + str(tenant_list))

        for tenant in tenant_list:
            if test_config['tenant'] == tenant['name']:
                self.__class__.tenant_id = tenant['id']
            self.assertIsInstance(tenant['name'], str)
            self.assertIsInstance(tenant['id'], str)

    def test_010_get_tenant_list_by_id(self):
        """Filter the tenant list by the id captured in test_000."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # Getting filter tenant list by its id
        filtered = test_config["vim_conn"].get_tenant_list({'id': self.__class__.tenant_id})
        logger.debug(self.__class__.test_text + "Tenant list: " + str(filtered))

        for tenant in filtered:
            self.assertIsInstance(tenant['id'], str)
            self.assertEqual(tenant['id'], self.__class__.tenant_id)

    def test_020_get_tenant_list_by_name(self):
        """Filter the tenant list by the configured tenant name."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # Getting filter tenant list by its name
        filtered = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant']})
        logger.debug(self.__class__.test_text + "Tenant list: " + str(filtered))

        for tenant in filtered:
            self.assertIsInstance(tenant['name'], str)
            self.assertEqual(tenant['name'], test_config['tenant'])

    def test_030_get_tenant_list_by_name_and_id(self):
        """Filter the tenant list by both name and id at once."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # Getting filter tenant list by its name and id
        filtered = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant'],
                                                            'id': self.__class__.tenant_id})
        logger.debug(self.__class__.test_text + "Tenant list: " + str(filtered))

        for tenant in filtered:
            self.assertIsInstance(tenant['name'], str)
            self.assertIsInstance(tenant['id'], str)
            self.assertEqual(tenant['name'], test_config['tenant'])
            self.assertEqual(tenant['id'], self.__class__.tenant_id)

    def test_040_get_tenant_list_negative(self):
        """A filter that matches nothing must yield an empty list."""
        non_exist_tenant_name = "Tenant_123"
        non_exist_tenant_id = "kjhgrt456-45345kjhdfgnbdk-34dsfjdfg"
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        filtered = test_config["vim_conn"].get_tenant_list({'name': non_exist_tenant_name,
                                                            'id': non_exist_tenant_id})
        logger.debug(self.__class__.test_text + "Tenant list: " + str(filtered))

        self.assertEqual(filtered, [])
+
+
class test_vimconn_new_tenant(test_base):
    """Tenant create/delete tests.

    Azure does not support tenant CRUD through this connector, so every call
    there is expected to fail with HTTP 401 (unauthorized).
    """
    tenant_id = None  # id of the tenant created in test_000, deleted in test_020

    def test_000_new_tenant(self):
        """Create a tenant with a random name (non-azure) or expect 401 (azure)."""
        tenant_name = _get_random_string(20)
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        if test_config['vimtype'] != 'azure':
            self.__class__.tenant_id = test_config["vim_conn"].new_tenant(tenant_name, "")
            time.sleep(15)

            self.assertIsInstance(self.__class__.tenant_id, str)
        else:
            with self.assertRaises(Exception) as context:
                # Fixed: this used to pass self.__class__.tenant_id, which is
                # still None at this point; the generated tenant_name was intended.
                test_config["vim_conn"].new_tenant(tenant_name, "")
            self.assertEqual((context.exception).http_code, 401)
            logger.debug(self.__class__.test_text + "Exception unauthorized: " + str(context.exception))

    def test_010_new_tenant_negative(self):
        """A non-string tenant name must be rejected (400), or 401 on azure."""
        invalid_tenant_name = 10121  # deliberately not a string
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].new_tenant(invalid_tenant_name, "")

        if test_config['vimtype'] != 'azure':
            self.assertEqual((context.exception).http_code, 400)
        else:
            self.assertEqual((context.exception).http_code, 401)
            logger.debug(self.__class__.test_text + "Exception unauthorized: " + str(context.exception))

    def test_020_delete_tenant(self):
        """Delete the tenant created by test_000 (or expect 401 on azure)."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        if test_config['vimtype'] != 'azure':
            tenant_id = test_config["vim_conn"].delete_tenant(self.__class__.tenant_id)
            self.assertIsInstance(tenant_id, str)
        else:
            with self.assertRaises(Exception) as context:
                test_config["vim_conn"].delete_tenant(self.__class__.tenant_id)
            self.assertEqual((context.exception).http_code, 401)
            logger.debug(self.__class__.test_text + "Exception unauthorized: " + str(context.exception))

    def test_030_delete_tenant_negative(self):
        """Deleting a non-existent tenant must yield 404, or 401 on azure."""
        non_exist_tenant_name = 'Test_30_tenant'
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].delete_tenant(non_exist_tenant_name)

        if test_config['vimtype'] != 'azure':
            self.assertEqual((context.exception).http_code, 404)
        else:
            self.assertEqual((context.exception).http_code, 401)
            logger.debug(self.__class__.test_text + "Exception unauthorized: " + str(context.exception))
+
+
def get_image_id():
    """Return the id of the VIM image the tests should use.

    If test_config['image_name'] is set, that image is looked up at the VIM
    (raising if it is absent); otherwise the first image the VIM reports is
    used. Raises Exception when no suitable image exists.
    """
    if test_config['image_name']:
        image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
        if not image_list:
            raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
    else:
        image_list = test_config['vim_conn'].get_image_list()
        if not image_list:
            raise Exception("Not found any image at VIM")
    # both branches leave the candidate image in image_list[0]
    return image_list[0]['id']
+
+
class test_vimconn_vminstance_by_ip_address(test_base):
    """VM creation tests driven by explicit IP, floating-ip and MAC settings."""
    network_name = None
    network_id = None

    def setUp(self):
        # each test gets its own throw-away bridge network
        self.network_name = _get_random_string(20)

        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_ip_address(self):
        """Deploy a VM with a caller-provided IP address.

        Pre-requisite: the provided IP address must belong to the IP pool
        range used when the network was created.
        """
        nic_name = "eth0"
        # provide ip address
        ip_address = ''

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor({'ram': 1024, 'vcpus': 1, 'disk': 10})

        # find image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        interface = {'use': 'bridge', 'name': nic_name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id, 'ip_address': ip_address}

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=[interface])

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)

    def test_010_vminstance_by_ip_address_negative(self):
        """An IP address outside the subnet range must be rejected with 400."""
        nic_name = "eth1"
        # IP address not from subnet range
        invalid_ip_address = '10.10.12.1'

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor({'ram': 1024, 'vcpus': 1, 'disk': 10})

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        interface = {'use': 'bridge', 'name': nic_name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id, 'ip_address': invalid_ip_address}

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                   flavor_id=flavor_id,
                                                   net_list=[interface])
        self.assertEqual((context.exception).http_code, 400)

    def test_020_vminstance_by_floating_ip(self):
        """Deploy a VM whose NIC requests a floating IP."""
        nic_name = "eth1"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor({'ram': 1024, 'vcpus': 1, 'disk': 10})

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        interface = {'use': 'bridge', 'name': nic_name, 'floating_ip': True, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=[interface])

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)

    def test_030_vminstance_by_mac_address(self):
        """Deploy a VM whose NIC has a caller-provided MAC address."""
        nic_name = "eth1"
        mac_address = "74:54:2f:21:da:8c"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor({'ram': 1024, 'vcpus': 1, 'disk': 10})

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        interface = {'use': 'bridge', 'name': nic_name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id, 'mac_address': mac_address}

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=[interface])

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
+
class test_vimconn_vminstance_by_adding_10_nics(test_base):
    """Creates a VM with 10 NICs, one per freshly created bridge network."""
    network_name = None
    net_ids = []  # kept for interface compatibility; shadowed per instance in setUp

    def setUp(self):
        # Use a per-instance list: appending to the shared class-level list
        # would leak network ids from one test run into the next tearDown,
        # which would then try to delete already-deleted networks.
        self.net_ids = []
        # create 10 networks
        for _ in range(10):
            self.network_name = _get_random_string(20)
            network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                net_type='bridge')
            self.net_ids.append(network_id)

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created networks
        for net_id in self.net_ids:
            result = test_config["vim_conn"].delete_network(net_id)
            if result:
                logger.info("Network id {} sucessfully deleted".format(net_id))
            else:
                logger.info("Failed to delete network id {}".format(net_id))

    def test_000_vminstance_by_adding_10_nics(self):
        """Deploy a VM with one virtual NIC on each of the 10 networks."""
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # NICs are named eth1..eth10 (enumerate replaces the manual counter)
        net_list = []
        for index, net_id in enumerate(self.net_ids, start=1):
            net_list.append({'use': 'bridge', 'name': "eth{}".format(index), 'floating_ip': False,
                             'port_security': True, 'type': 'virtual', 'net_id': net_id})

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list)

        self.assertIsInstance(instance_id, str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
+
+
class test_vimconn_vminstance_by_existing_disk(test_base):
    """VM creation tests that attach extra disks (existing image, new volume,
    CDROM media). These follow vCD-specific call sequences."""
    # Name/id of the throw-away bridge network created in setUp for each test.
    network_name = None
    network_id = None

    def setUp(self):
        # create network
        self.network_name = _get_random_string(20)
        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_existing_disk(self):
        """ This testcase will add existing disk only if given catalog/image is free
        means not used by any other VM
        """

        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()
        # assumes an image named 'cirros' exists at the VIM -- TODO confirm
        cirros_image = test_config["vim_conn"].get_image_list({'name': 'cirros'})
        disk_list = [{'image_id': cirros_image[0]['id'], 'size': 5}]

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
                                                                image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list, disk_list=disk_list)

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)

    def test_010_vminstance_by_new_disk(self):
        """Deploy a VM with an additional newly-created 5 GB volume."""
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()
        # a disk entry without 'image_id' asks the VIM for a fresh empty volume
        disk_list = [{'size': '5'}]

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
                                                                image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list, disk_list=disk_list)

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)

    def test_020_vminstance_by_CDROM(self):
        """ This testcase will insert media file only if provided catalog
        has pre-created ISO media file into vCD
        """
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"
        # assumes a catalog item named 'Ubuntu' with an ISO exists -- TODO confirm
        image_list = test_config["vim_conn"].get_image_list({'name': 'Ubuntu'})
        disk_list = [{'image_id': image_list[0]['id'], 'device_type': 'cdrom'}]

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
                                                                image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list, disk_list=disk_list)

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
+
+
class test_vimconn_vminstance_by_affinity_anti_affinity(test_base):
    """VM placement test using availability-zone (host group) hints."""
    network_name = None
    network_id = None

    def setUp(self):
        # one disposable bridge network per test
        self.network_name = _get_random_string(20)
        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_affinity_anti_affinity(self):
        """Deploy a VM into the HOSTGROUP provided in the VIM config.

        Pre-requisites: the user has created Host Groups in vCenter with the
        respective hosts, and passed the group names in the availability_zone
        list while creating the VIM account.
        """
        nic_name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor({'ram': 1024, 'vcpus': 1, 'disk': 10})

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        interfaces = [{'use': 'bridge', 'name': nic_name, 'floating_ip': False, 'port_security': True,
                       'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
                                                                image_id=image_id, flavor_id=flavor_id,
                                                                net_list=interfaces, availability_zone_index=1,
                                                                availability_zone_list=['HG_174', 'HG_175'])

        self.assertEqual(type(instance_id), str)
        time.sleep(10)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
+
class test_vimconn_vminstance_by_numa_affinity(test_base):
    """Deploys a VM from a flavor that carries NUMA paired-thread constraints."""
    network_name = None
    network_id = None

    def setUp(self):
        # create network
        self.network_name = _get_random_string(20)
        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_numa_affinity(self):
        """Create (and delete) a VM whose flavor requests NUMA pinning."""
        # Fixed: the key used to be "' paired-threads'" (leading space), so the
        # paired-threads constraint was silently ignored by the connector.
        flavor_data = {'extended': {'numas': [{'paired-threads-id': [['1', '3'], ['2', '4']],
                                               'paired-threads': 2, 'memory': 1}]},
                       'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
                                                                image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list)

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
+
+
+'''
+IMPORTANT NOTE
+The following unittest class does not have the 'test_' on purpose. This test is the one used for the
+scenario based tests.
+'''
class descriptor_based_scenario_test(test_base):
    """Loads VNFD/NSD descriptors from a folder, instantiates the scenario,
    waits for it to become active, then tears everything down in LIFO order."""
    test_index = 0
    scenario_test_path = None  # folder holding the *.yaml descriptors for this run

    @classmethod
    def setUpClass(cls):
        """Reset per-run bookkeeping and locate the descriptor folder."""
        cls.test_index = 1
        cls.to_delete_list = []            # cleanup actions, newest inserted first (LIFO)
        cls.scenario_uuids = []            # uuids of scenarios created from descriptors
        cls.instance_scenario_uuids = []   # uuids of deployed scenario instances
        cls.scenario_test_path = test_config["test_directory"] + '/' + test_config["test_folder"]
        logger.info("{}. {} {}".format(test_config["test_number"], cls.__name__, test_config["test_folder"]))

    @classmethod
    def tearDownClass(cls):
        test_config["test_number"] += 1

    def test_000_load_scenario(self):
        """Parse the folder's yaml files and create the VNFs and the scenario."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                              inspect.currentframe().f_code.co_name,
                                                              test_config["test_folder"])
        self.__class__.test_index += 1
        # load VNFD and NSD
        descriptor_files = glob.glob(self.__class__.scenario_test_path + '/*.yaml')
        vnf_descriptors = []
        scenario_descriptors = []
        for descriptor_file in descriptor_files:
            with open(descriptor_file, 'r') as stream:
                descriptor = yaml.load(stream, Loader=yaml.Loader)
                # any of these top-level keys marks a VNF descriptor; anything else
                # is treated as a scenario (NSD) descriptor
                if "vnf" in descriptor or "vnfd:vnfd-catalog" in descriptor or "vnfd-catalog" in descriptor:
                    vnf_descriptors.append(descriptor)
                else:
                    scenario_descriptors.append(descriptor)

        scenario_file = glob.glob(self.__class__.scenario_test_path + '/scenario_*.yaml')
        # exactly one scenario plus at least one vnfd is required
        if not vnf_descriptors or not scenario_descriptors or len(scenario_descriptors) > 1:
            raise Exception("Test '{}' not valid. It must contain an scenario file and at least one vnfd file'".format(
                test_config["test_folder"]))

        # load all vnfd
        for vnf_descriptor in vnf_descriptors:
            logger.debug("VNF descriptor: {}".format(vnf_descriptor))
            vnf = test_config["client"].create_vnf(descriptor=vnf_descriptor, image_name=test_config["image_name"])
            logger.debug(vnf)
            # the answer shape differs between API versions
            if 'vnf' in vnf:
                vnf_uuid = vnf['vnf']['uuid']
            else:
                vnf_uuid = vnf['vnfd'][0]['uuid']
            # insert at the front so cleanup runs in reverse creation order
            self.__class__.to_delete_list.insert(0, {"item": "vnf", "function": test_config["client"].delete_vnf,
                                                     "params": {"uuid": vnf_uuid}})

        # load the scenario definition
        for scenario_descriptor in scenario_descriptors:
            # networks = scenario_descriptor['scenario']['networks']
            # networks[test_config["mgmt_net"]] = networks.pop('mgmt')
            logger.debug("Scenario descriptor: {}".format(scenario_descriptor))
            scenario = test_config["client"].create_scenario(descriptor=scenario_descriptor)
            logger.debug(scenario)
            # again, answer shape depends on the API version
            if 'scenario' in scenario:
                scenario_uuid = scenario['scenario']['uuid']
            else:
                scenario_uuid = scenario['nsd'][0]['uuid']
            self.__class__.to_delete_list.insert(0, {"item": "scenario",
                                                     "function": test_config["client"].delete_scenario,
                                                     "params": {"uuid": scenario_uuid}})
            self.__class__.scenario_uuids.append(scenario_uuid)

    def test_010_instantiate_scenario(self):
        """Instantiate every loaded scenario on the management network."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                              inspect.currentframe().f_code.co_name,
                                                              test_config["test_folder"])
        self.__class__.test_index += 1
        for scenario_uuid in self.__class__.scenario_uuids:
            instance_descriptor = {
                "instance": {
                    "name": self.__class__.test_text,
                    "scenario": scenario_uuid,
                    "networks": {
                        # map the descriptor's 'mgmt' network onto the real one
                        "mgmt": {"sites": [{"netmap-use": test_config["mgmt_net"]}]}
                    }
                }
            }
            instance = test_config["client"].create_instance(instance_descriptor)
            self.__class__.instance_scenario_uuids.append(instance['uuid'])
            logger.debug(instance)
            self.__class__.to_delete_list.insert(0, {"item": "instance",
                                                     "function": test_config["client"].delete_instance,
                                                     "params": {"uuid": instance['uuid']}})

    def test_020_check_deployent(self):
        """Poll until every instantiated scenario is active, or time out."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                              inspect.currentframe().f_code.co_name,
                                                              test_config["test_folder"])
        self.__class__.test_index += 1

        if test_config["manual"]:
            input('Scenario has been deployed. Perform manual check and press any key to resume')
            return

        keep_waiting = test_config["timeout"]
        pending_instance_scenario_uuids = list(self.__class__.instance_scenario_uuids)  # make a copy
        while pending_instance_scenario_uuids:
            index = 0
            while index < len(pending_instance_scenario_uuids):
                # check_instance_scenario_active() is defined elsewhere in this file;
                # result[0] is a success flag, result[1] a status/progress string
                result = check_instance_scenario_active(pending_instance_scenario_uuids[index])
                if result[0]:
                    del pending_instance_scenario_uuids[index]
                    # breaks to the outer loop after each removal; the remaining
                    # uuids are re-checked on the next pass, after sleeping
                    break
                elif 'ERROR' in result[1]:
                    msg = 'Got error while waiting for the instance to get active: '+result[1]
                    logging.error(msg)
                    raise Exception(msg)
                index += 1

            # sleep in 5-second slices, consuming the configured timeout budget
            if keep_waiting >= 5:
                time.sleep(5)
                keep_waiting -= 5
            elif keep_waiting > 0:
                time.sleep(keep_waiting)
                keep_waiting = 0
            else:
                msg = 'Timeout reached while waiting instance scenario to get active'
                logging.error(msg)
                raise Exception(msg)

    def test_030_clean_deployment(self):
        """Delete instances, scenarios and VNFs in reverse creation order."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                              inspect.currentframe().f_code.co_name,
                                                              test_config["test_folder"])
        self.__class__.test_index += 1
        # At the moment if you delete an scenario right after creating it, in openstack datacenters
        # sometimes scenario ports get orphaned. This sleep is just a dirty workaround
        time.sleep(5)
        for item in self.__class__.to_delete_list:
            response = item["function"](**item["params"])
            logger.debug(response)
+
+
def _get_random_string(maxLength):
    """Return a random test name: 'testing_' followed by random letters/digits.

    The total length is chosen at random between 15 and maxLength characters.
    If maxLength is < 15 it is raised to 15 automatically.
    """
    prefix = 'testing_'
    min_string = 15
    minLength = min_string - len(prefix)
    if maxLength < min_string:
        maxLength = min_string
    maxLength -= len(prefix)
    length = random.randint(minLength, maxLength)
    alphabet = string.ascii_letters + string.digits
    # use the prefix variable consistently (was a duplicated 'testing_' literal)
    return prefix + "".join(random.choice(alphabet) for _ in range(length))
+
+
def test_vimconnector(args):
    """Run the 'vimconn' test set directly against a VIM connector plugin.

    Imports the connector module matching args.vimtype, builds the connector
    object from the credentials given in args/--config and stores everything
    the test classes need in the global test_config dict. Then discovers and
    runs the unittest classes of this module whose name starts with
    'test_vimconn'.

    :param args: argparse Namespace (vimtype, tenant_name, config_param,
        endpoint_url, image_path, image_name, sriov_net_name, debug,
        list_tests, tests)
    :return: tuple (executed, failed) with run and failed/error test counts
    :raises SystemExit: on unknown vimtype, unknown test name, --list-tests,
        or first failure when --failfast is set
    """
    global test_config
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
    test_config['vimtype'] = args.vimtype
    if args.vimtype == "vmware":
        from osm_rovim_vmware import vimconn_vmware as vim

        test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"

        tenant_name = args.tenant_name
        test_config['tenant'] = tenant_name
        config_params = yaml.load(args.config_param, Loader=yaml.Loader)
        org_name = config_params.get('orgname')
        org_user = config_params.get('user')
        org_passwd = config_params.get('passwd')
        vim_url = args.endpoint_url
        test_config['image_path'] = args.image_path
        test_config['image_name'] = args.image_name
        test_config['sriov_net_name'] = args.sriov_net_name

        # vmware connector obj
        test_config['vim_conn'] = vim.vimconnector(name=org_name, tenant_name=tenant_name, user=org_user,
                                                   passwd=org_passwd, url=vim_url, config=config_params)

    elif args.vimtype == "aws":
        from osm_rovim_aws import vimconn_aws as vim
    elif args.vimtype == "openstack":
        from osm_rovim_openstack import vimconn_openstack as vim

        test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"

        tenant_name = args.tenant_name
        test_config['tenant'] = tenant_name
        config_params = yaml.load(args.config_param, Loader=yaml.Loader)
        os_user = config_params.get('user')
        os_passwd = config_params.get('passwd')
        vim_url = args.endpoint_url
        test_config['image_path'] = args.image_path
        test_config['image_name'] = args.image_name
        test_config['sriov_net_name'] = args.sriov_net_name

        # openstack connector obj
        vim_persistent_info = {}
        test_config['vim_conn'] = vim.vimconnector(
            uuid="test-uuid-1", name="VIO-openstack",
            tenant_id=None, tenant_name=tenant_name,
            url=vim_url, url_admin=None,
            user=os_user, passwd=os_passwd,
            config=config_params, persistent_info=vim_persistent_info
        )
        test_config['vim_conn'].debug = "true"

    elif args.vimtype == "openvim":
        from osm_rovim_openvim import vimconn_openvim as vim
    elif args.vimtype == "azure":
        from osm_rovim_azure import vimconn_azure as vim

        test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"

        tenant_name = args.tenant_name
        test_config['tenant'] = tenant_name
        # Fix: pass an explicit Loader, consistent with the vmware/openstack
        # branches and avoiding the PyYAML unsafe-load deprecation warning
        config_params = yaml.load(args.config_param, Loader=yaml.Loader)
        os_user = config_params.get('user')
        os_passwd = config_params.get('passwd')
        vim_url = args.endpoint_url
        test_config['image_path'] = args.image_path
        test_config['image_name'] = args.image_name
        #test_config['sriov_net_name'] = args.sriov_net_name
        args_log_level = "DEBUG" if args.debug else "INFO"

        # azure connector obj
        vim_persistent_info = {}
        test_config['vim_conn'] = vim.vimconnector(
            uuid="test-uuid-1", name="VIO-azure",
            tenant_id=None, tenant_name=tenant_name,
            url=vim_url, url_admin=None,
            user=os_user, passwd=os_passwd, log_level= args_log_level,
            config=config_params, persistent_info=vim_persistent_info
        )
        test_config['vim_conn'].debug = "true"

    else:
        logger.critical("vimtype '{}' not supported".format(args.vimtype))
        sys.exit(1)
    executed = 0
    failed = 0
    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    # If only want to obtain a tests list print it and exit
    if args.list_tests:
        tests_names = []
        for cls in clsmembers:
            if cls[0].startswith('test_vimconn'):
                tests_names.append(cls[0])

        msg = "The 'vim' set tests are:\n\t" + ', '.join(sorted(tests_names))
        print(msg)
        logger.info(msg)
        sys.exit(0)

    # Create the list of tests to be run
    code_based_tests = []
    if args.tests:
        for test in args.tests:
            for t in test.split(','):
                matches_code_based_tests = [item for item in clsmembers if item[0] == t]
                if len(matches_code_based_tests) > 0:
                    code_based_tests.append(matches_code_based_tests[0][1])
                else:
                    logger.critical("Test '{}' is not among the possible ones".format(t))
                    sys.exit(1)
    if not code_based_tests:
        # include all tests
        for cls in clsmembers:
            if cls[0].startswith('test_vimconn'):
                code_based_tests.append(cls[1])

    logger.debug("tests to be executed: {}".format(code_based_tests))

    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
    # This is handled in the tests using logging.
    stream = open('/dev/null', 'w')

    # Run code based tests
    basic_tests_suite = unittest.TestSuite()
    for test in code_based_tests:
        basic_tests_suite.addTest(unittest.makeSuite(test))
    result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
    executed += result.testsRun
    failed += len(result.failures) + len(result.errors)
    if failfast and failed:
        sys.exit(1)
    if len(result.failures) > 0:
        logger.debug("failures : {}".format(result.failures))
    if len(result.errors) > 0:
        logger.debug("errors : {}".format(result.errors))
    return executed, failed
+
+
def test_vim(args):
    """Run the 'vim' test set through the openmano client against a server.

    Builds an openmanoclient from the CLI arguments, discovers the unittest
    classes of this module whose name starts with 'test_VIM' and runs them
    (excluding 'test_VIM_tenant_operations' unless requested via --test).

    :param args: argparse Namespace (endpoint_url, tenant_name, datacenter,
        debug, list_tests, tests)
    :return: tuple (executed, failed) with run and failed/error test counts
    :raises SystemExit: on unknown test name, --list-tests, or first failure
        when --failfast is set
    """
    global test_config
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
    from osm_ro import openmanoclient
    n_executed = 0
    n_failed = 0
    test_config["client"] = openmanoclient.openmanoclient(
        endpoint_url=args.endpoint_url,
        tenant_name=args.tenant_name,
        datacenter_name=args.datacenter,
        debug=args.debug, logger=test_config["logger_name"])
    class_members = inspect.getmembers(sys.modules[__name__], inspect.isclass)

    # With --list-tests just print the available test names and leave
    if args.list_tests:
        available = sorted(name for name, _ in class_members if name.startswith('test_VIM'))
        msg = ("The 'vim' set tests are:\n\t" + ', '.join(available) +
               "\nNOTE: The test test_VIM_tenant_operations will fail in case the used datacenter is type OpenStack "
               "unless RO has access to the admin endpoint. Therefore this test is excluded by default")
        print(msg)
        logger.info(msg)
        sys.exit(0)

    # Resolve the requested test names into classes; abort on unknown names
    selected_classes = []
    if args.tests:
        for batch in args.tests:
            for name in batch.split(','):
                hits = [member for member in class_members if member[0] == name]
                if hits:
                    selected_classes.append(hits[0][1])
                else:
                    logger.critical("Test '{}' is not among the possible ones".format(name))
                    sys.exit(1)
    if not selected_classes:
        # Default selection: every test_VIM class except 'test_VIM_tenant_operations'
        selected_classes = [member[1] for member in class_members
                            if member[0].startswith('test_VIM') and member[0] != 'test_VIM_tenant_operations']

    logger.debug("tests to be executed: {}".format(selected_classes))

    # Silence the runner's direct output; results are reported via logging
    null_stream = open('/dev/null', 'w')

    suite = unittest.TestSuite()
    for test_class in selected_classes:
        suite.addTest(unittest.makeSuite(test_class))
    result = unittest.TextTestRunner(stream=null_stream, failfast=failfast).run(suite)
    n_executed += result.testsRun
    n_failed += len(result.failures) + len(result.errors)
    if failfast and n_failed:
        sys.exit(1)
    if result.failures:
        logger.debug("failures : {}".format(result.failures))
    if result.errors:
        logger.debug("errors : {}".format(result.errors))
    return n_executed, n_failed
+
+
def test_wim(args):
    """Run the 'wim' test set through the openmano client against a server.

    Builds an openmanoclient from the CLI arguments, discovers the unittest
    classes of this module whose name starts with 'test_WIM' and runs them.

    :param args: argparse Namespace (endpoint_url, tenant_name, datacenter,
        debug, list_tests, tests)
    :return: tuple (executed, failed) with run and failed/error test counts
    :raises SystemExit: on unknown test name, --list-tests, or first failure
        when --failfast is set
    """
    global test_config
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
    from osm_ro import openmanoclient
    executed = 0
    failed = 0
    test_config["client"] = openmanoclient.openmanoclient(
        endpoint_url=args.endpoint_url,
        tenant_name=args.tenant_name,
        datacenter_name=args.datacenter,
        debug=args.debug, logger=test_config["logger_name"])
    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    # If only want to obtain a tests list print it and exit
    if args.list_tests:
        tests_names = []
        for cls in clsmembers:
            if cls[0].startswith('test_WIM'):
                tests_names.append(cls[0])

        # NOTE(review): the trailing NOTE still mentions test_VIM_tenant_operations
        # (copied from test_vim) — confirm whether it applies to the wim set
        msg = "The 'wim' set tests are:\n\t" + ', '.join(sorted(tests_names)) +\
              "\nNOTE: The test test_VIM_tenant_operations will fail in case the used datacenter is type OpenStack " \
              "unless RO has access to the admin endpoint. Therefore this test is excluded by default"
        print(msg)
        logger.info(msg)
        sys.exit(0)

    # Create the list of tests to be run
    code_based_tests = []
    if args.tests:
        for test in args.tests:
            for t in test.split(','):
                matches_code_based_tests = [item for item in clsmembers if item[0] == t]
                if len(matches_code_based_tests) > 0:
                    code_based_tests.append(matches_code_based_tests[0][1])
                else:
                    logger.critical("Test '{}' is not among the possible ones".format(t))
                    sys.exit(1)
    if not code_based_tests:
        # include all tests of the wim set by default.
        # Fix: filter on 'test_WIM' — the original filtered on 'test_VIM'
        # (copy-paste from test_vim), so the wim set ran the vim tests instead
        # of the test_WIM classes that --list-tests advertises.
        for cls in clsmembers:
            if cls[0].startswith('test_WIM'):
                code_based_tests.append(cls[1])

    logger.debug("tests to be executed: {}".format(code_based_tests))

    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
    # This is handled in the tests using logging.
    stream = open('/dev/null', 'w')

    # Run code based tests
    basic_tests_suite = unittest.TestSuite()
    for test in code_based_tests:
        basic_tests_suite.addTest(unittest.makeSuite(test))
    result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
    executed += result.testsRun
    failed += len(result.failures) + len(result.errors)
    if failfast and failed:
        sys.exit(1)
    if len(result.failures) > 0:
        logger.debug("failures : {}".format(result.failures))
    if len(result.errors) > 0:
        logger.debug("errors : {}".format(result.errors))
    return executed, failed
+
+
def test_deploy(args):
    """Run the 'deploy' test set: instantiate each scenario descriptor found
    under the RO_tests directory against a real openmano server.

    Each sub-directory of RO_tests is one test; for each, a suite with
    'descriptor_based_scenario_test' is executed with test_config["test_folder"]
    pointing at that descriptor directory.

    :param args: argparse Namespace (endpoint_url, tenant_name, datacenter,
        image_name, mgmt_net, manual, debug, list_tests, tests)
    :return: tuple (executed, failed) with run and failed/error test counts
    :raises SystemExit: on unknown test name, --list-tests, or first failure
        when --failfast is set
    """
    global test_config
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
    from osm_ro import openmanoclient
    executed = 0
    failed = 0
    test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"
    test_config["image_name"] = args.image_name
    test_config["mgmt_net"] = args.mgmt_net
    test_config["manual"] = args.manual
    test_directory_content = os.listdir(test_config["test_directory"])
    # If only want to obtain a tests list print it and exit
    if args.list_tests:
        msg = "the 'deploy' set tests are:\n\t" + ', '.join(sorted(test_directory_content))
        print(msg)
        # logger.info(msg)
        sys.exit(0)

    # Create the list of tests to be run (each test is a descriptor directory)
    descriptor_based_tests = []
    if args.tests:
        for test in args.tests:
            for t in test.split(','):
                if t in test_directory_content:
                    descriptor_based_tests.append(t)
                else:
                    logger.critical("Test '{}' is not among the possible ones".format(t))
                    sys.exit(1)
    if not descriptor_based_tests:
        # include all tests
        descriptor_based_tests = test_directory_content

    # Fix: log the descriptor list actually executed (the original logged an
    # unrelated, always-empty 'code_based_tests' variable)
    logger.debug("tests to be executed: {}".format(descriptor_based_tests))

    # import openmanoclient from relative path
    test_config["client"] = openmanoclient.openmanoclient(
        endpoint_url=args.endpoint_url,
        tenant_name=args.tenant_name,
        datacenter_name=args.datacenter,
        debug=args.debug, logger=test_config["logger_name"])

    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
    # This is handled in the tests using logging.
    stream = open('/dev/null', 'w')
    # This scenario based tests are defined as directories inside the directory defined in 'test_directory'
    for test in descriptor_based_tests:
        test_config["test_folder"] = test
        test_suite = unittest.TestSuite()
        test_suite.addTest(unittest.makeSuite(descriptor_based_scenario_test))
        result = unittest.TextTestRunner(stream=stream, failfast=False).run(test_suite)
        executed += result.testsRun
        failed += len(result.failures) + len(result.errors)
        if failfast and failed:
            sys.exit(1)
        if len(result.failures) > 0:
            logger.debug("failures : {}".format(result.failures))
        if len(result.errors) > 0:
            logger.debug("errors : {}".format(result.errors))

    return executed, failed
+
if __name__=="__main__":

    # CLI entry point: one sub-command per test set ('deploy', 'vimconn', 'vim',
    # 'wim'), shared options in parent_parser, then logging setup and dispatch
    # to the selected test_* function via args.func.
    parser = ArgumentParser(description='Test RO module')
    parser.add_argument('-v','--version', action='version', help="Show current version",
                        version='%(prog)s version ' + __version__ + ' ' + version_date)

    # Common parameters shared by every sub-command (via parents=[parent_parser])
    parent_parser = ArgumentParser(add_help=False)
    parent_parser.add_argument('--failfast', help='Stop when a test fails rather than execute all tests',
                               dest='failfast', action="store_true", default=False)
    parent_parser.add_argument('--failed', help='Set logs to show only failed tests. --debug disables this option',
                               dest='failed', action="store_true", default=False)
    # Default log file: same directory and base name as this script, '.log' suffix
    default_logger_file = os.path.dirname(__file__)+'/'+os.path.splitext(os.path.basename(__file__))[0]+'.log'
    parent_parser.add_argument('--list-tests', help='List all available tests', dest='list_tests', action="store_true",
                               default=False)
    parent_parser.add_argument('--logger_file', dest='logger_file', default=default_logger_file,
                               help='Set the logger file. By default '+default_logger_file)
    parent_parser.add_argument("-t", '--tenant', dest='tenant_name', default="osm",
                               help="Set the openmano tenant to use for the test. By default 'osm'")
    parent_parser.add_argument('--debug', help='Set logs to debug level', dest='debug', action="store_true")
    parent_parser.add_argument('--timeout', help='Specify the instantiation timeout in seconds. By default 300',
                               dest='timeout', type=int, default=300)
    # 'append' allows repeating --test; each value may also be comma-separated
    parent_parser.add_argument('--test', '--tests', help='Specify the tests to run', dest='tests', action="append")

    subparsers = parser.add_subparsers(help='test sets')

    # Deployment test set
    # -------------------
    deploy_parser = subparsers.add_parser('deploy', parents=[parent_parser],
                                          help="test deployment using descriptors at RO_test folder ")
    deploy_parser.set_defaults(func=test_deploy)

    # Mandatory arguments
    mandatory_arguments = deploy_parser.add_argument_group('mandatory arguments')
    mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')
    mandatory_arguments.add_argument("-i", '--image-name', required=True, dest="image_name",
                                     help='Image name available at datacenter used for the tests')
    mandatory_arguments.add_argument("-n", '--mgmt-net-name', required=True, dest='mgmt_net',
                                     help='Set the vim management network to use for tests')

    # Optional arguments
    deploy_parser.add_argument('-m', '--manual-check', dest='manual', action="store_true", default=False,
                               help='Pause execution once deployed to allow manual checking of the '
                                    'deployed instance scenario')
    deploy_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
                               help="Set the openmano server url. By default 'http://localhost:9090/openmano'")

    # Vimconn test set
    # -------------------
    vimconn_parser = subparsers.add_parser('vimconn', parents=[parent_parser], help="test vimconnector plugin")
    vimconn_parser.set_defaults(func=test_vimconnector)
    # Mandatory arguments
    mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
    mandatory_arguments.add_argument('--vimtype', choices=['vmware', 'aws', 'openstack', 'openvim','azure'], required=True,
                                     help='Set the vimconnector type to test')
    mandatory_arguments.add_argument('-c', '--config', dest='config_param', required=True,
                                     help='Set the vimconnector specific config parameters in dictionary format')
    mandatory_arguments.add_argument('-u', '--url', dest='endpoint_url',required=True, help="Set the vim connector url or Host IP")
    # Optional arguments
    vimconn_parser.add_argument('-i', '--image-path', dest='image_path', help="Provide image path present at RO container")
    vimconn_parser.add_argument('-n', '--image-name', dest='image_name', help="Provide image name for test")
    # TODO add optional arguments for vimconn tests
    # vimconn_parser.add_argument("-i", '--image-name', dest='image_name', help='<HELP>'))
    vimconn_parser.add_argument('-s', '--sriov-net-name', dest='sriov_net_name', help="Provide SRIOV network name for test")

    # Datacenter test set
    # -------------------
    # NOTE: 'vimconn_parser' is intentionally reused for the 'vim' and 'wim'
    # sub-parsers below; only the last binding is still referenced afterwards
    vimconn_parser = subparsers.add_parser('vim', parents=[parent_parser], help="test vim")
    vimconn_parser.set_defaults(func=test_vim)

    # Mandatory arguments
    mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
    mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')

    # Optional arguments
    vimconn_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
                                help="Set the openmano server url. By default 'http://localhost:9090/openmano'")

    # WIM test set
    # -------------------
    vimconn_parser = subparsers.add_parser('wim', parents=[parent_parser], help="test wim")
    vimconn_parser.set_defaults(func=test_wim)

    # Mandatory arguments
    mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
    mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')

    # Optional arguments
    vimconn_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
                                help="Set the openmano server url. By default 'http://localhost:9090/openmano'")

    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # print str(args)
    test_config = {}

    # default logger level is INFO. Options --debug and --failed override this, being --debug prioritary
    logger_level = 'INFO'
    if args.debug:
        logger_level = 'DEBUG'
    elif args.failed:
        logger_level = 'WARNING'
    logger_name = os.path.basename(__file__)
    test_config["logger_name"] = logger_name
    logger = logging.getLogger(logger_name)
    logger.setLevel(logger_level)
    # module-level flag read by the test_* functions to stop on first failure
    failfast = args.failfast

    # Configure a logging handler to store in a logging file
    if args.logger_file:
        fileHandler = logging.FileHandler(args.logger_file)
        formatter_fileHandler = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
        fileHandler.setFormatter(formatter_fileHandler)
        logger.addHandler(fileHandler)

    # Configure a handler to print to stdout
    consoleHandler = logging.StreamHandler(sys.stdout)
    formatter_consoleHandler = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
    consoleHandler.setFormatter(formatter_consoleHandler)
    logger.addHandler(consoleHandler)

    logger.debug('Program started with the following arguments: ' + str(args))

    # set test config parameters
    test_config["timeout"] = args.timeout
    test_config["test_number"] = 1

    # Dispatch to the selected test-set function; all return (executed, failed)
    executed, failed = args.func(args)

    # Log summary
    logger.warning("Total number of tests: {}; Total number of failures/errors: {}".format(executed, failed))
    sys.exit(1 if failed else 0)
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+
+# Author: Alfonso Tierno (alfonso.tiernosepulveda@telefonica.com)
+
+description="It creates a new lxc container, installs RO from a concrete commit and executes validation tests.\
+ An openvim in test mode is installed and used to validate"
+
usage(){
    # Print the help text for this script.
    # Fix: corrected user-facing typo "envioronmental" -> "environmental".
    echo -e "usage: ${BASH_SOURCE[0]} CONTAINER\n  ${description}"
    echo -e "  CONTAINER is the name of the container to be created. By default test1"\
            "Warning! if a container with the same name exists, it will be deleted"
    echo -e "  You must also supply at TEST_RO_COMMIT environmental variable with the git command"\
            "to clone the version under test. It can be copy paste from gerrit. Examples:\n"\
            "   TEST_RO_COMMIT='git fetch https://osm.etsi.org/gerrit/osm/RO refs/changes/40/5540/1 && git checkout FETCH_HEAD'\n"\
            "   TEST_RO_COMMIT='git checkout v3.0.1'"
    echo -e "  You can provide TEST_RO_GIT_URL, by default https://osm.etsi.org/gerrit/osm/RO is used"
    echo -e "  You can provide TEST_RO_CONTAINER instead of by parameter, by default test1"
    echo -e "  You can provide TEST_RO_CUSTOM, with a command for container customization, by default nothing."
}
+
# Show help and leave when requested
[ "$1" = "--help" ] || [ "$1" = "-h" ] && usage && exit 0

# TEST_RO_COMMIT is mandatory; TEST_RO_GIT_URL and TEST_RO_CONTAINER have defaults
[[ -z "$TEST_RO_COMMIT" ]] && echo 'provide a TEST_RO_COMMIT variable. Type --help for more info' >&2 && exit 1
[[ -z "$TEST_RO_GIT_URL" ]] && TEST_RO_GIT_URL="https://osm.etsi.org/gerrit/osm/RO"

# Container name: first positional argument wins over the environment variable
[ -n "$1" ] && TEST_RO_CONTAINER="$1"
[[ -z "$TEST_RO_CONTAINER" ]] && TEST_RO_CONTAINER=test1

# Name of the instance-scenario created/deleted repeatedly by the tests below
instance_name=3vdu_2vnf
+
function echo_RO_log(){
    # Failure marker printed before aborting; the RO log dump is currently disabled
    # echo "LOG DUMP:" >&2 && lxc exec "$TEST_RO_CONTAINER" -- tail -n 150 /var/log/osm/openmano.log >&2
    echo -e "\nFAILED" >&2
}
+
function lxc_exec(){
    # Run the given command inside the test container with the openmano/openvim
    # environment preset; on failure report the command and abort the script
    if ! lxc exec "$TEST_RO_CONTAINER" --env OPENMANO_TENANT=osm --env OPENMANO_DATACENTER=local-openvim \
        --env OPENVIM_TENANT="$OPENVIM_TENANT" -- bash -c "$*"
    then
        echo "ERROR on command '$*'" >&2
        echo_RO_log
        exit 1
    fi
}
+
function wait_until_deleted(){
    # Poll openvim until no VM nor net of ${instance_name} remains;
    # abort after ~90 one-second polls
    wait_active=0
    while lxc_exec RO/test/local/openvim/openvim vm-list | grep -q -e ${instance_name} ||
        lxc_exec RO/test/local/openvim/openvim net-list | grep -q -e ${instance_name}
    do
        echo -n "."
        [ $wait_active -gt 90 ] && echo "timeout waiting VM and nets deleted at VIM" >&2 && echo_RO_log && exit 1
        wait_active=$((wait_active + 1))
        sleep 1
    done
    echo
}
+
# (Re)create the test container from a clean Ubuntu 16.04 image
lxc delete "$TEST_RO_CONTAINER" --force 2>/dev/null && echo "container '$TEST_RO_CONTAINER' deleted"
lxc launch ubuntu:16.04 "$TEST_RO_CONTAINER"
sleep 10
# Optional user-supplied customization hook (runs on the host, not the container)
[[ -n "$TEST_RO_CUSTOM" ]] && ${TEST_RO_CUSTOM}
lxc_exec ifconfig eth0 mtu 1446 # Avoid problems when inside an openstack VM that normally limit MTU do this value
lxc_exec git clone "$TEST_RO_GIT_URL"
lxc_exec git -C RO status
# Check out the exact commit/ref under test (TEST_RO_COMMIT is a full git command)
lxc_exec "cd RO && $TEST_RO_COMMIT"

# TEST INSTALL
lxc_exec RO/scripts/install-openmano.sh --noclone --force -q --updatedb -b master
sleep 10
lxc_exec openmano tenant-create osm
lxc_exec openmano tenant-list

# TEST database migration
# Downgrade to schema version 20 and back up, twice, to exercise both directions
lxc_exec ./RO/database_utils/migrate_mano_db.sh 20
lxc_exec ./RO/database_utils/migrate_mano_db.sh
lxc_exec ./RO/database_utils/migrate_mano_db.sh 20
lxc_exec ./RO/database_utils/migrate_mano_db.sh

# TEST instantiate with a fake local openvim
lxc_exec ./RO/test/basictest.sh -f --insert-bashrc --install-openvim reset add-openvim create delete


# TEST instantiate with a fake local openvim 2
lxc_exec ./RO/test/test_RO.py deploy -n mgmt -t osm -i cirros034 -d local-openvim --timeout=30 --failfast
lxc_exec ./RO/test/test_RO.py vim -t osm -d local-openvim --timeout=30 --failfast

sleep 10
echo "TEST service restart in the middle of a instantiation/deletion"
# First word of the tenant-list output is the tenant uuid
OPENVIM_TENANT=`lxc_exec RO/test/local/openvim/openvim tenant-list`
OPENVIM_TENANT=${OPENVIM_TENANT%% *}

lxc_exec openmano vnf-create RO/vnfs/examples/v3_3vdu_vnfd.yaml --image-name=cirros034
lxc_exec openmano scenario-create RO/scenarios/examples/v3_3vdu_2vnf_nsd.yaml
wait_until_deleted
test_number=0
while [ $test_number -lt 5 ] ; do
    echo test ${test_number}.0 test instantiation recovering
    # ";" chains the service stop right after launching the instantiation,
    # so RO is killed mid-operation and must recover on restart
    lxc_exec openmano instance-scenario-create --name ${instance_name} --scenario osm_id=3vdu_2vnf_nsd";"service osm-ro stop
    sleep 5
    lxc_exec service osm-ro start
    sleep 10
    # wait until all VM are active
    wait_active=0
    while [ `lxc_exec openmano instance-scenario-list ${instance_name} | grep ACTIVE | wc -l` -lt 7 ] ; do
        echo -n "."
        [ $wait_active -gt 90 ] && echo "timeout waiting VM active" >&2 && echo_RO_log && exit 1
        wait_active=$((wait_active + 1))
        sleep 1
    done
    echo

    # Due to race condition the VIM request can be processed without getting the response by RO
    # resulting in having some VM or net at VIM not registered by RO. If this is the case need to be deleted manually
    vim_vms=`lxc_exec RO/test/local/openvim/openvim vm-list | grep ${instance_name} | awk '{print $1}'`
    for vim_vm in $vim_vms ; do
        if ! lxc_exec openmano instance-scenario-list ${instance_name} | grep -q $vim_vm ; then
            echo deleting VIM vm $vim_vm
            lxc_exec RO/test/local/openvim/openvim vm-delete -f $vim_vm
        fi
    done
    vim_nets=`lxc_exec RO/test/local/openvim/openvim net-list | grep ${instance_name} | awk '{print $1}'`
    for vim_net in $vim_nets ; do
        if ! lxc_exec openmano instance-scenario-list ${instance_name} | grep -q $vim_net ; then
            echo deleting VIM net $vim_net
            lxc_exec RO/test/local/openvim/openvim net-delete -f $vim_net
        fi
    done

    # delete first VIM VM and wait until RO detects it
    echo test ${test_number}.1 test refresh VM VIM status deleted
    OPENVIM_VM=`lxc_exec RO/test/local/openvim/openvim vm-list`
    OPENVIM_VM=${OPENVIM_VM%% *}
    lxc_exec RO/test/local/openvim/openvim vm-delete -f $OPENVIM_VM
    wait_active=0
    while ! lxc_exec openmano instance-scenario-list ${instance_name} | grep -q DELETED ; do
        echo -n "."
        [ $wait_active -gt 90 ] && echo "timeout waiting RO get VM status as DELETED" >&2 && echo_RO_log && exit 1
        wait_active=$((wait_active + 1))
        sleep 1
        # NOTE(review): ACTIVE is assigned but never read — looks like leftover debug code
        ACTIVE=`lxc_exec openmano instance-scenario-list ${instance_name} | grep ACTIVE | wc -l`
    done
    echo

    # TEST service restart in the middle of a instantiation deletion
    echo test ${test_number}.2 test isntantiation deletion recovering
    lxc_exec openmano instance-scenario-delete ${instance_name} -f";"service osm-ro stop
    sleep 5
    lxc_exec service osm-ro start
    sleep 10
    # wait until all VM are deteled at VIM
    wait_until_deleted

    test_number=$((test_number + 1))
done
echo "DONE"
+
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#This script can be used as a basic test of openmano.
+#WARNING: It destroy the database content
+
+
function usage(){
    # Print usage help.
    # Fix: the third text fragment lacked a trailing backslash continuation, so
    # bash executed it as a command ("...: command not found") instead of
    # printing it as part of the echo. Also fixed the "datancenters" typo.
    echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] <action>\n  test openmano with fake tenant, datacenters, etc."\
            "It assumes that you have configured openmano cli with HOST,PORT,TENANT with environment variables"\
            "If not, it will use by default localhost:9080 and creates a new TENANT"
    echo -e "    -h --help      shows this help"
}
+
function is_valid_uuid(){
    # Succeed (status 0) when $1 matches the canonical lowercase 8-4-4-4-12
    # hexadecimal UUID layout; fail (status 1) otherwise
    if echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$'
    then
        return 0
    fi
    return 1
}
+
# Resolve directories relative to this script's real (symlink-free) location
DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
DIRmano=$(dirname $DIRNAME)
DIRscript=${DIRmano}/scripts

#detect paths of executables, preceding the relative paths
openmano=openmano && [[ -x "${DIRmano}/openmano" ]] && openmano="${DIRmano}/openmano"
service_openmano=service-openmano && [[ -x "$DIRscript/service-openmano" ]] &&
    service_openmano="$DIRscript/service-openmano"
initopenvim="initopenvim"
openvim="openvim"
+
function _exit()
{
    # Delete every object recorded in $ToDelete ("command:uuid" pairs, newest
    # first) and leave with the worst status seen: the status received in $1,
    # or 1 if any cleanup command failed.
    EXIT_STATUS=$1
    for item in $ToDelete
    do
        command=${item%%:*}
        uuid=${item#*:}
        # datacenter-detach does not accept -f; every other delete is forced
        [[ $command == "datacenter-detach" ]] && force="" || force=-f
        printf "%-50s" "$command $uuid:"
        ! $openmano $command $uuid $force >> /dev/null && echo FAIL && EXIT_STATUS=1 || echo OK
    done
    # Fix: when sourced, return $EXIT_STATUS (was 'return $1', which silently
    # dropped cleanup failures recorded in the loop above)
    [[ ${BASH_SOURCE[0]} != $0 ]] && return $EXIT_STATUS || exit $EXIT_STATUS
}
+
+
# process options
source ${DIRscript}/get-options.sh "force:-f help:h insert-bashrc init-openvim:initopenvim install-openvim screen" \
    $* || _exit 1

# help
[ -n "$option_help" ] && usage && _exit 0


# Objects created during the test are recorded in ToDelete as "command:uuid"
# pairs so that _exit can delete them in reverse order of creation.
# NOTE(review): 'fake-tenand2' looks like a typo for 'fake-tenant2', but the
# names are arbitrary test fixtures, so it is left as-is.
ToDelete=""
DCs="dc-fake1-openstack dc-fake2-openvim" #dc-fake3-vmware
Ts="fake-tenant1 fake-tenand2"
SDNs="sdn-fake1-opendaylight sdn-fake2-floodlight sdn-fake3-onos"

for T in $Ts
do
    printf "%-50s" "Creating fake tenant '$T':"
    ! result=`$openmano tenant-create "$T"` && echo FAIL && echo " $result" && _exit 1
    tenant=`echo $result |gawk '{print $1}'`
    ! is_valid_uuid $tenant && echo "FAIL" && echo " $result" && _exit 1
    echo $tenant
    ToDelete="tenant-delete:$tenant $ToDelete"
    # First created tenant becomes the default for all subsequent commands
    [[ -z "$OPENMANO_TENANT" ]] && export OPENMANO_TENANT=$tenant
done

index=0
for DC in $DCs
do
    index=$((index+1))
    printf "%-50s" "Creating datacenter '$DC':"
    # Datacenter type is the last dash-separated token of its name
    ! result=`$openmano datacenter-create "$DC" "http://$DC/v2.0" --type=${DC##*-} --config='{insecure: True}'` &&
        echo FAIL && echo " $result" && _exit 1
    datacenter=`echo $result |gawk '{print $1}'`
    ! is_valid_uuid $datacenter && echo "FAIL" && echo " $result" && _exit 1
    echo $datacenter
    eval DC${index}=$datacenter
    ToDelete="datacenter-delete:$datacenter $ToDelete"
    [[ -z "$datacenter_empty" ]] && datacenter_empty=datacenter

    printf "%-50s" "Attaching openmano tenant to the datacenter:"
    ! result=`$openmano datacenter-attach "$DC" --vim-tenant-name=osm --config='{insecure: False}'` &&
        echo FAIL && echo " $result" && _exit 1
    ToDelete="datacenter-detach:$datacenter $ToDelete"
    echo OK
done

printf "%-50s" "Datacenter list:"
! result=`$openmano datacenter-list` &&
    echo "FAIL" && echo " $result" && _exit 1
# Exercise every verbosity level of the list command
for verbose in "" -v -vv -vvv
do
    ! result=`$openmano datacenter-list "$DC" $verbose` &&
        echo "FAIL" && echo " $result" && _exit 1
done
echo OK

# Each SDN controller gets a unique dpid by appending an incrementing suffix
dpid_prefix=55:56:57:58:59:60:61:0
dpid_sufix=0
for SDN in $SDNs
do
    printf "%-50s" "Creating SDN controller '$SDN':"
    ! result=`$openmano sdn-controller-create "$SDN" --ip 4.5.6.7 --port 80 --type=${SDN##*-} \
        --user user --passwd p --dpid=${dpid_prefix}${dpid_sufix}` && echo "FAIL" && echo " $result" && _exit 1
    sdn=`echo $result |gawk '{print $1}'`
    #check a valid uuid is obtained
    ! is_valid_uuid $sdn && echo "FAIL" && echo " $result" && _exit 1
    echo $sdn
    ToDelete="sdn-controller-delete:$sdn $ToDelete"
    dpid_sufix=$((dpid_sufix+1))

done
# Edit each field of the last created controller ($sdn) one at a time
printf "%-50s" "Edit SDN-controller:"
for edit in user=u password=p ip=5.6.6.7 port=81 name=name dpid=45:55:54:45:44:44:55:67
do
    ! result=`$openmano sdn-controller-edit $sdn -f --"${edit}"` &&
        echo "FAIL" && echo " $result" && _exit 1
done
echo OK

printf "%-50s" "SDN-controller list:"
! result=`$openmano sdn-controller-list` &&
    echo "FAIL" && echo " $result" && _exit 1
for verbose in "" -v -vv -vvv
do
    ! result=`$openmano sdn-controller-list "$sdn" $verbose` &&
        echo "FAIL" && echo " $result" && _exit 1
done
echo OK

printf "%-50s" "Add sdn to datacenter:"
! result=`$openmano datacenter-edit -f $DC --sdn-controller $SDN` &&
    echo "FAIL" && echo " $result" && _exit 1 || echo OK

printf "%-50s" "Clear Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-clear -f $DC` &&
    echo "FAIL" && echo " $result" && _exit 1 || echo OK

printf "%-50s" "Set Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
    echo "FAIL" && echo " $result" && _exit 1 || echo OK

printf "%-50s" "List Port mapping:"
for verbose in "" -v -vv -vvv
do
    ! result=`$openmano datacenter-sdn-port-mapping-list "$DC" $verbose` &&
        echo "FAIL" && echo " $result" && _exit 1
done
echo OK

# Re-apply / clear the mapping several times to check idempotence
printf "%-50s" "Set again Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
    echo "FAIL" && echo " $result" && _exit 1 || echo OK

printf "%-50s" "Clear again Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-clear -f $DC` &&
    echo "FAIL" && echo " $result" && _exit 1 || echo OK

printf "%-50s" "Set again Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
    echo "FAIL" && echo " $result" && _exit 1 || echo OK

printf "%-50s" "Remove datacenter sdn:"
! result=`$openmano datacenter-edit -f $DC --sdn-controller null` &&
    echo "FAIL" && echo " $result" && _exit 1 || echo OK

# Negative test: listing must fail once the sdn controller has been removed
printf "%-50s" "Negative list port mapping:"
result=`$openmano datacenter-sdn-port-mapping-list $DC` &&
    echo "FAIL" && echo " $result" && _exit 1 || echo OK

printf "%-50s" "Add again datacenter sdn:"
! result=`$openmano datacenter-edit -f $DC --sdn-controller $SDN` &&
    echo "FAIL" && echo " $result" && _exit 1 || echo OK

# After re-attaching, the mapping list must be empty (6 header/footer lines)
printf "%-50s" "Empty list port mapping:"
! [[ `$openmano datacenter-sdn-port-mapping-list $DC | wc -l` -eq 6 ]] &&
    echo "FAIL" && _exit 1 || echo OK

printf "%-50s" "Set again Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
    echo "FAIL" && echo " $result" && _exit 1 || echo OK

_exit 0
+
--- /dev/null
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+Module to test openmanoclient class and indirectly the whole openmano
+It allows both python 2 and python 3
+'''
+__author__="Alfonso Tierno"
+__date__ ="$09-Mar-2016 09:09:48$"
+__version__="0.0.2"
+version_date="May 2016"
+
import imp
import logging
import random
+
+
+
+def _get_random_name(maxLength):
+ '''generates a string with random craracters from space (ASCCI 32) to ~(ASCCI 126)
+ with a random length up to maxLength
+ '''
+ long_name = "testing up to {} size name: ".format(maxLength)
+ #long_name += ''.join(chr(random.randint(32,126)) for _ in range(random.randint(20, maxLength-len(long_name))))
+ long_name += ''.join(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ') for _ in range(20, maxLength-len(long_name)))
+ return long_name
+
+
if __name__=="__main__":
    # Command-line driver: runs a numbered sequence of smoke tests against a
    # live openmano server (tenants, datacenters, WIMs, raw VIM actions,
    # VNFs, scenarios and instances), then deletes everything it created.
    import getopt
    #import os
    import sys



    usage =\
    """Make a test against an openmano server.\nUsage: test_openmanoclient [options]
    -v|--verbose: prints more info in the test
    --version: shows current version
    -h|--help: shows this help
    -d|--debug: set logs to debug level
    -t|--tenant: set the tenant name to test. By default creates one
    --datacenter: set the datacenter name to test. By default creates one at http://localhost:9080/openvim
    -u|--url: set the openmano server url. By default 'http://localhost:9090/openmano'
    --image: use this image path for testing a VNF. By default a fake one is generated, valid for VIM in test mode'
    """

    #import openmanoclient from relative path (the module lives one dir up)
    module_info = imp.find_module("openmanoclient", [".."] )
    Client = imp.load_module("Client", *module_info)

    streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s"
    logging.basicConfig(format=streamformat)
    try:
        opts, args = getopt.getopt(sys.argv[1:], "t:u:dhv", ["url=", "tenant=", "debug", "help", "version", "verbose", "datacenter=", "image="])
    except getopt.GetoptError as err:
        print ("Error: {}\n Try '{} --help' for more information".format(str(err), sys.argv[0]))
        sys.exit(2)

    # defaults, possibly overridden by the command-line options below
    debug = False
    verbose = False
    url = "http://localhost:9090/openmano"
    to_delete_list=[]   # LIFO of created items, each {"item", "function", "params"}
    test_tenant = None
    test_datacenter = None
    test_vim_tenant = None
    test_image = None
    for o, a in opts:
        if o in ("-v", "--verbose"):
            verbose = True
        # NOTE(review): ("--version") is a plain string, not a tuple, so 'in'
        # does substring matching here; it happens to work because the short
        # options are caught by earlier branches.  Same applies to
        # ("--datacenter") and ("--image") below.
        elif o in ("--version"):
            print ("{} version".format(sys.argv[0]), __version__, version_date)
            print ("(c) Copyright Telefonica")
            sys.exit()
        elif o in ("-h", "--help"):
            print(usage)
            sys.exit()
        elif o in ("-d", "--debug"):
            debug = True
        elif o in ("-u", "--url"):
            url = a
        elif o in ("-t", "--tenant"):
            test_tenant = a
        elif o in ("--datacenter"):
            test_datacenter = a
        elif o in ("--image"):
            test_image = a
        else:
            assert False, "Unhandled option"



    client = Client.openmanoclient(
        endpoint_url=url,
        tenant_name=test_tenant,
        datacenter_name = test_datacenter,
        debug = debug)

    import random
    test_number=1

    #TENANTS: create, list (all and filtered by name), get, delete
    print(" {}. TEST create_tenant".format(test_number))
    test_number += 1
    long_name = _get_random_name(60)

    tenant = client.create_tenant(name=long_name, description=long_name)
    if verbose: print(tenant)

    print(" {}. TEST list_tenants".format(test_number))
    test_number += 1
    tenants = client.list_tenants()
    if verbose: print(tenants)

    print(" {}. TEST list_tenans filter by name".format(test_number))
    test_number += 1
    tenants_ = client.list_tenants(name=long_name)
    if not tenants_["tenants"]:
        raise Exception("Text error, no TENANT found with name")
    if verbose: print(tenants_)

    print(" {}. TEST get_tenant by UUID".format(test_number))
    test_number += 1
    tenant = client.get_tenant(uuid=tenants_["tenants"][0]["uuid"])
    if verbose: print(tenant)

    print(" {}. TEST delete_tenant by name".format(test_number))
    test_number += 1
    tenant = client.delete_tenant(name = long_name)
    if verbose: print(tenant)

    if not test_tenant:
        # no tenant given on the command line: create a throw-away one and
        # schedule it for deletion at the end of the run
        print(" {}. TEST create_tenant for remaining tests".format(test_number))
        test_number += 1
        test_tenant = "test-tenant "+\
                      ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
        tenant = client.create_tenant(name = test_tenant)
        if verbose: print(tenant)
        client["tenant_name"] = test_tenant

        to_delete_list.insert(0,{"item": "tenant", "function": client.delete_tenant, "params":{"name": test_tenant} })

    #DATACENTERS: same create/list/get/delete cycle for datacenters
    print(" {}. TEST create_datacenter".format(test_number))
    test_number += 1
    long_name = _get_random_name(60)

    datacenter = client.create_datacenter(name=long_name, vim_url="http://fakeurl/fake")
    if verbose: print(datacenter)

    print(" {}. TEST list_datacenters".format(test_number))
    test_number += 1
    datacenters = client.list_datacenters(all_tenants=True)
    if verbose: print(datacenters)

    print(" {}. TEST list_tenans filter by name".format(test_number))
    test_number += 1
    datacenters_ = client.list_datacenters(all_tenants=True, name=long_name)
    if not datacenters_["datacenters"]:
        raise Exception("Text error, no TENANT found with name")
    if verbose: print(datacenters_)

    print(" {}. TEST get_datacenter by UUID".format(test_number))
    test_number += 1
    datacenter = client.get_datacenter(uuid=datacenters_["datacenters"][0]["uuid"], all_tenants=True)
    if verbose: print(datacenter)

    print(" {}. TEST delete_datacenter by name".format(test_number))
    test_number += 1
    datacenter = client.delete_datacenter(name=long_name)
    if verbose: print(datacenter)

    if not test_datacenter:
        # no datacenter given: create one pointing at a local openvim, plus a
        # VIM tenant inside it, and attach the openmano tenant to it
        print(" {}. TEST create_datacenter for remaining tests".format(test_number))
        test_number += 1
        test_datacenter = "test-datacenter "+\
                          ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
        datacenter = client.create_datacenter(name=test_datacenter, vim_url="http://127.0.0.1:9080/openvim")
        if verbose: print(datacenter)
        client["datacenter_name"] = test_datacenter
        to_delete_list.insert(0,{"item": "datacenter", "function": client.delete_datacenter,
                                 "params":{
                                     "name": test_datacenter
                                 }
                                 })

        print(" {}. TEST datacenter new tenenat".format(test_number))
        test_number += 1
        test_vim_tenant = "test-vimtenant "+\
                          ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
        vim_tenant = client.vim_action("create", "tenants", datacenter_name=test_datacenter, all_tenants=True, name=test_vim_tenant)
        if verbose: print(vim_tenant)
        client["datacenter_name"] = test_datacenter
        to_delete_list.insert(0,{"item": "vim_tenant",
                                 "function": client.vim_action,
                                 "params":{
                                     "action":"delete",
                                     "item":"tenants",
                                     "datacenter_name": test_datacenter,
                                     "all_tenants": True,
                                     "uuid": vim_tenant["tenant"]["id"]
                                 }
                                 })

        print(" {}. TEST datacenter attach".format(test_number))
        test_number += 1
        datacenter = client.attach_datacenter(name=test_datacenter, vim_tenant_name=test_vim_tenant)
        if verbose: print(datacenter)
        client["datacenter_name"] = test_datacenter
        to_delete_list.insert(0,{"item": "datacenter-detach", "function": client.detach_datacenter, "params":{"name": test_datacenter} })

    client["datacenter_name"] = test_datacenter

    # WIMs: create/list/get/delete cycle for WAN infrastructure managers
    print(" {}. TEST create_wim".format(test_number))
    test_number += 1
    long_name = _get_random_name(60)

    wim = client.create_wim(name=long_name, wim_url="http://fakeurl/fake")
    if verbose: print(wim)

    print(" {}. TEST list_wims".format(test_number))
    test_number += 1
    wims = client.list_wims(all_tenants=True)
    if verbose: print(wims)

    print(" {}. TEST list_tenans filter by name".format(test_number))
    test_number += 1
    wims_ = client.list_wims(all_tenants=True, name=long_name)
    if not wims_["wims"]:
        raise Exception("Text error, no TENANT found with name")
    if verbose: print(wims_)

    print(" {}. TEST get_wim by UUID".format(test_number))
    test_number += 1
    wim = client.get_wim(uuid=wims_["wims"][0]["uuid"], all_tenants=True)
    if verbose: print(wim)

    print(" {}. TEST delete_wim by name".format(test_number))
    test_number += 1
    wim = client.delete_wim(name=long_name)
    if verbose: print(wim)

    print(" {}. TEST create_wim for remaining tests".format(test_number))
    test_number += 1
    test_wim = "test-wim " + \
               ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
    # NOTE(review): this call passes vim_url= while create_wim above used
    # wim_url= -- confirm which keyword the client actually expects.
    wim = client.create_wim(name=test_wim, vim_url="http://127.0.0.1:9080/odl")
    if verbose: print(wim)
    to_delete_list.insert(0,
                          {
                              "item": "wim", "function": client.delete_wim,
                              "params":
                                  {
                                      "name": test_wim
                                  }
                          })

    test_wim_tenant = "test-wimtenant " + \
                      ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))

    # print(" {}. TEST datacenter new tenenat".format(test_number))
    # test_number += 1
    # test_vim_tenant = "test-vimtenant " + \
    #                   ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
    # vim_tenant = client.vim_action("create", "tenants", datacenter_name=test_datacenter, all_tenants=True,
    #                                name=test_vim_tenant)
    # if verbose: print(vim_tenant)
    # client["datacenter_name"] = test_datacenter
    # to_delete_list.insert(0, {"item": "vim_tenant",
    #                           "function": client.vim_action,
    #                           "params": {
    #                               "action": "delete",
    #                               "item": "tenants",
    #                               "datacenter_name": test_datacenter,
    #                               "all_tenants": True,
    #                               "uuid": vim_tenant["tenant"]["id"]
    #                           }
    #                           })

    print(" {}. TEST wim attach".format(test_number))
    test_number += 1
    wim = client.attach_wim(name=test_wim, wim_tenant_name=test_wim_tenant)
    if verbose: print(wim)
    to_delete_list.insert(0, {"item": "wim-detach", "function": client.detach_wim,
                              "params": {"name": test_wim}})

    #VIM_ACTIONS: drive the generic vim_action passthrough for tenants/networks
    print(" {}. TEST create_VIM_tenant".format(test_number))
    test_number += 1
    long_name = _get_random_name(60)

    tenant = client.vim_action("create", "tenants", name=long_name)
    if verbose: print(tenant)
    tenant_uuid = tenant["tenant"]["id"]

    print(" {}. TEST list_VIM_tenants".format(test_number))
    test_number += 1
    tenants = client.vim_action("list", "tenants")
    if verbose: print(tenants)

    print(" {}. TEST get_VIM_tenant by UUID".format(test_number))
    test_number += 1
    tenant = client.vim_action("show", "tenants", uuid=tenant_uuid)
    if verbose: print(tenant)

    print(" {}. TEST delete_VIM_tenant by id".format(test_number))
    test_number += 1
    tenant = client.vim_action("delete", "tenants", uuid = tenant_uuid)
    if verbose: print(tenant)

    print(" {}. TEST create_VIM_network".format(test_number))
    test_number += 1
    long_name = _get_random_name(60)

    network = client.vim_action("create", "networks", name=long_name)
    if verbose: print(network)
    network_uuid = network["network"]["id"]

    print(" {}. TEST list_VIM_networks".format(test_number))
    test_number += 1
    networks = client.vim_action("list", "networks")
    if verbose: print(networks)

    print(" {}. TEST get_VIM_network by UUID".format(test_number))
    test_number += 1
    network = client.vim_action("show", "networks", uuid=network_uuid)
    if verbose: print(network)

    print(" {}. TEST delete_VIM_network by id".format(test_number))
    test_number += 1
    network = client.vim_action("delete", "networks", uuid = network_uuid)
    if verbose: print(network)
    #VNFS: upload a minimal single-VM VNF descriptor and read it back
    print(" {}. TEST create_vnf".format(test_number))
    test_number += 1
    test_vnf_name = _get_random_name(255)
    if test_image:
        test_vnf_path = test_image
    else:
        # fake image path, valid only when the VIM runs in test mode
        test_vnf_path = "/random/path/" + "".join(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ') for _ in range(20))

    vnf_descriptor={'vnf': {'name': test_vnf_name,
                            'VNFC': [{'description': _get_random_name(255),
                                      'name': 'linux-VM',
                                      'VNFC image': test_vnf_path,
                                      'ram': 1024,
                                      'vcpus': 1,
                                      'bridge-ifaces': [{'name': 'eth0'}]
                                      }],
                            'description': _get_random_name(255),
                            'nets': [],
                            'external-connections': [{'name': 'eth0',
                                                      'local_iface_name': 'eth0',
                                                      'VNFC': 'linux-VM',
                                                      'type': 'bridge'}],
                            'public': False}}

    vnf = client.create_vnf(descriptor=vnf_descriptor)
    if verbose: print(vnf)
    to_delete_list.insert(0,{"item": "vnf", "function": client.delete_vnf, "params":{"name": test_vnf_name} })

    print(" {}. TEST list_vnfs".format(test_number))
    test_number += 1
    vnfs = client.list_vnfs()
    if verbose: print(vnfs)

    print(" {}. TEST list_vnfs filter by name".format(test_number))
    test_number += 1
    vnfs_ = client.list_vnfs(name=test_vnf_name)
    if not vnfs_["vnfs"]:
        raise Exception("Text error, no VNF found with name")
    if verbose: print(vnfs_)

    print(" {}. TEST get_vnf by UUID".format(test_number))
    test_number += 1
    vnf = client.get_vnf(uuid=vnfs_["vnfs"][0]["uuid"])
    if verbose: print(vnf)

    #SCENARIOS: build a one-VNF/one-net scenario on top of the VNF above
    print(" {}. TEST create_scenario".format(test_number))
    test_number += 1
    test_scenario_name = _get_random_name(255)

    scenario_descriptor={ 'schema_version': 2,
                          'scenario': {
                              'name': test_scenario_name,
                              'description': _get_random_name(255),
                              'public': True,
                              'vnfs':{
                                  'vnf1': {
                                      'vnf_name': test_vnf_name
                                  }
                              },
                              'networks':{
                                  'net1':{
                                      'external': True,
                                      'interfaces': [
                                          {'vnf1': 'eth0'}
                                      ]
                                  }
                              }
                          }
                          }

    scenario = client.create_scenario(descriptor=scenario_descriptor)
    if verbose: print(scenario)
    to_delete_list.insert(0,{"item": "scenario", "function": client.delete_scenario, "params":{"name": test_scenario_name} })

    print(" {}. TEST list_scenarios".format(test_number))
    test_number += 1
    scenarios = client.list_scenarios()
    if verbose: print(scenarios)

    print(" {}. TEST list_scenarios filter by name".format(test_number))
    test_number += 1
    scenarios_ = client.list_scenarios(name=test_scenario_name)
    if not scenarios_["scenarios"]:
        raise Exception("Text error, no VNF found with name")
    if verbose: print(scenarios_)

    print(" {}. TEST get_scenario by UUID".format(test_number))
    test_number += 1
    scenario = client.get_scenario(uuid=scenarios_["scenarios"][0]["uuid"])
    if verbose: print(scenario)



    #INSTANCES: deploy the scenario and read the instance back
    print(" {}. TEST create_instance".format(test_number))
    test_number += 1
    test_instance_name = _get_random_name(255)

    instance_descriptor={ 'schema_version': 2,
                          'instance': {
                              'name': test_instance_name,
                              'description': _get_random_name(255),
                              'public': True,
                              'vnfs':{
                                  'vnf1': {
                                      'vnf_name': test_vnf_name
                                  }
                              },
                              'networks':{
                                  'net1':{
                                      'external': True,
                                      'interfaces': [
                                          {'vnf1': 'eth0'}
                                      ]
                                  }
                              }
                          }
                          }

    # NOTE(review): instance_descriptor is built above but never passed;
    # create_instance is called with scenario_name/name only -- confirm
    # whether the descriptor was meant to be sent.
    instance = client.create_instance(scenario_name=test_scenario_name, name=test_instance_name )
    if verbose: print(instance)
    to_delete_list.insert(0,{"item": "instance", "function": client.delete_instance, "params":{"name": test_instance_name} })

    print(" {}. TEST list_instances".format(test_number))
    test_number += 1
    instances = client.list_instances()
    if verbose: print(instances)

    print(" {}. TEST list_instances filter by name".format(test_number))
    test_number += 1
    instances_ = client.list_instances(name=test_instance_name)
    if not instances_["instances"]:
        raise Exception("Text error, no VNF found with name")
    if verbose: print(instances_)

    print(" {}. TEST get_instance by UUID".format(test_number))
    test_number += 1
    instance = client.get_instance(uuid=instances_["instances"][0]["uuid"])
    if verbose: print(instance)




    #DELETE Create things: undo everything in reverse creation order
    for item in to_delete_list:
        print(" {}. TEST delete_{}".format(test_number, item["item"]))
        test_number += 1
        response = item["function"](**item["params"])
        if verbose: print(response)
+
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+test_osconnector.py makes a test over osconnector.py (openstack connector)
+credentiasl must be provided with environment bash variables or arguments
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$22-jun-2014 11:19:29$"
+
+
+import os
+import sys
+import getopt
+#import yaml
+#from jsonschema import validate as js_v, exceptions as js_e
+
#load osconnector, insert openmano directory in the path
# Derive the directory holding this script from argv[0] and add its parent
# (the openmano tree) to sys.path so 'import osconnector' resolves without
# installing the package.
r=sys.argv[0].rfind('/')
if r<0:
    osconnector_path=".."
else:
    osconnector_path=sys.argv[0][:r+1]+".."
sys.path.insert(0, osconnector_path)
#sys.path.insert(0, '/home/atierno/workspace/openmano/openmano')
import osconnector

# script version, independent of the osconnector version
version="0.1"
+
def usage():
    """Print the command-line help for this test script on stdout."""
    print("Usage: ", sys.argv[0], "[options]")
    for help_line in (
            " -v|--version openstack version (by default 2)",
            " -u|--username USER user to authenticate (by default bash:OS_USERNAME)",
            " -p|--password PASSWD password to authenticate (by default bash:OS_PASSWORD)",
            " -U|--auth_url URL url of authentication over keystone (by default bash:OS_AUTH_URL)",
            " -t|--tenant_name TENANT password to authenticate (by default bash:OS_TENANT_NAME)",
            " -i|--image IMAGE use this local path or url for loading image (by default cirros)",
            " --skip-admin-tests skip tests that requires administrative permissions, like create tenants",
            " -h|--help shows this help",
    ):
        print(help_line)
    return
+
def delete_items():
    """Roll back everything created during the test run.

    Walks ``rollback_list`` in reverse creation order and undoes each entry.
    Each entry is an ``(item, name, id_)`` tuple where ``item`` selects the
    kind of object ("flavor", "image", "tenant", "user", "network", "vm") or
    the special value "creds", which restores a connector credential on
    ``myvim`` instead of deleting anything.  Failures are reported but do not
    stop the rollback.
    """
    global myvim
    global rollback_list
    print("Making rollback, deleting items")
    # reverse order so dependent objects go first (e.g. VMs before networks)
    for item, name, id_ in reversed(rollback_list):
        # py3 fix: the original chained ``print(...).ljust(50),`` which calls
        # .ljust on print()'s None return and raises AttributeError under
        # Python 3; pad the message string instead (same idiom used below).
        if item=="creds":
            print("changing credentials {}='{}'".format(name, id_).ljust(50), end="")
        else:
            print("deleting {} '{}'".format(item, name).ljust(50), end="")
        sys.stdout.flush()
        if item=="flavor":
            result,message=myvim.delete_tenant_flavor(id_)
        elif item=="image":
            result,message=myvim.delete_tenant_image(id_)
        elif item=="tenant":
            result,message=myvim.delete_tenant(id_)
        elif item=="user":
            result,message=myvim.delete_user(id_)
        elif item=="network":
            result,message=myvim.delete_tenant_network(id_)
        elif item=="vm":
            result,message=myvim.delete_tenant_vminstance(id_)
        elif item=="creds":
            try:
                # restore the saved credential via the connector's item access
                myvim[name]=id_
                result=1
            except Exception as e:
                result=-1
                message= " " + str(type(e))[6:-1] + ": "+ str(e)
        else:
            print("Internal error unknown item rollback {},{},{}".format(item,name,id_))
            continue
        if result<0:
            print(" Fail")
            print(" VIM response:", message)
            continue
        else:
            print(" Ok")
+
if __name__=="__main__":
    # Script entry point: build an osconnector from CLI/environment
    # credentials, create tenant/user/flavor/image/network/VM in sequence
    # (recording each in rollback_list), then roll everything back via
    # delete_items() on exit or Ctrl-C.
    global myvim
    global rollback_list
    #print("(c) Copyright Telefonica"
    rollback_list=[]
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hv:u:U:p:t:i:",
                                   ["username=", "help", "version=", "password=", "tenant=", "url=","skip-admin-tests",'image='])
    except getopt.GetoptError as err:
        # print help information and exit
        print("Error:", err) # will print something like "option -a not recognized"
        usage()
        sys.exit(2)

    # credentials default to the standard OpenStack environment variables and
    # can be overridden by the command-line options below
    creds = {}
    creds['version'] = os.environ.get('OS_VERSION', '2')
    creds['username'] = os.environ.get('OS_USERNAME')
    creds['password'] = os.environ.get('OS_PASSWORD')
    creds['auth_url'] = os.environ.get('OS_AUTH_URL')
    creds['tenant_name'] = os.environ.get('OS_TENANT_NAME')
    skip_admin_tests=False
    image_path="http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img"
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-v", "--version"):
            creds['version']=a
        elif o in ("-u", "--username"):
            creds['username']=a
        elif o in ("-p", "--password"):
            creds['password']=a
        elif o in ("-U", "--auth_url"):
            creds['auth_url']=a
        elif o in ("-t", "--tenant_name"):
            creds['tenant_name']=a
        elif o in ("-i", "--image"):
            image_path=a
        elif o=="--skip-admin-tests":
            skip_admin_tests=True
        else:
            assert False, "Unhandled option"

    if creds['auth_url'] is None:
        print("you must provide openstack url with -U or bash OS_AUTH_URL")
        sys.exit()
    print("creds:", creds)


    try:
        print('load osconnector class'.ljust(50))
        sys.stdout.flush()
        try:
            myvim=osconnector.osconnector(uuid=None, name='test-openstack', tenant=creds['tenant_name'],
                                          url=creds['auth_url'], url_admin=None,
                                          user=creds['username'], passwd=creds['password'],
                                          debug = False, config={'network_vlan_ranges':'physnet_sriov'} )
            print(" Ok")
        except Exception as e:
            print(" Fail")
            print(str(type(e))[6:-1] + ": "+ str(e))
            exit(-1)

        if not skip_admin_tests:
            tenant_name="tos-tenant"
            # py3 fix: the original ``print(...).ljust(50),`` chained .ljust
            # on print()'s None return (a Python 2 leftover) and raised
            # AttributeError; pad the string and suppress the newline instead.
            print("creating new tenant '{}'".format(tenant_name).ljust(50), end="")
            sys.stdout.flush()
            result,new_tenant=myvim.new_tenant(tenant_name, "test tenant_description, trying a long description to get the limit. 2 trying a long description to get the limit. 3. trying a long description to get the limit.")
            if result<0:
                print(" Fail")
                print(" you can skip tenant creation with param'--skip-admin-tests'")
                print(" VIM response:", new_tenant)
                exit(-1)
            else:
                print(" Ok", new_tenant)
                rollback_list.append(("tenant",tenant_name,new_tenant))

            user_name="tos-user"
            print("creating new user '{}'".format(user_name).ljust(50), end="")
            sys.stdout.flush()
            result,new_user=myvim.new_user(user_name, user_name, tenant_id=new_tenant)
            if result<0:
                print(" Fail")
                print(" VIM response:", new_user)
                exit(-1)
            else:
                print(" Ok", new_user)
                rollback_list.append(("user",user_name,new_user))

        name="tos-fl1"
        # py3 fix (same print-chaining bug as above)
        print("creating new flavor '{}'".format(name).ljust(50), end="")
        sys.stdout.flush()
        flavor={}
        flavor['name']=name
        result,new_flavor1=myvim.new_tenant_flavor(flavor, True)
        if result<0:
            print(" Fail")
            print(" VIM response:", new_flavor1)
            exit(-1)
        else:
            print(" Ok", new_flavor1)
            rollback_list.append(("flavor",name,new_flavor1))

        name="tos-cirros"
        print("creating new image '{}'".format(name).ljust(50))
        sys.stdout.flush()
        image={}
        image['name']=name
        image['location']=image_path #"/home/atierno/cirros-0.3.3-x86_64-disk.img"
        result,new_image1=myvim.new_tenant_image(image)
        if result<0:
            print(" Fail")
            print(" VIM response:", new_image1)
            exit(-1)
        else:
            print(" Ok", new_image1)
            rollback_list.append(("image",name, new_image1))

        if not skip_admin_tests:
            try:
                # switch the connector to the newly created tenant/user and
                # remember the original credentials for rollback
                print('changing credentials to new tenant'.ljust(50))
                sys.stdout.flush()
                myvim['tenant'] =tenant_name
                myvim['user']=user_name
                myvim['passwd']=user_name
                print(" Ok")
                rollback_list.append(("creds", "tenant", creds["tenant_name"]))
                rollback_list.append(("creds", "user", creds["username"]))
                rollback_list.append(("creds", "passwd", creds["password"]))
            except Exception as e:
                print(" Fail")
                print(" Error setting osconnector to new tenant:", str(type(e))[6:-1] + ": "+ str(e))
                exit(-1)

        name="tos-net-bridge"
        # py3 fix (same print-chaining bug as above)
        print("creating new net '{}'".format(name).ljust(50), end="")
        sys.stdout.flush()
        result,new_net1=myvim.new_tenant_network(name, "bridge")
        if result<0:
            print(" Fail")
            print(" VIM response:", new_net1)
            exit(-1)
        else:
            print(" Ok", new_net1)
            rollback_list.append(("network",name, new_net1))

        name="tos-vm-cloud"
        print("creating new VM '{}'".format(name).ljust(50))
        sys.stdout.flush()
        result,new_vm1=myvim.new_tenant_vminstance(name, "vm-cloud-description", False,new_image1,new_flavor1,
                                                   [{"net_id":new_net1, "type":"virtio"}] )
        if result<0:
            print(" Fail")
            print(" VIM response:", new_vm1)
            exit(-1)
        else:
            print(" Ok", new_vm1)
            rollback_list.append(("vm",name, new_vm1))


        print('DONE Ok')
        print("Type ENTER to delete items")
        input('> ')
        exit()

    except KeyboardInterrupt:
        print(" Canceled!")
    except SystemExit:
        pass
    # always attempt the rollback, whether we got here via exit() or Ctrl-C
    if len(rollback_list):
        delete_items()
+
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#This script can be used as a basic test of openmano deployment over a vim
+#in order to use you need to set the VIM_XXXX bash variables with a vim values
+# VIM_TYPE openstack or openvim
+# VIM_USERNAME e.g.: admin
+# VIM_PASSWORD
+# VIM_AUTH_URL url to access VIM e.g. http:/openstack:35357/v2.0
+# VIM_AUTH_URL_ADMIN admin url
+# VIM_TENANT_NAME e.g.: admin
+# VIM_CONFIG e.g.: "'network_vlan_ranges: sriov_net'"
+# VIM_TEST_IMAGE_PATH_LINUX image path(location) to use by the VNF linux
+# VIM_TEST_IMAGE_PATH_NFV image path(location) to use by the VNF dataplaneVNF_2VMs and dataplaneVNF3
+
+#it should be used with source. It can modifies /home/$USER/.bashrc appending the variables
+#you need to delete them manually if desired
+
function usage(){
    # Print the command-line help for this VIM management test script.
    cat <<EOF
usage: ${BASH_SOURCE[0]} [OPTIONS] <action>
 test VIM managing from openmano
  <action> is a list of the following items (by default 'reset create')
    reset reset the openmano database content
    create creates items at VIM
    delete delete created items
  OPTIONS:
    -f --force does not prompt for confirmation
    -h --help shows this help
    --insert-bashrc insert the created tenant,datacenter variables at
                    ~/.bashrc to be available by openmano config
EOF
}
+
function is_valid_uuid(){
    # Return 0 when $1 is a UUID in canonical 8-4-4-4-12 hex form or a bare
    # 32-hex-digit string; return 1 otherwise.
    [[ $1 =~ ^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$ ]] && return 0
    [[ $1 =~ ^[0-9a-f]{32}$ ]] && return 0
    return 1
}
+
#detect if is called with a source to use the 'exit'/'return' command for exiting
[[ ${BASH_SOURCE[0]} != $0 ]] && _exit="return" || _exit="exit"

#detect if environment variables are set; report every missing one before exiting
fail=""
[[ -z $VIM_TYPE ]] && echo "VIM_TYPE variable not defined" >&2 && fail=1
[[ -z $VIM_USERNAME ]] && echo "VIM_USERNAME variable not defined" >&2 && fail=1
[[ -z $VIM_PASSWORD ]] && echo "VIM_PASSWORD variable not defined" >&2 && fail=1
[[ -z $VIM_AUTH_URL ]] && echo "VIM_AUTH_URL variable not defined" >&2 && fail=1
# BUGFIX: the original tested VIM_TENANT_NAME twice; the tenant can be given
# either by name or by id, so fail only when BOTH are missing.
[[ -z $VIM_TENANT_NAME ]] && [[ -z $VIM_TENANT_ID ]] && echo "neither VIM_TENANT_NAME nor VIM_TENANT_ID variable is defined" >&2 && fail=1
[[ -z $VIM_CONFIG ]] && echo "VIM_CONFIG variable not defined" >&2 && fail=1
[[ -z $VIM_TEST_IMAGE_PATH_LINUX ]] && echo "VIM_TEST_IMAGE_PATH_LINUX variable not defined" >&2 && fail=1
[[ -z $VIM_TEST_IMAGE_PATH_NFV ]] && echo "VIM_TEST_IMAGE_PATH_NFV variable not defined" >&2 && fail=1
[[ -n $fail ]] && $_exit 1
+
#check correct arguments
# Collect the actions (reset/create/delete) from the command line; anything
# else must be one of the recognised options or the script aborts.
action_list=""
for param in $*
do
    if [[ $param == reset ]] || [[ $param == create ]] || [[ $param == delete ]]
    then
        action_list="$action_list $param"
    elif [[ $param == -h ]] || [[ $param == --help ]]
    then
        usage
        $_exit 0
    elif [[ $param == -f ]] || [[ $param == --force ]]
    then
        force=y
    elif [[ $param == --insert-bashrc ]]
    then
        insert_bashrc=y
    else
        echo "invalid argument '$param'?" && usage >&2 && $_exit 1
    fi
done

# Resolve this script's directory and the openmano tree around it.
DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
DIRmano=$(dirname $DIRNAME)
DIRscript=${DIRmano}/scripts
#by default action should be reset and create
[[ -z $action_list ]] && action_list="reset create"
+
+for action in $action_list
+do
+if [[ $action == "reset" ]]
+then
+
+ #ask for confirmation if argument is not -f --force
+ [[ $force != y ]] && read -e -p "WARNING: reset openmano database, content will be lost!!! Continue(y/N)" force
+ [[ $force != y ]] && [[ $force != yes ]] && echo "aborted!" && $_exit
+
+ echo "Stopping openmano"
+ $DIRscript/service-openmano mano stop
+ echo "Initializing openmano database"
+ $DIRmano/database_utils/init_mano_db.sh -u mano -p manopw --createdb
+ echo "Starting openmano"
+ $DIRscript/service-openmano mano start
+
+elif [[ $action == "delete" ]]
+then
+ result=`openmano tenant-list TESTVIM-tenant`
+ nfvotenant=`echo $result |gawk '{print $1}'`
+ #check a valid uuid is obtained
+ is_valid_uuid $nfvotenant || ! echo "Tenant TESTVIM-tenant not found. Already delete?" >&2 || $_exit 1
+ export OPENMANO_TENANT=$nfvotenant
+ openmano instance-scenario-delete -f simple-instance || echo "fail"
+ openmano instance-scenario-delete -f complex2-instance || echo "fail"
+ openmano instance-scenario-delete -f complex4-instance || echo "fail"
+ openmano scenario-delete -f simple || echo "fail"
+ openmano scenario-delete -f complex2 || echo "fail"
+ openmano scenario-delete -f complex3 || echo "fail"
+ openmano scenario-delete -f complex4 || echo "fail"
+ openmano vnf-delete -f linux || echo "fail"
+ openmano vnf-delete -f linux_2VMs_v02 || echo "fail"
+ openmano vnf-delete -f dataplaneVNF_2VMs || echo "fail"
+ openmano vnf-delete -f dataplaneVNF3 || echo "fail"
+ openmano vnf-delete -f TESTVIM-VNF1 || echo "fail"
+ openmano datacenter-detach TESTVIM-dc || echo "fail"
+ openmano datacenter-delete -f TESTVIM-dc || echo "fail"
+ openmano tenant-delete -f TESTVIM-tenant || echo "fail"
+
+elif [[ $action == "create" ]]
+then
+
+ printf "%-50s" "Creating openmano tenant 'TESTVIM-tenant': "
+ result=$(openmano tenant-create TESTVIM-tenant --description="created by test_vimconn.sh")
+ nfvotenant=$(echo $result | gawk '{print $1}')
+ # A valid uuid in the first output token signals success
+ ! is_valid_uuid $nfvotenant && echo "FAIL" && echo " $result" && $_exit 1
+ export OPENMANO_TENANT=$nfvotenant
+ [[ $insert_bashrc == y ]] && echo -e "\nexport OPENMANO_TENANT=$nfvotenant" >> ~/.bashrc
+ echo $nfvotenant
+
+ printf "%-50s" "Creating datacenter 'TESTVIM-dc' in openmano:"
+ # The admin URL is only passed when the caller provided one
+ URL_ADMIN_PARAM=""
+ [[ -n $VIM_AUTH_URL_ADMIN ]] && URL_ADMIN_PARAM="--url_admin=$VIM_AUTH_URL_ADMIN"
+ result=$(openmano datacenter-create TESTVIM-dc "${VIM_AUTH_URL}" "--type=$VIM_TYPE" $URL_ADMIN_PARAM "--config=${VIM_CONFIG}")
+ datacenter=$(echo $result | gawk '{print $1}')
+ # Same uuid sanity check as for the tenant above
+ ! is_valid_uuid $datacenter && echo "FAIL" && echo " $result" && $_exit 1
+ echo $datacenter
+ export OPENMANO_DATACENTER=$datacenter
+ [[ $insert_bashrc == y ]] && echo -e "\nexport OPENMANO_DATACENTER=$datacenter" >> ~/.bashrc
+
+ printf "%-50s" "Attaching openmano tenant to the datacenter:"
+ # Build each optional CLI argument only when its env var is set.
+ # (A duplicated passwd_param assignment was removed here.)
+ [[ -n $VIM_PASSWORD ]] && passwd_param="--password=$VIM_PASSWORD" || passwd_param=""
+ [[ -n $VIM_TENANT_NAME ]] && vim_tenant_name_param="--vim-tenant-name=$VIM_TENANT_NAME" || vim_tenant_name_param=""
+ [[ -n $VIM_TENANT_ID ]] && vim_tenant_id_param="--vim-tenant-id=$VIM_TENANT_ID" || vim_tenant_id_param=""
+ # NOTE(review): vim_tenant_id_param is computed but never passed to the
+ # datacenter-attach command below -- confirm whether it should be appended.
+ result=`openmano datacenter-attach TESTVIM-dc "--user=$VIM_USERNAME" "$passwd_param" "$vim_tenant_name_param"`
+ [[ $? != 0 ]] && echo "FAIL" && echo " $result" && $_exit 1
+ echo OK
+
+ printf "%-50s" "Updating external nets in openmano: "
+ # Rebuild the datacenter network map: drop every entry, then re-import all
+ result=$(openmano datacenter-netmap-delete -f --all) || { echo "FAIL" ; echo " $result" ; $_exit 1 ; }
+ result=$(openmano datacenter-netmap-import -f) || { echo "FAIL" ; echo " $result" ; $_exit 1 ; }
+ echo OK
+
+ printf "%-50s" "Creating VNF 'linux': "
+ # Reference commands for preparing the test images in the VIM:
+ #glance image-create --file=./US1404dpdk.qcow2 --name=US1404dpdk --disk-format=qcow2 --min-disk=2 --is-public=True --container-format=bare
+ #nova image-meta US1404dpdk set location=/mnt/powervault/virtualization/vnfs/os/US1404dpdk.qcow2
+ #glance image-create --file=./US1404user.qcow2 --min-disk=2 --is-public=True --container-format=bare --name=US1404user --disk-format=qcow2
+ #nova image-meta US1404user set location=/mnt/powervault/virtualization/vnfs/os/US1404user.qcow2
+ result=$(openmano vnf-create $DIRmano/vnfs/examples/linux.yaml "--image-path=$VIM_TEST_IMAGE_PATH_LINUX")
+ vnf=$(echo $result | gawk '{print $1}')
+ # Success is a valid uuid in the first output field
+ ! is_valid_uuid $vnf && echo FAIL && echo " $result" && $_exit 1
+ echo $vnf
+
+ printf "%-50s" "Creating VNF 1PF,1VF,2GB,4PThreads: "
+ # The VNF descriptor is passed inline as a quoted YAML string: one mgmt
+ # interface (eth0), one dedicated dataplane port (PF0) and one shared
+ # dataplane interface (VF0) on a VM with 2 paired threads and memory: 2.
+ # $VIM_TEST_IMAGE_PATH_NFV expands inside the double quotes before the
+ # string reaches openmano.
+ result=`openmano vnf-create "vnf:
+ name: TESTVIM-VNF1
+ external-connections:
+ - name: eth0
+ type: mgmt
+ VNFC: TESTVIM-VNF1-VM
+ local_iface_name: eth0
+ - name: PF0
+ type: data
+ VNFC: TESTVIM-VNF1-VM
+ local_iface_name: PF0
+ - name: VF0
+ type: data
+ VNFC: TESTVIM-VNF1-VM
+ local_iface_name: VF0
+ VNFC:
+ - name: TESTVIM-VNF1-VM
+ VNFC image: $VIM_TEST_IMAGE_PATH_NFV
+ numas:
+ - paired-threads: 2
+ paired-threads-id: [ [0,2], [1,3] ]
+ memory: 2
+ interfaces:
+ - name: PF0
+ vpci: '0000:00:11.0'
+ dedicated: 'yes'
+ bandwidth: 10 Gbps
+ mac_address: '20:33:45:56:77:44'
+ - name: VF0
+ vpci: '0000:00:12.0'
+ dedicated: 'no'
+ bandwidth: 1 Gbps
+ mac_address: '20:33:45:56:77:45'
+ bridge-ifaces:
+ - name: eth0
+ vpci: '0000:00:09.0'
+ bandwidth: 1 Mbps
+ mac_address: '20:33:45:56:77:46'
+ model: e1000
+ "`
+ # As elsewhere, a valid uuid in the first output field means success
+ vnf=`echo $result |gawk '{print $1}'`
+ ! is_valid_uuid $vnf && echo FAIL && echo " $result" && $_exit 1
+ echo $vnf
+
+ # Create the remaining example VNFs from descriptor files; VNFs with two
+ # VMs take a comma-separated pair of image paths.
+ printf "%-50s" "Creating VNF 'dataplaneVNF_2VMs': "
+ result=$(openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF_2VMs.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV,$VIM_TEST_IMAGE_PATH_NFV")
+ vnf=$(echo $result | gawk '{print $1}')
+ ! is_valid_uuid $vnf && echo FAIL && echo " $result" && $_exit 1
+ echo $vnf
+
+ printf "%-50s" "Creating VNF 'dataplaneVNF3.yaml': "
+ result=$(openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF3.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV")
+ vnf=$(echo $result | gawk '{print $1}')
+ ! is_valid_uuid $vnf && echo FAIL && echo " $result" && $_exit 1
+ echo $vnf
+
+ printf "%-50s" "Creating VNF 'dataplaneVNF_2VMs_v02': "
+ result=$(openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF_2VMs_v02.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV,$VIM_TEST_IMAGE_PATH_NFV")
+ vnf=$(echo $result | gawk '{print $1}')
+ ! is_valid_uuid $vnf && echo FAIL && echo " $result" && $_exit 1
+ echo $vnf
+
+ printf "%-50s" "Creating VNF 'linux_2VMs_v02': "
+ result=$(openmano vnf-create $DIRmano/vnfs/examples/linux_2VMs_v02.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV,$VIM_TEST_IMAGE_PATH_NFV")
+ vnf=$(echo $result | gawk '{print $1}')
+ ! is_valid_uuid $vnf && echo FAIL && echo " $result" && $_exit 1
+ echo $vnf
+
+ for sce in simple complex2 complex3 complex4
+ do
+ printf "%-50s" "Creating scenario '$sce':"
+ result=`openmano scenario-create $DIRmano/scenarios/examples/${sce}.yaml`
+ scenario=`echo $result |gawk '{print $1}'`
+ ! is_valid_uuid $scenario && echo FAIL && echo " $result" && $_exit 1
+ echo $scenario
+ done
+
+ #USER_KEY=""
+ key_param1=""
+ key_param2=""
+ #add user keys if present at .ssh
+ # key_param1 is set only when at least one public key exists under
+ # ~/.ssh; key_param2 is currently always empty (kept as a placeholder
+ # argument for the instance-scenario-create calls below)
+ ls ${HOME}/.ssh/*.pub > /dev/null 2>&1 && key_param1=--keypair-auto
+
+ # Deploy an instance of each of the two simplest scenarios
+ for sce in simple complex2
+ do
+ printf "%-50s" "Deploying scenario '$sce':"
+ result=`openmano instance-scenario-create --scenario $sce --name ${sce}-instance "$key_param1" "$key_param2"`
+ instance=`echo $result |gawk '{print $1}'`
+ ! is_valid_uuid $instance && echo FAIL && echo " $result" && $_exit 1
+ echo $instance
+ done
+
+ # Deploy complex4 from an instance descriptor that sets IP parameters
+ # on its networks (unlike the loop above, which deploys by scenario name)
+ printf "%-50s" "Deploying scenario 'complex4' with IP parameters in networks:"
+ result=$(openmano instance-scenario-create $DIRmano/instance-scenarios/examples/instance-creation-complex4.yaml "$key_param1" "$key_param2")
+ instance=$(echo $result | gawk '{print $1}')
+ ! is_valid_uuid $instance && echo FAIL && echo " $result" && $_exit 1
+ echo $instance
+
+ echo
+ echo DONE
+fi
+done
+
--- /dev/null
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[tox]
+envlist = flake8
+toxworkdir={toxinidir}/../.tox
+
+[testenv]
+usedevelop = True
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+# TODO: for the moment only a few files are checked; extend flake8 coverage.
+commands = flake8 osm_ro/wim --max-line-length 120 \
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,E226,E402,W504
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+ setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
--- /dev/null
+deps/
+builds/
--- /dev/null
+# Juju Charm(s) for deploying OpenMano
+
+## Overview
+These are the charm layers used to build Juju charms for deploying OpenVIM components. These charms are also published to the [Juju Charm Store](https://jujucharms.com/) and can be deployed directly from there using the [etsi-osm](https://jujucharms.com/u/nfv/osm-r1) bundle, or they can be built from these layers and deployed locally.
+
+## Building the OpenVIM Charms
+
+To build these charms, you will need [charm-tools][]. You should also read
+over the developer [Getting Started][] page for an overview of charms and
+building them. Then, in any of the charm layer directories, use `charm build`.
+For example:
+
+ (setup environment to build from layers)
+ mkdir src
+ cd src
+ git clone https://github.com/nfvlabs/openvim.git
+ export JUJU_REPOSITORY=$HOME/src/openvim/charms
+ export INTERFACE_PATH=$JUJU_REPOSITORY/interfaces
+ export LAYER_PATH=$JUJU_REPOSITORY/layers
+
+ cd $LAYER_PATH/openvim
+ charm build
+
+ cd $LAYER_PATH/openvim-compute
+ charm build
+
+This will build the OpenVIM controller and OpenVIM compute charms, pulling in
+ the appropriate base and interface layers from [interfaces.juju.solutions][], and place the resulting charms into $JUJU_REPOSITORY/builds.
+
+You can also use the local version of a bundle:
+
+ juju deploy openvim/charms/bundles/openmano.yaml
+
+To publish:
+
+ # You will need an account on Launchpad, and have it added to the ~nfv
+ # namespace. Please contact foo@bar for these permissions.
+ $ charm login
+
+ $ cd $JUJU_REPOSITORY/builds/openvim
+
+ # `charm push` will upload the charm into the store and report the revision
+ # of the latest push.
+ $ charm push . cs:~nfv/openvim
+ blah blah cs:~nfv/openvim-4
+
+ # Release the charm so that it is publicly consumable
+ $ charm release cs:~nfv/openvim-4
+
+ $ cd $JUJU_REPOSITORY/builds/openvim-compute
+
+ # `charm push` will upload the charm into the store and report the revision
+ # of the latest push.
+ $ charm push . cs:~nfv/openvim-compute
+ blah blah cs:~nfv/openvim-compute-4
+
+ # Release the charm so that it is publicly consumable
+ $ charm release cs:~nfv/openvim-compute-4
+
+ # Finally, update and publish the bundle to point to the latest revision(s):
+
+ cd $JUJU_REPOSITORY/bundles/openmano
+
+ # Edit the `README.md` to reflect any notable changes.
+
+ # Edit the `bundle.yaml` with the new revision to be deployed, i.e., change cs:~nfv/openvim-3 to cs:~nfv/openvim-4
+
+ $ charm push . cs:~nfv/bundle/osm-r1
+ blah blah cs:~nfv/bundle/osm-r1-4
+
+ $ charm release cs:~nfv/bundle/osm-r1-4
+
+To deploy the published charms from the charm store:
+
+    # The recommended method
+    $ juju deploy cs:~nfv/bundles/openmano
+
+ - or -
+
+ # The manual method
+ $ juju deploy cs:~nfv/openvim
+ $ juju deploy cs:~nfv/openvim-compute
+ $ juju deploy cs:~nfv/openmano
+ $ juju deploy cs:mariadb
+
+ $ juju add-relation mariadb openvim
+ $ juju add-relation mariadb openmano
+ $ juju add-relation openvim openvim-compute
+ $ juju add-relation openvim openmano
+
+[charm-tools]: https://jujucharms.com/docs/stable/tools-charm-tools
+[Getting Started]: https://jujucharms.com/docs/devel/developer-getting-started
+[interfaces.juju.solutions]: http://interfaces.juju.solutions/
--- /dev/null
+# Overview
+
+Deploys OpenMANO.
--- /dev/null
+options:
+ repository:
+ type: string
+ default: "https://osm.etsi.org/gerrit/osm/RO.git"
+ description: "The Git repository to install OpenMano from."
+ branch:
+ type: string
+ default: "master"
+ description: "The Git branch to install."
--- /dev/null
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="96"
+ height="96"
+ id="svg6517"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="icon.svg">
+ <defs
+ id="defs6519">
+ <linearGradient
+ id="Background-0">
+ <stop
+ style="stop-color:#b8b8b8;stop-opacity:1"
+ offset="0"
+ id="stop4245" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.98039216"
+ offset="1"
+ id="stop4247" />
+ </linearGradient>
+ <linearGradient
+ id="Background">
+ <stop
+ id="stop4178"
+ offset="0"
+ style="stop-color:#b8b8b8;stop-opacity:1" />
+ <stop
+ id="stop4180"
+ offset="1"
+ style="stop-color:#c9c9c9;stop-opacity:1" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB;"
+ inkscape:label="Inner Shadow"
+ id="filter1121">
+ <feFlood
+ flood-opacity="0.59999999999999998"
+ flood-color="rgb(0,0,0)"
+ result="flood"
+ id="feFlood1123" />
+ <feComposite
+ in="flood"
+ in2="SourceGraphic"
+ operator="out"
+ result="composite1"
+ id="feComposite1125" />
+ <feGaussianBlur
+ in="composite1"
+ stdDeviation="1"
+ result="blur"
+ id="feGaussianBlur1127" />
+ <feOffset
+ dx="0"
+ dy="2"
+ result="offset"
+ id="feOffset1129" />
+ <feComposite
+ in="offset"
+ in2="SourceGraphic"
+ operator="atop"
+ result="composite2"
+ id="feComposite1131" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB;"
+ inkscape:label="Drop Shadow"
+ id="filter950">
+ <feFlood
+ flood-opacity="0.25"
+ flood-color="rgb(0,0,0)"
+ result="flood"
+ id="feFlood952" />
+ <feComposite
+ in="flood"
+ in2="SourceGraphic"
+ operator="in"
+ result="composite1"
+ id="feComposite954" />
+ <feGaussianBlur
+ in="composite1"
+ stdDeviation="1"
+ result="blur"
+ id="feGaussianBlur956" />
+ <feOffset
+ dx="0"
+ dy="1"
+ result="offset"
+ id="feOffset958" />
+ <feComposite
+ in="SourceGraphic"
+ in2="offset"
+ operator="over"
+ result="composite2"
+ id="feComposite960" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath873">
+ <g
+ transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
+ id="g875"
+ inkscape:label="Layer 1"
+ style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
+ <path
+ style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
+ d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
+ id="path877"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="sssssssss" />
+ </g>
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter891"
+ inkscape:label="Badge Shadow">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.71999962"
+ id="feGaussianBlur893" />
+ </filter>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="4.0745362"
+ inkscape:cx="119.38505"
+ inkscape:cy="39.201101"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="true"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1600"
+ inkscape:window-height="876"
+ inkscape:window-x="0"
+ inkscape:window-y="24"
+ inkscape:window-maximized="1"
+ showborder="true"
+ showguides="true"
+ inkscape:guide-bbox="true"
+ inkscape:showpageshadow="false">
+ <inkscape:grid
+ type="xygrid"
+ id="grid821" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="16,48"
+ id="guide823" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="64,80"
+ id="guide825" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="80,40"
+ id="guide827" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="64,16"
+ id="guide829" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata6522">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="BACKGROUND"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(268,-635.29076)"
+ style="display:inline">
+ <path
+ style="display:inline;fill:#d0ffff;fill-opacity:1;stroke:none;filter:url(#filter1121)"
+ d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 c -27.21517,0 -31.10302,-3.89189 -31.10302,-31.13514 z"
+ id="path6455"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="sssssssss" />
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer3"
+ inkscape:label="PLACE YOUR PICTOGRAM HERE"
+ style="display:inline"
+ sodipodi:insensitive="true">
+ <image
+ y="19.840546"
+ x="15"
+ id="image3432"
+ xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA2MAAALTCAYAAACbjNrIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz AAAN1wAADdcBQiibeAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAACAASURB VHic7N1pmFXVlTfw/9rn1sQkoDgBVQXibDSKsyYWQ40IMbGr4myMA4lJzNx5+83b3TdDJ51Od9JJ xyiTRAKFXXS6k6iFUIVFOo5RMhmSmKCC4ogiMlfde/Z6PwCGIEMN55y1zz7r96GfJ0/DOX+suvfs dfbeawNKKaWUUkoppZRSSimllFJKKaWUUkoppZRSSimllFJKKaWUUkoppZRSSimllFJKKaWUUkop pZRSSimllFJKKaWUUkoppZRSSimllFJKKaWUUkoppZRSSimllFJKKaWUUkoppZRSSimllFJKKaWU UkoppZRSSimllFJKKaWUUkoppZRSSimllFJKKaWUUkoppZRSSimllFJKKaWUUkoppZRSSimllFJK KaWUUkoppZRSSimllFJKKaWUUkoppZRSSimllFJKKaWUUkoppZRSSimllFJKKaWUUkoppZRSSiml lFJKKaWUUkoppZRSSimllFJKKaWUUkoppZRSSimllFJKKaWUUkoppZRSSimllFJKKaWUUkoppZRS SimllFJKKaWUUkoppZSzSDqAUiob8vm8qbz48ZOKCE+H5aMM0REgLmGLzUTUYwkvM9t1AJ5/OVd8 KT9pZVE6s1LKIzVdufFDNxxbLBQribgKMMcAtpRghjHbAhu8bti8woZ+u+6cPz6NfN5KR1ZK+U+L MaVUbO5qbxwVlobvB9MHAL4QwNBe/tUigLUMftqweZoNP22JnsRhrz818+xVhRgjK6VSbuLEWSUb jh5yOiwmEtOJIJwE0AkAVwPI9fIyW0B4nBkduVzunmfvbXk+xshKqQzTYkylynfbG8ty1gwqGWwH B2GhlIvB0GIJghION3WT7eYdFduP2D5oc0vLklA6a5bNWlF/lmHOg9EEIIjw0jvA+BUDPwuMufeF h85/PK9vr5XKtua2oGp7eB6HfCkBl4BwFoDyCO9gCbgPxP+4tv3qX0d4XdVXzW1B5fbCMEslgwwX yqgYDDdAGObCLaHlnsDQtlI7YvuapU3d0lGV6i0txpTT2tqaSzeNfKuZQO8j8DnMqARgevFXQyK8 wEzPMOwfDMyjxvLPP1y//IW4M2fZ/K6a8mKx9OsAbkPvfk4DtQHAPdbi+zPrl/8xgfsppRxRXdd6 Ehv+OIhaAIxK4JYhGN8poeH/Vwf78Ro/va0yDMOLwbgAsCcDdByAsejdyz0LonVk+Qk29JMjXt28 ZNWqmbqiQjlLizHlrDmd9e8h8AJmVEd3VfoVwD8qmuIPPjrlwReju66a0znlKNjgpyCcK3B7ZqAD zF+4pa5D31wr5bGqxsVnMuw/E6gWIuMYeiwXFt73zPLrXkv+3v4ac2nb6CAsfgjA5QDOjPDSz8LQ tevuv/KRCK+pVGS0GFNOmtPR8AHA3gOgJKZbFED0Y4T8jZvrl6+K6R6ZMaur5ggqlvyMQKcIRwmZ MTcYVP75Gy/+6RbhLEqpCE1oXDisAPOvAG5EMjPvB7O6kEPNS/de9bpwjtQb27T4bMP8BQCXofd7 +vqqh4hb1rZf/ZOYrq9Uv2kxppwzd3ntRUz0IIDSBG7HAH5qYf9+Zm3nUwnczzv5fN6MvujRdoDr pbPs5Y/W4v26dFEpP1Q2LjyFYP4bwInSWd7GeGDd+X+apl0X+2dcQ+vpTPgKA9ORzHi0G4Ym6wyZ co0WY8op379v2oiSssJqAMckfOsQwOxSCv/++qkr3kj43qk2p6PuiwC+Kp1jP7YQc+NNdR0PSwdR 
SvXf2PrF7zGG70fvu7EmiL+4bunVX5NOkSajp9x9eFCa+yqBbka0DZ5642U2fOrz91/9ZsL3VeqA pKf5lforJeXFf0HyhRiw64Hw0R4O/jS3s3ZmPp/Xz0YvzFlWNxHAl6RzHMBQS9Q+u7PhHOkgSqn+ qW5afJ4xfB+cLMQAgL5UVX/PWdIpUiGfN9UNrTNzpSV/JtBHkHwhBgDHkKVvCNxXqQPSAadyxuzl tZeA+UbhGCOZ6c7RFz3y8LwH6s8QzuI8NvgmZB6ovULAMGJuv2tZ3VjpLEqpvhnXtKiKme8HMEw6 y0HkYOw3pUO4rrpp0burHj/xYSbcCWCEcJybKhtaLxbOoNTbtBhTTmAGEdG/w52ls+fbgJ+cu7z+ 327vqhkiHcZF85bX1hEwSTrHofERoUFbW1tzEnsQlVIROLW5rdQy/SeAw6Wz9MLkyvpFU6VDuOjE GfOGVjUt+hYzPQHw+dJ5diMi/DvArow3VMZpMaacMKejdjqAd0vn2EeOiT9TWiz9/ezOukulw7iG QZ+SztAH528euenL0iGUUr2zbWvxKwDOk87RWxSk6vswEZWNrdN3Fip+D6ZPI74uif01sbJp0TTp EEoB7sxCqIyb01H7C4Bc39sze0chuO22pqWZP+zzzuW1xwdEf0S6XugUmfkcPYdMKbeNaVr4roDN KsR3tEkcOCBz0rPtV/xJOoi0CY3tZUV681+Y6RNwe5y5at3SK88BiKWDqGxL00BKeWrO8tqmFBRi AHBLRUn4yJ3La4+XDiLNEN2C9H1/5Ah0O7PTgwOlMo4p4GA20lWIAQBZttJ7nsWNb7rnhAI2PcpM t8HtQgwAJlY3LXbpSBaVUWkbTCkPMdHnpDP0wVkB0RNzOqa6svY9cfl83hBwpXSOfiFcOG9F3fuk Yyil9q+qafEHHNpb1CcWuBIZ7sRbNW3xhSHbXwA4UzpLbzFzmsYfylOZ/dJQbrirs/EUAmqkc/TR YYB5YNayupOkg0gYfcEjNQBGS+foL2Z8RY8uUMpBzW0B2NmjMg6JgLHjfnF8Jrv0VTYuPAWWHwBw mHSWvqHJ1XWtmXyWK3fogESJCmFvhftLGfbnMGPQlu+qcW1TcvwIV0lHGKDTjr34kSbpEEqpv1a9 rXApgFOlcwxECEr792OfTZw4q4RglsDZs+AOijjArdIhVLZpMabE3N5VM4SZr5XOMQDvGl0omSkd Ikltbc0BiFK/zI8YH5POoJT6awz6uHSGgSLG+7O2VHHDqGEfBXCKdI4B+NCJM+alsZBUnsjUF4Zy S2mx9Fpy+zDPQyP6x1kdU1O2LKP/Ng/fdD7AR0jniEDd3BVNVdIhlFK7VE5bNB6MKdI5InBk9eMT zpUOkZTqy+YPJ+K/l84xQEO7eyoyN6Op3KHFmBJEN0sniMAoA/N56RBJsTAzpDNExFguXC4dQim1 C4V0OdK5ZP0dLJnUrx7oLe4u+xyA1L+gY4IP4xGVUlqMKRFzl9WdBnBqOi4dFOHW27tqhkjHSAIR T5fOEBWypMWYUq4g+oB0hKgQw5eXVge1e2mfL/utJlbWt6Z6v6JKLy3GlAg2uE46Q2QYI0oKJd6f LzOrY2olgJOlc0SGcN7C9sZ0L5NVygPVl80fDrBPS/tOOa5u8VjpEHHr7hl0E4AR0jmiYgyleQ+7 SjEtxlTi8vm8Aaf0nKoDIKIPSmeIm0Hgw36OvQXduWIqzzNSyie8s/RCeDYeKQR2knSG2JH16jnO 4GvQ3BZI51DZ49WXn0qHsRc+PBWEMdI5Inbu3Z1TDpcOES+ukU4QtdDgIukMSmUe0YXSEaJHk6UT xGlCY9soBk2UzhGx0ZXbil7/3JSbtBhTibNE/ixR/IugYI3fB0cyvHtIkaU0t2NWyhc+7tXx7vty 
byEVToaHY0iyqT5uR6WUdx8k5bZZ904fBOAy6RyxMDRKOkJcZndOOcHD2UzApz1wSqUVwbsXWQSM HVP/wwnSOeJi4enzjnD5UXULBkvHUNmixZhKFJX3NALw8ouOAX+XKdqcn295CROY/WinrVQ6MYEx XjpFHIwxfn5vAmCmkdIZ4kGDyk1QL51CZYsWYypRBNsinSEuBM5JZ4gLkbeb0cvuuH/acOkQSmXV sdMXHw6gVDpHHAjk6/cmiLlEOkNsDP2NdASVLVqMqcTsWqJITdI5VN/smjmiS6RzxKVsUHikdAal sipXtD5//iYDrDPvacOYPqa5rUI6hsoOLcZUYnYvUfT2cGRm8vLzNHdFw7sAHCWdIzYWni63USoV vDmnaj+OHDvtP31sTuK7IcGWsEE6hMoOLwePyk0EbpbOoPqObejtUptduFw6gVKZxYHXnz+y3i7x 9htZXaqoEqPFmErEtx65oALANOkccSLmgnSGWBDVSEeIU0hajCklhYi83C+2B/n6/WnQIx0hXjS9 uma+PhtUIrQYU4kYsnVILTxeoggAILNdOkIcCDhfOkOsQo83oivlOOtzIwgAYPbz+5N5h3SEmA3l svIp0iFUNmgxphJBoBnSGWJH7F0xdsfShmoAR0vniFMA7JTOoFRWBbDd0hliduz46W2V0iEix/C9 GAMHPF06g8oGLcZU7PL5vAH5vUQRAMj6V4wFJdbPt7p7scZ4P6hQylVk/B/UFwuF86QzRI0Mefe8 2xcxZmg3TJUELcZU7EZf9NC58Hx2ZbeN0gGiRgzvBhH7MtZ6PxhUylmh/zPTROTh9yi9KZ0gAceM bVh0tnQI5T8txlT8KMjEVD8xvSqdIQYeDiL+WpjTmTGlpBQC7/ceASD/VhhY+Pi8eweTkfGLkqXF mIofs//7xQDeGgavSYeIUltbcymAM6VzxC1nwwwMBpVyVJiFYownntrc5lfXyJ07X5GOkAjKxPhF CdNiTMVqzrK6cQBOk84RO8Km25qWerURfdPILWcA8L61LwWl3u99UMpVZHNZ+PyVb94Wni4dIkpr V96wE8Bb0jlixzhjXNOiKukYym9ajKlYscGl0hkSYfGSdISoGWbvlygCKA7eMHiDdAilsuqFwtGv AQilc8TNsJfNkLx77u2PhcnGOEaJ0WJMxcowGqUzJMLgWekIUWPwBdIZ4kaE9S0tS7wfCCrlrJWT ipyFQb2XTTzwnHSARDAapCMov2kxpmIzv6umnAmXSOdIBOMZ6Qgx8HHw8FeY8YJ0BqWyjkDPS2eI HXvYxAPk3UvI/eNJExrby6RTKH9pMaZiYwsl7wUwSDpHEgjkVTF2d+eUwwGMl86RAP8HgUo5jzPw UoSPO3Z66xHSKaJEYK+eewcxuIffulg6hPKXFmMqNgxTL50hKdb49VDqRnABAO8Pu2TWYkwpaZSN lyJU0uPXUkULL1eE7BcZzsx4RiVPizEVG0s2M+usAzJeLdcgS+dKZ0gCgV+UzqBU1nEmZsYAGN+W ftvMFGPIyv53JUKLMRWLWR1TKwl0inSOhHS/QDv9eigRMlGMsaF10hmUyjpGNj6HDPbqe3XUa9v+ DKAgnSMhp41uXDhGOoTykxZjKhaGTGZmxQD8KT9pZVE6RFSYQQCfLZ0jCTnY30tnUCrrrA3/IJ0h CQScA7A3y79XrZpZAPBn6RxJyTHpUkUVCy3GVDw4U+urvRrQz1tWfxyAw6VzJGDr8w9dvFY6hFJZ t/6CZ54FsE06RwJGjqlfeJx0iCgR0WrpDIkhytJLZpUgLcZU5NramgMQTZLOkRhirx5GNpeJw55B wOp8Pm+lcyiVefm8JbBXL7UOJAhyXi1VtBZePf8OYTLyeR03q8jpL5WK3KaRW84CY4R0jqQwG68G 
EQZ0jnSGJDDRU9IZlFK7MLLxeWRmv75fKVPF2MiqR096t3QI5R8txlT0LE+WjpAkQuhVMcbWs8HC AbC1v5POoJTahYBMFGPEfjVHIpOhZYoAQHaKdATlHy3GVPQoU8XYjhdzRW82MM96cmIJCGdK50hC YMxvpTMopXaxnI2ZMRDOmjhxVol0jKisG2T+BGCHdI4EZWl8oxKSkw7gsrkdU88EUS3DHA/w4WAM BVAE0RYwvwniLcy0CYQ1geU1pqSw5oZJKzdJ55bU1tZc+hYydVL9ap86KQZvjjiNgQrpHEmgHqMz Y0o5Iijl39lsNEkvf/3IYacB+JV0kEgsaQnR2Pp7ABOloySC8J5Tm9tKVy9p6ZGOIqn6svnDw57y CYG1E5hwPJiGE9EQEEaw5aEwyIGxBYw3mPBnQ7x8bfvVv5bO7SotxvZjbsfUMxnm2wxcAgaw6//8 Be/+30yg3f9vSwRbLMWcjroNAJ4A88OW+N6ZtZ3ZeNu326bhmy4g0CDpHElhwK/ZFaJz9/1199Qr H25aukE6hFJql+d+etWrVY2trwE4UjpL3Bj2HPhSjO3yG2SlGAMGb99WOB/A/0oHSdK4htbTLfGl AF0E4BzuxigDBtPukxoIYPCu4TLhL8Nm2v0/mb5R1di60ljzqeeWXfEbkX+Ew7QY28eczrrbmPFN AKX9vMQoAE0gajKgf5rTUfc7EP5jR09w921NS7sjjOomg8kZGcwDAIjg1ZeKZTrXm0NwDoazsT9F qXThpwDyfk8OkTkXwGzpHFFhpt8QZefBz7uWKnpfjFXXzC9Hedn1TPi4BU7bVVYNSI019vGqhtbP rHvgqu9HkdEXumdsL3M7a/8ZjO+g/4XY/pwGxqyKkuLTczvqr4rwuk4ybDK1npote1WM7TqU1H/s WRGtlBeYMvG5JGavmngY8us5eCjM/r8wqGpcdDVXlD7NhDsBnBbhpctAuL2qsfWrEV4z9XRmbLfZ HXX/jxlfiO8OVMXgRXM66q7O5XI33jCp/ZX47iXj9q6aIVzMxhlVu3Gxp9SbZYq3d9UMQRGnSOdI AhGelM6QNrM6ph5mELwPsDVgOhWEKhBKd61NwWsgvAbGy0z8CwrpZ4e9ddivW1qWhNK5I9PcFozd Fp5pmC9h8DkAHUOEI8E4ErteGfcwYR0xVjNRVymHP1mz9JrN0rHThIAnszC/wsApR9UtGPzq8uu8 OOjaGv4t2UysqdjjvFE1bUM2rGzZKh0kalX1PzwGFNwFIO4Drr9Y3dC6de0DV/1zzPdJhUx9eg5k dmfdFcRoRXL/PV60FpfPrF/+eEL3S8SczrpaMJZL50gOr7u5tqNaOkVUZi+vvYSIVkrnSII1wYSZ U5Y+I50jDe7unHJ4NwdfJGAmgF7vB2VgM4AHDWhJd677px+btDJ1A5dRNW1DBg0KZ4C5GbuWJg3r /d/m7SDMCsOSr65f1rIxrow+GXPpouODkP4knSMJhviS59qv9mapW1XjonUAVUrnSAoT6p5vv6pD OkeUqhpaLwDhvwAcm9AtGYTmde1X/Sih+zkr88sU5z3YcCIxZiPZwnS0MVgxu2NqfYL3jJ+l90hH SBSRN7NiAGAoG4c9A9h4y+Slz0qHSIO5nXXX93CwhoBPow+FGAAQMIyAyxi8qLRY+uqcjvrW2ctr L4kpaqSqGxbXVDW2Lh5UUXwNzIsAXIY+FWIAQIPA9OnAFNdUNi66Lo6cvll/31VrAGSiI7Flv75v CdlYYroHsV/jnarGexpB3InkCjEAIDDmjan/4YQE7+mkTBdjzCAb2nkAhgrcfjDB/HhOR0ONwL3j QdarL6dDIfZr3xETebWP4YAYq4iy1Gam727vqhkyp6NuITN+AGB4BJccBPCVRLRyTkfdU3M76q/K 
5/NuPX/yeVPVuOjqqsbW3zFxF4ArEM0xDyMIdHdVU+sPj6pbMDiC63mMGOBV0imS4Nvhz8zZ2jcG Zm+O8KlsWDwFsP8DmU7YhwUmmAtwplfqufUwTNi8zrobAFwkGKGcYX8y78GGEwUzRKKtrbkUoCzt F/OvCYTlC6QjJIKQicFef83vajq6NCxdCeDqmG5xGoMXjb7o0SdndzY4MTtQ3bT4vKrHT1gF0EIA p8ZyE8Y1FUHQNW5G61GxXN8TBJOJ/ZyW4NX3rXfHvBwK8XmnNrdF2exNRNW0e04m4v8BUCYY45LK xtZrBe8vLrPF2N2dUw5n0DekcxAwzIa2dVcxk16bDn/rbGTksOA9Qo86Kd61rG4sCGOkcySBQZkY 7PXHnctrjy8Wi4+AkzgziM8ktg/N6ay7Lf57HVh1Y+unmPl/Abw77nsx6BxbwCNjG+85Lu57pZVF NprrEDB2dONCb75zcybw5nnYOzRo29aes6RTDMSExvYyWNsKmdVhf4VA3xxT3zZSOoeUzBZjPTb4 BsBHSOfY7azNwzd/XTrEgIR+rZ/uha2vPHKRNw0gioa8ekt7MMYEmRjs9dVdnVPHB6AHAYxL8Lal YHxn7vL6f0jwnm+rbmz9EgPfRrTHmRzKeILtqqpbkOR/59QwnI1iDAAC9ud799lz/7gGgBfdIXuL 2aR63NODTf+MBF5C9dKROROmexw8AJksxuY92HAiCB+SzrE3Jv703OW16T27ImP7xQA8lc/nrXSI qBAyskQR2HDTlPZ10iFcM3dFU1XI9KDU7CgTf2nO8tqPJXnPyqZFn2RApAgkYCyC3IPjp7dlpvtc b6194Mq1ADZI50gCGY+WKubzFqCnpGMkiQip3TdW2dRaS8AnpXPsjcE3jrl00fHSOSRkshizRft3 AALpHPsgJvq2c5vaeyGfzxuCP2/4esefJYoAAMaF0hES8oR0ANfM6ph6GNvifQBViQYh+vasZXWJ 7DutnLZoIjH9SxL3OojqsFh8oHLaohHCOdzDGdnXycar5yZlbN8YAxcjhWM25POGgG/CveOtgsCa GM/7dVf6fokGaO6KpioQrpLOcQDvGn3hw65mO6DRFz92GoCMrfX1p639/K6acpAzSxXixZSNQV4v tbU1lxrQjwGcJp0FQIkxtGB+V015nDeprplfDkuLkezSxAM5mSz918SJs0qkg7iFM7JUkc+qrpkf 6+97kjLXUREYWfnoCSdLh+ir6sdOuBaMM6Rz7BfztcfVLR4rHSNpmSvGYIt/C8DdBx/Rl9PWzIOZ M9VFEQDY+NNJMSyUTIQbA9PYMWxGBnm9s3nk5lkA1Ujn+As+oRiWfD7OO9jy8i8Q4NJSmMmvjxr6 fekQLiGTmX1jpTy4PNVNIPbGZL15LvYWEZ8vnaEvJjS2lzHhS9I5DqK0GHCszwAXZaoYm9M55SgG Piyd4xDGbR656QbpEH1B2SvGbFBW7s/aeMrOEtMwCHVmbLe5nbUzmflD0jnegenv7ljaUB3Hpavq FowjYveWwRBuqmpYdKN0DFcUTUlWijGvjhQpBZ4CsnWGI5FJ1XlxBXrrJgCyS9IPiW+c0Ng2SjpF kjJVjIGD6wA4vyTAgj4qnaFPyK/DK3vh2Rsv/ukW6RBR4ew073j5o1MefFE6hAvmLa89m5m+I53j ACpyOftPsVw5yH0Nrh7BQfS9qvp7vJklGYj197W8COBl6RwJ8eb7d83SazYDeFY6R5IYnK7xD/NH pCMcGg0qcniNdIokZaoYYzj4Fng/iHFGUhvZB+r2rpohAE6RzpEs9ma/GDMIIMmDzxPDwOPSGVyw sL1xmCVqg+whn4dyxezltZHuY6xuWvRuAC1RXjNi5TC2bVRN2xDpII7Iyuf1YoBda6QwEN48H3vp 
tKPqFgyWDtEbVdMWXwg39gcfElM6xutRyUwxNmtZ3XkESk3RYAg3S2fojfKw7By415kyXoRfS0eI ypyO2jMAHCWdIwmG+FHpDC7YWWq/g2TPEusPY0Bfi/KCbOnrcP+Zd9yg8uK3pUO4gDgzxdhR4+r/ 83TpEFFhpqztG8uVUkkqZrTZ8i3SGfrg9CytFHD9wRQZY3C9dIY+IVyxsL1xmHSMQ2HYc6QzJI6N N2/+jEGddIakWJuZwd0BzV5e+34n94ntBxMaZ3VMjeQcn8qG1otBaIjiWrEj3DS2YfEM6Rjy6DHp BEmxZL35HjaUuY6KMMb9pYrjp7YdRuBm6Rx9YjhV/RMGIhPF2O7W3VdI5+ijwTtKw0bpEIfE5PyX UNSKRX/e/DEbbwYBh1AsZ8pOU4D9uLtzyuFENEs6R18YMpF0/SK3u4e9gyGePaa+LWPHhfy1bTuD JwGE0jkS4dFLMQ6L3jwfe4uInN9WYnPFJoAGSefoG75yQmO7y8vpI5OJYqxYLKsHI40Ha14qHeBQ GJlr3rHpIw0PrJMOEYUFy+oGAxzJzEMK/O66+uXbpENI6rHBNwCkq0MVY/Ls5bWXDOQS1Y2tlwCY HFGipBwVmOI/S4eQtGFly1YQfiedIxGMi8c0t7nZWKaP1i2/di2AzdI5ksTs/swYEzs/ntyPw3to Y610iCRkohgj8DTpDP3C1NDW1uzsfqz5XU1HA8jW4XyE3xL50bq3m/gSuN3EIUKcmSVP+zOns/49 IOeP9dgvMvTlgfx9Cwzo7wu6qbKhNSsvS/aLgax8bstzWwrvlQ4RDWJkr4lH1XF1C46UDnFANV05 gNKxTHtfTOkcv/eR98UYM4iBJukc/cNHbB6+ydkDBcMwjLTbWRowe9RJkeD+MtiIcHaaAbxDvqsm B8t3AEhnxzbGe+cur53Sn79a2bB4CgFpHeQSEe7cNZDKJpOdjooAkT/fx5y9fWM2KHV2PDS27JUL AKR02bMWY16Y1zn13QBGS+foL+v0LyKfKZ0gaYbhzUOGnP7dihZzdpoB7GtMWHIjCKdK5xgIRv9m xwic1lmxPU6trHgplTOaUWAymfncskffx0zGm+dkb4UInR0PBSaVSxQBAASMHdfQ6k230QPxvhhj otT+EgIAkbtvdRnk7JdPXAh+tLWfu6zuNLjf3jwqm26pW/60dAgJt3fVDGGmf5TOMWCEC2d3TK3v y1+pbmptAOHCuCIlhUBfOXHGvKHSOSSsu/+DfwTwpnSOZPCE6rrWk6RTRCGwNnPFGIGcnRljYED7 bqWxgTcvKg7E+2IMTCldovi2s2Y9ObFEOsT+ELOzXz4xCd8asnW1dIgoWPLnLeyh0eO+7PPrq9Ji 6WcBHCOdIwoE87m+/HkGPhtXloQdubNQ/inpEDKIGfyEdIqkcJDul8d7dJcO+x2y0gnzL5x8OX1q c1spgDOkcwwEZ2DfmNfF2LyHZgwFkPZzsCpyG0c690Ga99CMoQwcJ50jYX/6zIWP7pAOEQWilDa1 6ZdsNu/YfU7hp6VzRIcO79MfZ/TtzzuNPp3V2TGCyc6+MaR3OdneXrp3bv+TNwAAIABJREFU+nYA a6RzJOz4o+oWDJYOsa/NW3rOAFAunWNg+DwX/9tGyetijLfvOBeAs90Ie8sCzp1hUdyx/Qx4/vuz L/Jkv9jcB+pHArhAOkdSiLK5X2xHSXgrgMOkc6hIjNjZU/ER6RAisnVY+8Wjp9ztxUsEIvKm2VUv mXKTc25vE5FxtglcH+QqTEnaJ1YOyuvBNBP58EsIJveKMYJxcko+Tkx+FGNsbBOArHRoYxTxC+kQ SZvfVVMO4DbpHCpChM/6chZVXxSLPY8BmVlmHJSUlHhxADRnsKMiHFyqSA6+zO8f9mI8fyBeF2ME 
cv4gvt4g0NnSGfZFnO41yP3iSVv7LHVRBPCnmxqWbZQOkbRisfQGeLJXTL3tqNy28HrpEEl7ccX1 b3CGlryl9HDed2DKXhMPEFzcR3+WdIAoWPgxuXIgXhdjDPbkjQDGMTt2RhC59wYobtaaX0lnGKi2 tubSLJ0vBs7MobFvy3fV5AB8XjqHih4zfz6L544RZWmpIjXtbrqQataUpv552Q+OjYuY4EnXZCJv xvP75W0xNmdZ3TgAR0nniEj5D1Y2OfNvaWtrDgCcIp0jYa/MbFj2snSIgdo8cvNkZGkfUaYGcbsc G5b+DTx5AKt3GF9d8crl0iGSl6l9n8O3b7U10iEGav19LS8CeE06R8JORXObM30KquoXHo3UN+94 29FVdQu8fa55W4yxIa82+xWLPdXSGfZ487C3joc/H/De+qV0gCgw8/ulMySJYLM0iAMAGEZG26Bn AyN7P1+LbL1UCRH68T3Nfjw3+6Bi/DbrTpdpCqqlI0SJczmvxvV787YYI7BXMzeMoFI6wx4B8buk MySNPCjG8vm8ATBDOkeCtq7PFZ+SDpGkeZ0NF7A3G7b3xTv7+Bf6+udTgs+vblzo9f6JfR356ubf ALxdOkdSCPQ+7Pq+Tjei1D83+yqEdWZ8ROAq6QxRMp6N6/eW/g/7gXlxkv0ehuwI6Qx7WIPTpDMk zTKn/qFyzEUPXQjgaOkcyaHH85NWFqVTJMmCPTpX7B229e2Pezx4Z+Pzz/kdVq2aWQAoM4c/Azim 6rETUv9ShYHM7RtjS86Mjxg0UjpDlJjpROkMcfG5GDtZOkCUmGmYdIY9COTMm5+kmKAk9cVYwIEf S196ick+LJ0hSXNXNFXB42WoBOpjcWW8OKB9f5hwuc/7J/aPMvV5hkH6P8vGpv652Vfk0MohApwZ N0bEq0mWvXlZjO1ejnW8dI4oufShIs7czNjGGye3Py8dYqCY+H3SGRLF/Ih0hCRZW/w4fD4/jvs2 00V9/PMpEyCX+6h0iERxtl6uMOMy6QwD9fz9Vz0HYJN0joQ5U4wx0VDpDBE70Yvlu/vh5T9q9MU/ HwXAr8MxHflQfeuRCyoYGC+dI2G/JEr3oaPzHqg/A4A7G4vjZxnZaWt/e1fNEAJuks4RJ+7jMkVr +jqTljKMm0fVtA2RjpEUKu95BICVzpEUAo4fO+2elL/4JAZnbqniccdOv3eQdIjdnHmJH5GKCY+f crh0iDh4WYyxyR0hnSEGTrQjP2zrkFMBONO6NSGpf5jYgFP/lrWPfjeztvMt6RBJKSuWfQjAcOkc cWLCm33582TZ98O+hw8alJ1DoNf++IZNAP4gnSNJxnrQVZHS3/yqj4Kc3ezENhkC+1aMoQc9o6Qz xMHLYiywOFI6Q+SYnZgZs2ScmYJPCvvxMPmAdIAkEfCQdIak5PN5w+CPS+eIGzGv7cufZ8OpX1p8 KMz8SV+X7ewPZW3fGCj9xRg49S8z+8qEbjTxYP9mxmBgtBhLC7bWu5kxZjc+VBncL4bAmFQ/THYf gH66dI4kMfOj0hmScuyFD18KwNsuU3sQmbV9+fNszXMxRXEGAcdXPn7CNOkcSbHI1r4xAO+ublhc LR1iQEzgw8vMPmFyZd+YGy/xo2RBWoylBRN5V4yB3Dhk2RJnrRjb8sL/nv9n6RADwYG/HfYOiDP0 Bp0oE4cAF4q0uk9/gfr451OKMnQItLU2U015ABAo3UvM1w0yf0Kfj6VIN4YjHRWZ/OqdAMAQ/Bvf w9NiDEyubJ6MUol0AAAgZGtmjEC/yefzqd40Tkypfpj3w8s31y/3flYE2NWYhYBJ0jliR3jzIw0P rOvLX3lh6QefBZCFfYOTx9Xfc4Z0iCSsX3btGgCvSudIEiPlXRWXtIRg/EY6RpIIbixTBKFMOkLU 
GOzj+N7TYgzWu19AkHwxNr+rZjiAY6VzJIlTftjznM4pRwG4UDpHohiZmRXjXGZmRX7V946mxAB+ HUsax4QBZ+X3AKAMzXrvcvGExrZ0L80iSvVztB+Orb5svgsNlbwbC5OH/ybA12KMqFQ6QuRYvhgr 2NJTpDMkjUy6m3cw52Yga90vMzJYm9/VdDQzXymdIxm8sl9/i6kr4iBOIuYrx81oPUo6RyIydn4g gKDAhRnSIQaCKN0vNfuDd5aJd1RkDwsXaz0c38PXYszDX0A4sEzRhJy5YswgeEI6w0AYzt5+Mc5I MVYshLfCz++6dzAIOvv3N3lFtEmcVcZFzsQh0JS9Jh4AmVQvVSzCPimdIXny4yUCe/d8IIIWY2nh 49sAwIVfQCP+pidhW4dsHPK0dIj+mvfQjKFMGdhP9Nd2DN841Pv9Cd9tbywD8S3SORKypTh8Q78G c8/vPOYxAJsjzuMkZrq1uma+E42e4nT4a9tWAez3gd7vwLUnzpiX2s546weX/h4Za+IBIgfGS+Th WNi/AhPwtBgjH5cpOjAzxiT/pidRhF+2tCwJpWP0V7hz5zTAjS6cCfpFS8uSHukQcRtUEl4NIBPL 0gjUNfPsVYV+/eWVk4oA/W/EkVw1iivKr5IOEbdVq2YWiGiVdI6Ele0oVjRKh+i3JS0hgFQfEdN3 ToyXPBwL+1hgelqMgVm8cImBA/8mduBNT3LIUqqXVhDQIp0hcRlYosgM4iy1Mwf3c4niLkx2QH8/ XfiTAJN0irhZZKdJzx7EKf8+J071kv++c2FmzL9ZJCYvx/eeFmNEPv67RN9w3N5VMwSgSskMSWPD v5DO0F/zHpoxFIwG6RxJY+KHpDPEbe6KuqmAK4eKxo8wsGKKKcjKvjEAOL2y4Z7J0iHil8F9Y+DG UTVtQ6RT9BdZylgxhir5n5d/s0iGycuXTT4WLV5i4ZmxkrD8ZABefggOhIrpXQrDO7qnA/DuwMdD sMUdJY9Jh4gbM39SOkOCXri5tvMPA7rA/R9cDWB9RHmcR4Y/LZ0hbjYsfQRAqs9/7DsaNLi8cKl0 iv4q5jjVK036gSoGF04Uu3tNVw4edlJmZi/rFi//UT7+sEi4GDOwLqx/TtLGG+uXPSMdor8YnO4l Lf3z21svvf9N6RBxmt055QQCpXfvSF8x/mfgFyEG808Gfp2UYDRV17WeJB0jTuuXtWwkYLV0jqSx Mc3SGfpr/X1XrQHwhnSORLERGzcdVfqCd7NiPvOuaAEA8nPNvPA62WztF2Pgyb4fMuuGeQ/NGAqg XjpH4hjeN2ogDj4FT7+394tMBMUYwIjmOilBnMMnpEPEjZm9/7y/A3PThMaFw6Rj9A8xId3ndvaZ lRs3lZUEHjbvAGB0mWJqEBkff1i5fD4v9vNiUKZmxghI7fp2u33H+5C9LoqAwc+lI8Tp+/dNGwHg OukcCXrjxdzOSPYAPj80WAng9SiulQrMHxo95e7DpWPEich4/Xk/gPIim2nSIfqLka19Y0QQK8ZK bUlO6t6xYi8nW/wsxtj6+cM65ZTVch8uRqaKMaT4oUGU3qUsA8BA6PXgrKSsMBPAYOkciWH8OD9p ZTGSa+1qrX1vJNdKBRoUlOZukk4RKwp+Jh1BAhPS+/3OyNq+MbFxU8F2+1mMedq7wMtizFcbRm0Q mXae31VTDqBa4t5SiqaQyofGwvbGYQyuk84h4Ombp654VTpEXPJdNTkAt0rnSBIRRbq0kBHF/rP0 INAnJk6c5WUbaABY297yCgN/ls4hoDGtSxWLFKb2JWc/HTehsV1k71aOc14WY0y6TDE9yM/Kuay7 TKQYs91lJ8LDrjwH8fJHpzz4onSI/tiRK85AFpcowu/9YseGpX8DYKx0jgRtCXLdkbakNzu6OwBs 
jfKajhv9xqghl0uHiBNRZg703lt5gYJUdlV8cek16wG8LJ0jQcFO2niCxI1D4+eYzXjYoA/wtBhj Pxt4oEAs8qbD5tjrzlz7kdrzxUD0QekIEpj8LsYMZ+eQ593ab5i0cmeUF1y78oadxGiP8pquY6LP SmeIE1n2emnygbBN7wHQhGwtVTQciLS3L+ast7PiPvKyGPNVrlSqOw6LvNmRwil9WMzqmHoYgFrp HBKYrbeDsrnLay9i4DzpHImiuJYUcqaWKgI4u2ra4gulQ8SFbdHrlzAHQsT1aV2qyOBMLVUksEgx FnDg5zJFbeCRHsTk52GQPaHMmw4muYMLBQTMqZwZC8hcBiCLZ4s8N7O283npELEh8v4Q3310V/QE S+O48LadJfcBiHTGzXnW30Og1y2/7jkGXpDOIaC8h80M6RD9QZTe5lj9QYDMMkUu+lmMGRNKZ4iD l8UYiL0sxkyJ0Zmx+HHO2FXSIfqDGX8jnUGIt7NidyxtqGbgMukcSSLQsmualm6O49obVrZsJWB5 HNd22PurGxZXS4eIC3m+X/RATEq7Kha6C08A6TzDsz8Y0JmxCBGzl787fhZjns6MhZDZM8bI1MzY M9dPXfGGdIi+mt9VMxxAFrsoAh7vF8vl7CeRreY5YOLWeG/Ai2O9vnsCBnt7CDQRRXIWXdowUD9+ atth0jn66sUV178B4DnpHAkSGT9ZDr0sxsDQmbG0IIKXxZgJw8RnxmY9UH8MAalcm94/6VxCUSyW vR+A0MyprNCyl8XYvIdmDAVwg3SOhG0rC3FfnDfoKRn2U2SrqyJAuDmNA/fesBx6+fnvhbJiSeF9 0iH6I2P7xoYfV7fgyKRvaoLAywYebLQYSw3LfhZjMMl/uMjYLC1RBKX0IUHMqVyyEoFXPlLX4eVZ Q+HOHTcD8HIAfSAM/u/r6pdvi/MeL907fTsIP47zHg4aWiwtfEg6RByeX3r1HwC8Jp1DAsGk8nuf UvrSs796qCTx2THD1suZMV97QnhZjPk6M4Yw+ValZDK1RBEhbOoeEvO7aoYzYYp0DiE/kw4Qh7a2 5oCYPiadI3GGkllCaDO3VBHE9Ck0t3m45JUYGV2qCHBd5bRFI6RT9JWhdL707C8TJN9R0TJ5WYyB rZfjey+LMZCflTMZiQYemWreEVZY8yvpEH1VDEsvR0aXKIL9PGdo8/BNHwAwXjpHwja8ZHo6krjR ERu2dgB4PYl7OaS6alvRy2YwDJvVpYqlhil1P9Ot20t+Cfi53Gy/OPlijMnTvcaeju/9LMYs+/kh twKH+GWrrf3v414iFQvma6QjSLHk534xNvQF6QyJI2rLT1pZTOJWq1bNLAC0JIl7uYSYvfy9IjZe fg/0BjNS9/2/YWXLVgB/lM6RFBJogkZsvJwZI/i58s3LYowMedn6kslIbMg8SeCeMhipO19sVsfU SoDeK51DyBsvP3zxaukQUZu3vLYOjInSOZJG1ia6dNBaZG6pIoPOqWxY7N2S5nXnPf0bAKnrghuR muPqFo+VDtFXnKHzxljgrDHL7GcDD+2mmCLWzx+WZZvoUrS2tuZSANVJ3lMSg56UztBXAZmr4evn +FAYXfl83ru3ZDaDs2JEWHtjbccjSd7zhWVXPASwv4eFHwCR/TvpDJHb9T2Q1dkxExq+WjpEX5HN 1L6x8RMnzkq0ODK+HolCfp5R5+Ugznp66DMSnhnbNHJjNQAvp7r3h4P0zYyxReoewpEhPCgdIWpz V9SfC8Zk6RxJY1ArJf6QJSaYzM2OATSlunHh+dIposZMK6UzSGHCtdIZ+sqmsFnWAJS8duRhlUne kMjPfeTk6V5DL4sxXw99poRnxsA0LtH7yeoe8caw30mH6Is5y+omgnCqdA45dqV0gqhZy/7NWvSC 
5fAekRuTlbmvMKbg89IZosYBefdypg9Oqaq/5yzpEH1RRiN/C6BHOkdScsBxSd7PelqMWU+7pXtZ jJGnM2NElOiHi2Cy1M3tDy0tS1L1YKCAUvc2NEIv3zS106sN4HM6pp5MwAzpHAKemlnb+ZTEjde2 X/1rAL+XuLco5suqpt1zsnSMKL1w/wdXI6PnjQEAG5uq58GapU3dyFATDyZOdDxFDC/3jOk5YynC xKkaVPeW5WSLMc5Qa20GiwwG+yvfVZNj5iukcwjqSn5ZW9zM/4Gn38kHR3cJB5C+vwTD1nq2N5GY GCulU0gh4ErUdKVtW0GqnrsDwUi4GPN0ZozZz9lULx/8ZNEtnSEWlGx3HKJkp9UlEShVD4UxxbJ6 AEdJ55BCzF3SGaI078GGEwFcJZ1DQE9QMIskAxRyuBvw9JlxEARc49vsGAx59b3QR0dVVbxaKx2i L4iRqq0BA8JIdNuHhS1L8n6JIbtTOkIc/CzGiHZIZ4gD6cxYbIjSVYwxOFVLUqIWBjmvBl02DL+G DDXL+Qv+8Yeblm6QTPDSvVe9DqL7JTMICWDtl6VDRIls0avvhT6jdC1VZE7Xc3dgEp4Zs8mOFxND 0GIsLRh+/rDIcMINPJJ9kyOJA5OaN3QL2xuHAZgunUPQCzOnLH1GOkRUZnc2nAPQ+6VzSDCMedIZ AADWupEjeZdXNy0+TzpEVJ574NqnAbwonUMM47LxU9sOk47RW0FpkKFijJJdaeTpMkUwvJxs8bIY A8jLYow5uQ/XXe2NowgYltT9hG26qWZpah7gO0rs5QAGSecQw361tCfmrwMg6RwCXhi6afgK6RAA sO78Pz+QxTPHABCYvyIdIlKELM+OVdhcITUvdp69t/kFAJukcyTksDH1bSOTuhklvK0lQV6O770s xpjYy8oZSK47TiEIM7NEEcBTaWoGkfUlimT8GWzN6ayrBXiKdA4h81palrhxZkw+bwFzt3QMCQzU VjYs9ud3kDO9bwxMaeqySwzQaukUSaEEx1XMyZ5Lmxjyc7LFy2LMeFo5A8nNjJmE27BKohR1dJrV MbWSgEukc0gK2Xox2GIGgfmfpHMIsRZ2vnSIvxIW5gN+nmFzSIa/BrAfs7NhwYvvhwGoGT+9LdED hgeCYVPz/B2ogJNsipbwubQJYavFWGpYtl7OjCV6zhhRZooxRno6OhHMJ+Dp57aXnplZ2+nFcrK5 K+ouB+gc6RwyqMO1n+O65dc9B/JrCWxvEePcqobW1CxvO5hdP0d6TjqHIBMWC5+QDtFbROnZrz1g Cb7k9rW1fU5nxlKE/fxhMSfawCM1b9YGyhCl4uDJWR1TDwNwi3QOUeTHEqRvPXJBBRj/Ip1DCrGb DTPIspO5EkH0zeqa+eXSMSLh2dEXfUe3pKaRB/MfpCMkxTInNq6yCfYYSBSFXo7vvSzG2NNiDESJ rQEmxtik7iWtp5COt6jEwUcy1FRl/6z1YuZiyPYh/wfITrfSfbyxvZj7qXSI/Rk8tOS/AbwunUPI eFte7sVB0Ey0UjqDsGFhrpiKF3cWZp10hqQQzJjk7uVna/ui9bMnhJfFWM7XYizBmTEmjE7qXsKK r5bvXC8d4lDa2ppLifg26RzC2FqzUjrEQN3VOXU8Mf2tdA4pDCy4rWmpk4csr17S0kPErdI5pBDx F6rqFqT+JYE1gRcvbQaE8MlTm9ucH5Af+dpbzwNwo5FP7DixYgzsZzdFNn72hPCyGAtzRS8rZ0p2 2jm5Lw1BRFifn7SyKJ3jUDaPeOsaAMdK5xC2embDspelQwxUyOY7APxYDtYPOQrmSmc4mCLY6Xwx q6Ag9+/SIQZq/X0tLwLIzPK3Axi9bVvxaukQh7Jq1cwCAy9J50hIYuMq9nTPGIyfky1eFmOGypx8 
6zpglExr+wXL6gYDSOw8DEls3X8IzHpyYgkDX5TOIY2BDukMAzW7Y2o9gEulc0hhoOvDU5f+XjrH waxvv+YpIvxcOocUBmZU1S+eJp1joIg49d8XA8WMf0jD7BgIqX/J1ktHjGluq0jiRr4uUyzrMVqM pYXp5u3SGeLAQFkS99kZhFlZoggYvCEd4VBo4xE3AshMd8sDIeZO6QwDMb+rppxgvi+dQxbdLp2g NyzwPekMogx/J+3NPCy0GANQvW1L8QbpEIdCwAbpDInZ3J3M+MrTmbEdZbpnLDU2Dd+0VTpDPJJp 4GGsyUzzDli8Jh3hYHYN4Dnzs2IAenpKCv8rHWIgisXSLyDbRfULL+W6fyIdojee337MfwNwfi9p jI7jitLPS4cYiIpc988AFKRzSGPCPyQ1G9NfzNlpmpOjXCJLFRPe1pKYQcXhW6QzxMHLYuwzFz66 A4Dz+4D6ipFMAw8mk5mZMSa8KZ3hYAphya2gbOzfOzh+5GOTVqb2JctdnVPHA/CiU12/Md+Zhv2Z AICVk4oAz5aOIYv+Ls3NPJ7+6Y1bGHhUOocDjs1tCT8iHeJgiN1foRIVm1ATD4aXxVhhzdImL7ch eVmMAQAI3lXPJqGZMYAzMzNm4G5nntu7aoYQU7YH8HtweveLMYNCa+4E4PTb6Zh1F5lS1RijBCV3 AvDywd9LFTC5eQCTdJD+IqR7aXNUmPj/njhj3lDpHAfCGfqcGZPYsUE+FmObpQPExd9ijNm7HxrD JrJnjLLUtY/J2WUspcXSTwI4UjqHCygwqR1UzeuovRGEWukcwv7zo/XLnV4SvK81S1s2APgv6Ryi CJOqGlo/LB2jv0j3je1xxM5ChbNHoxjDPdIZksKWk1p55GNre+8mWfbwtxgDefhDS2ZmjJGZM8YA w04WY9+/b9oIAJ+TzuGIjcPeGLZKOkR/3LFi8mgm+lfpHNKYTCobYhBsKnNHiuhfx9YuTuULurVD Sp8AsEk6hyM+Vzlt0QjpEPvDjMwUYyBKanzl3cwYaTGWSt790JLaMwbgqITuI44tOfkQyJUWPg9g uHQOJzAebGlZkspDQQOb+x6Aw6RzCHvslqkPPCEdoj/WLr3mMQKnMnuEhpuSlO6fW9ISAuiSjuGI 4WTps9Ih9svhFSqR44RWu3jYTZFB3q1428PbYow9LMaS2zOGUQndRxyTe8sj7mpvHEWEj0vncIZJ 536xuR31VxFwmXQOaUwpbxPP6WjHHyvGtOrGxR+UjtEfulTxr3xq3IxW5162Migze8aYEhpfedlN 0WoxljbE/m30S3BmLDP7lIyDxZgttV8E4Oxm68SF6SvGZnXVHMHgb0vncMCGnT1Bqvdd5Wj4PYDb R2AkgcHfO65uQeqeDUVrU/f9EaPBtgd/Kx1iX8ZkZ5kiJTe+8q4YI/Zx+9Eu3hZjIPbwh0axN/D4 bntjGTJUCDAbpx4Cc7oaxzDzTOkcDnnm5vrlz0mH6KugUPofyNBLjQMhYNZtTUtT/dZ7zdKmbmbM k87hgCOKQZC6Fwzrl127BkSp+w6JDeHWMZe2ObUvPFN7xoBhpza3JVEoeVeMsdFiLHWIjIczYxgU 9z0qKrKzRBEAmC1LZ9gbF8L/B6BcOoc70rfEaPby2hlMuEI6hwOKnAtmSYeIQq4kdyc8PLuy7+iq yqaFl0qn6DPWFvd7KQ/C8IvSIfZGxE49h2NGb+1IZJwV+3gxacw+TrLs4m0xBg9/aAQMjvse1tpM vc03RM58Bu7unHI4Ea6TzuGWIFXF2PyumuFEdId0Dkf86OZJS9dLh4jCs/e2PA/gx9I5XEBs7hw/ tS1VTWmY0/dSJ172+jH1bSOlU+zBTKk9y64/crYQczHGBA+LMaPnjKWQl8sUEczvqol11oRsmK2Z 
MYIzD4Fum7sJ2T4YeF9hLrfzQekQfRGGZd9Gls7pOwjD7FVLf9IjCvYYHZYUvyUdoi8sl6wAkMqO rPGgQQEVb5RO8TZif8ei+0EcxPrS+9jp91XAw/E9e3lk1S7e/bD2YE83+oXdZbG+7SAEznVaihNZ dqIYa2trDozhj0jncMxjN0xamZozgmYvr53BzB+SzuECBrpurOt4UjpHlNa2X/k4EX4uncMRH65q WPQB6RC9tX5Zy0YAXv0+DhjhY2huC6RjAABztooxi3hfeg8q7oh9FZUE1m6KKeThMkUAMMTxfsg4 W3vGLBknirG3Rmx5HzOqpXM4Zql0gN6a1VVzBBF5sT8qCuTZrNgelvFN6QzOILozTd0VmSk13ycJ qareWpwuHQIADBl/x6L7QaBYx1lFDr0sxoynkyyAx8UYe7q2NDR2SKw3ID4i1uu7hjips9sOimH1 XLF9WTwgHaG3gmLpnQCOls7hAgb//qbaDi8Hvs+f96f7AfxBOocjRoW5ktScIRew9fJ3ciAY+IR0 BgBgtjnpDEnimIsxyzG/tBdijdViLG0CY7z8oRmmuD9kzmzqTYIBxDei39XZeAoBk6RzOObVFx+9 8FfSIXpjdmfd1QxcLp3DFQb0b0TwsztaPm9BSNV+qTgxc3N1Q+uV0jl647kL/vwkgA3SORwzqbJx 4SnSIQASfw4niWFjHWcF8LMYM+znuB7wuBizIfs5MxZ/R8VMfSlaS8OlM4SwV0tncNCyfD5vpUMc yh0rJo8m4D+kczjk5e2FYJF0iDiV8PAfAnhFOocrmPC9sbWL3W9as6uQXiYdwzFEMFdJhwBY/Dmc JKJ4i0+bi7/ztgQGeTmuBzwuxkLysxhDQLE28GBgRJzXdw/LF5/MLdIRnENol45wKMygkjA3B5y1 z8xBfTfthzwfypqlTd0AUrM8LwEjgxz/YHc7bbcxp2bpc4Lkz0Rk+ZeiieJ4i0+2FO92FiFs7FvS GeLibTFWWsylpgtbXxBjWKzXR7zXdw0ZEl2WOWtF/VkAJkhmcFBIRXL+XKA5HfUzmdAoncMhWwvd JZloYhLa3B0AtkrncAUDtZWNrTdL5ziUQo6WAXB+xj1hx1U1Lj4f/NQaAAAgAElEQVRTNAFl7IUW xzszRkRD47y+lHBnUYuxtCkc+ZqXxRg49j1dGXtDZaslb08hPih5fycxHr+pYdlG6RgHM2dZ3Tgi /hfpHC5hYM6tl97/pnSOJKxf1rKRiO+SzuESAn1rTP0PnX6x9NK9V73OpC3u98UkvTqDx8neP2EU 8ziL492TJoRfHFnh57geHhdjM89eVQCwTTpHDLQYixSdIHp3Ym38sA8mt1va5/N5gwA/AODl28d+ KhqT+450iERZ820ARekYDhkcmNwPXDm76kAM2OnvFwkE+qDsMlNyuoiPQawzYwzZFT8x2YIlLd4e 3O5tMbabf1U0cdzT+fJ7qJJ12KyuGpF2/rOW1Z0H4DiJe7uMrNvF2JiLH/4cGO+VzuGYtpumtK+T DpGktQ9cuZbBP5LO4Ra+qGpr+GnpFAdnnP5+EcE8bmzTPRMlbl3d1HY0MrY9AvG/9PZw2Sf7N57f i9/FGHtYjMW4THHWkxNLAFTEdX1XkS0VmR0jg7+RuK/jXnO5pf3cZXWnMdOXpXO4xhr6N+kMEpiM l4dbDwx/pbK+9VTpFAey9tynn4C2uH+HgLlZ4r5hGB4vcV9hg1HTFd/ZauRfQxQCebtfDPC9GCP/ ijEGxffGY8vQrM2KAQAM87sk7kvAdIn7uozBzra0b2trLmWDuwGUSWdxTOfMKct+KR1CwgvtVz4J 8ArpHI4pJ8MLJk6cVSIdZL92fb8sl47hGhZ6HgXGniZxX2mjg+djG28ZZv+WKXo4nt+b38UY4N1m 
cqL4Zsa4UOLh1HYvMCW+PGPOsrpxAE5M+r7OI3K2pf1bI976EoCzpHO4hpm/Kp1BlME/SUdwD531 +pHD8tIpDkz3je3HyZXTFo1P+qYMOjvpe7ogKM/FVoxx/L0Fksf+jef35nsx5t+0ZozLFAOmrK3b BgAwCQywiZsSv6f7wjKETra0n9NZ/x4An5fO4RoCPXRLXcfPpHNIWnf/1V0APSydwz38hbH1i98j nWJ/tMX9/lFIDQK3lW2rL4SKQZxLCb17sc7M/o3n9+J3MUbk47TmqNiubEw2u8Mx3vXd9sZkl54R aTG2L8bj109d8YZ0jH0tbG8cRuAFAJzuEifBIsz2rNjbSGfH3ikwhhdMaFzo3Eu+l+696nWAfiGd wzkGiT6XJjS2lwFwdn9hnMjYOMdb8Y0TpZDRYiytiNnHac0j4yocCDw4juumQGlFECa2bn1+V005 gJqk7pcWDPqpdIb92V5S/B4zqqVzOOiXN0/t1L03ANYtvWIpgZ+QzuGg6h6Y70qH2D97r3QC5zAm j2luS6yJVzdvPB1AaVL3cwnDDIrjursLXO+KMQI7ffboQHldjMHHZYoADSqjo2O5MttYvhxSIUBi +8bCsKwGQHb/Wx9IDvdJR9jX7I76FgJdK53DTfRlIrB0Clcwm69LZ3ARAddXNiwS6dR3MCGxFmPv VJHbVrwkqZsRUSaXKAIAx/TyuxC+fiwAwTPj4sHAZukMcfK7GGM/zyWwKI6O47pMZkgc100FTq4Y Y9b9YvvxzC2Tl62WDrG3O1ZMHk3gO6RzOImx+sWHL9DB7F7WPXDFjwl4SjqHi4ho9vjpbZXSOfa2 vv2ap0D0nHQO5zAS2zdGTOckdS/3xPPy21LJmDiuK498XOn2Nq+LMTZe7hkDLMXyYSMgq8sUASDJ jk6NCd4rFRhwaokiMyhnc3PhY1eqCDDRl109gkAOMRg6O7Z/w8OwOA9gp97Yky5VfIdEW9xTos9d p5iYxlsB2Vhe1otj6+d4fje/izH4WYwZcCwfNsuZLsbetXsvV6xmd045AcCEuO+TNgGRU4OiOStq Pw0k94Y4TQj48/A3h/1IOoeL1g7NtQF4WjqHkxhTqxsXf1I6xt5sCKe+dxwxfsyli2I/iLm6Zn45 Mtq8AwAYJpbxFpPxc2bMz4Z8b/O6GAusn5U0UzzFGEyG94wBJaEtOz32u3BQH/s90mdTcfjrD0mH 2GNWx9R3EWt3vANh0FdbWpaE0jmctKQlJMI3pGO4ioGvj512jzOH/I56fevP4Ofe8gExSbS4H1R+ BgA3DwZPAMf18pvssbFcVxgbbW2fWmyNl8UYQLEczJjxZYqwzLEvmSCdbdmf9plnrypIhwCA77Y3 lhGZHwKIfZY0pZ59MdfdKh3CZYe/umWh7kU6oHJj7T27Z0XErVo1s8DgB6RzuIYIsb80ZLYZ3i8G GOJYXn4T03FxXNcBumcsrcKybj+LMcZJMV0308WYYbw7zuvvPpIgsU5VqcHsTBfF8pLwq8Q4QzqH swjfyE9aWZSO4bJVq2YWiO03pXM47FSuKP2KdIg9CLpU8R0YNbtbpMeHTKzPW9cxx9NNkYET47iu tKIh7aaYVi8DmwAvWy8fl++qyUV9Uc74zBiDY10nX15q34OM/zfej0KupLBUOgQAzF1W914CPiOd w1mM9Tt6grulY6RBDiPuAvCidA530WeqGlsnS6cAACrruR+AEzPzDhlcwKaL4rwBgU+I8/rOI4p8 ZmzixFklAGJZOSWMX9pyjJ+TK7t5XYztfoO7TTpHDEqPLpSMi/qiBtF/OaQLxVqMEXNdnNdPJcLP b5i0UvxL9vv3TRvBBgvh+XfigBh887ampd3SMdJgzdKmbmL6N+kcDjMM/KD6svnDpYOs/fENmwA8 
LJ3DNYR4lyoyZ7uRFRFFfpTQxlGDxsPPfXjbsHKS1ysy/B94MMQHenHIkYn8rRIj+i+HlDl2wbK6 OGeudL/YPtiyEy3tS0sL3wcwVjqHw17eMmjLHOkQqbJz5x3Q2bEDImAsd5fNlc4BACBdqrgvjnHf 2KiatiEAjo7r+mnAiH7PWBE5L5cosuf7xYAsFGPkZzHGcewbI66I/JrpQjtiGpDPW157LABnuoi5 ImdYfBA0u7PuaiZcIZ3DZUz46mcufHSHdI40Wbvyhp0M7ax4CJdXNS66WjpEGIZOvBRyCuP0qvof HhPHpSsqeioBOHXmXOIsIi/GDNjLYowy0PHU+2KMPD1rDMTRt2G32kEuMDaWgjQE6pD1h8++GKs/ PLXzWckIszqmVhLje5IZUuD5nT3BPOkQaVSK4bMZeEE6h9voe+Ont1VKJli/7No1AP4gmcFBRBTE s7TeUNZf/AIUw3jLIP7jeWT4OY7fi/fFGIP9/CEyJkZ+zTi+HFImtKY0jusSJXBuS+rQf0vePZ/P G4L5AQDxfSsuI+av6F6x/lmztKnbEH1NOofjhtuwuBDNbYFsDPof2fu7hymefc4EE2+nxnSIY7wV +/E8EkiXKaYfg/38IRJOur2rJuo9Xpn/gjQBRV6MtbU1BwCmRn3dtCMKRQc/Yy5++HMETJLMkALP hCM3agfFARg8OLhLzx07OGa8p3Jr8bOiISz9SPT+TqJa5PORjxPJso9NJvoq0mLsxBnzhoLhZYdK Zk8nVfbifTFmyGyUzhCTIFfMRXxOB2d+ZgxMkb+d3Xz45okADo/6uin37E21nb+Suvns5bXvZiZn zjpyFRG+4sqB3Gm1eklLDxhflc7hOgK+Ut20SOzsqXXLrvilFs3vMGrsL048K+qLMiA8C+oAokjH W92FirPg6ZieiHwdx7/Nyx/c3hj2DekMcTEIIp6S1nXcZHh75BcNtYvivpjpv6TuPb+rppyIFgCI ZUmqLwj48/qgZ5F0Dh+sGxLcDeBp6RyOK2Wm1jHNbXLPIWbRpdMuMmyjX6rI0GZAHO3LbwadE+X1 XMJMr0tniJv3xRgsPK6oOer1wZlfpohi9MUYE2sxtg/JJYrFQuk3ALxL6v5pwcz/uPusRjVQS1pC 1tmx3jg5t7X4dbG7M3Sp4jtQY9RXDI3dGvU1UyjilUgcfR8BZ7DH4/hdvC/G2JC3M2MALoz4eplf psgRHxI+94H6kQDOjfKaHnjxpqmdj0vceE5nXS0In5C4d6owVr/4yEX/KR3DJ88P/f/s3Xl4lOW9 P/7355mZhC3sAgImARcQFRfUCm4gZLIg1C5EFsUqW22/bW17zulqTftre9qec2xra20ISkEIGroJ QsiC0KrgAlq17ggJgsoua0Jm5vn8/oCeg6zJzDNzP8v7dV29el2Tmft+kzgzz+e5t/AicMe+M1Lg q3lFC0tM9N24YtJz3P3yBMP7jZ7n6DR7S8TR71lvUodvfqvT14OuIeLr63gAASjGQravK+oB5XVj HNkSWBUCTttC3OFiTC27BJwf/0mKP4lAM93t754c2w2Kh8EjBs5IRe4rKyuzTefwlcWlCRWUmY7h AQKRCqcLgFZ2rQJ5IvP9ulooEgkXONpgiMUY4NyasfyiRfmAGD0eIp1s27/Ljf7F98WYrf6uqEWs 651o5w+rR2aDF6no6HAxBrGM3OF1MzE0FSiS3fJ7pOlQb5957YNnh3Ob7zTYvHzSYgheMZ3DA/qG syJzTHRs6vPJzdSSsU62d2BfmMUYYF00ocqRG+C2BUeuA91KLMvX1/FAAIoxwM9rxgBR3OBQU4Gf oghAN64d4djC4iNb2muhU+35xPbOe7s8m+lOK+oK7wSkNNP9epGqfp+jYukiKsrRsVa6Jb9o4Rcy 
3WlDTuhpANsz3a+rKYqcPAdux8g3DgGZnx3hNk174MhmNaLq72JMec6Y57VPhPxeUTv0JuzAYgw4 4ORF6J5ue4YD6O5Ue36gir+Uli5OZLLPiproAIX+KpN9epZg/YyCuqWmY/hZQ/Xkv6rgBdM5vEBF ftP/5oXnZ7TTxaUJgfw1o326X8/8Ay3O7dZ35HvW+Z2LPUasZqfWjTl1U96VDjaHuJui100prt4P wM/n5Ax+qCbaK9VGEjZ3UgTg6A5PloQc34XK60LI7NbRVVUTQrDwqACdM9mvVwnk+ybW8wWN2PpD 0xk8olMoYc11clSmVdTmFvfHUXV2qiKAfQ635zl2OJTyyNiA8ZW9AX8e9nxUy47Vpb7ffdP3xdjR Cws/D3FKWHR0yo2IHfiRMXG4GFOo019e3ibYE+++e1Umu/y4297vALg2k3162NPTx9SsMB0iCBpX TFkOSMan63qTXpu3P/GdTPbYY8eBp+Dv64a2Ezi6/lkd/r71IkEo5Zvg2oKb4O/1/r5eavQvvi/G jvL3VEWR1M+xanH2AEIvUmC/U21VrCruL4qhTrXnBwJ5YtaV6zM2Sj27vugqAX6Qqf68Ti1823SG IFFV/r5bS/QH55QscvpczVNav35WTIW7Kn6SXH5OwaK+jrXGYgxqO3ATXOD3c0xZjPmI3/+Y0aNb 0yctLizG4GAxJon4WPj7blWbiW0vylRf5UvHdRC1HwUQyVSfHvfEzNG1a0yHCJLNKyY/IwDX57VO xFJd0Hfc0g6Z6jCkkrHPK48QCcOx2R5O3vz0KrUktWmKZWWWAr7eJEzE54MpRwWiGBP4e3t7AH0q 6gouTaWBcEgy9iXnYo59OajK551qyye2vx+JPZWpzqx2zf8DYFCm+vO4hIbke6ZDBFIC/wEgbjqG RwyKxA/8d6Y629TJWgngo0z15wWi+lnH2uLIGGCntpti7ovnXw6gt0NpXEkVvt+8AwhIMQbx/cgY JMWpirZtO7LFqpeJOvPlMGdFYXcANzrRll+IyKKyUaszctE5p65wLCCzMtGXHygwb+ZNNa+bzhFE DbWT31KRBaZzeId+Ma9wUWbW4i4uTYhoVUb68grBTfm3zO3qRFMK5cgY7JRugovtwBIV9wvE2s1A FGO2HYgzQ25O5cUqVuCLMVsc2mo3rOPA6XGfkEhoRqb8PFQT7aXQh8Epoq3VHLZ57pVJ4VDoPgDN pnN4hMDSh8+Nzk95B+FW0cx8bnlIlt2cndK1xr8IJPBb2yPVaYoQR/4WbqYSiOv3YBRjIvjQdIYM GD53VUmfZF9sKQI/TVGAw060owpOUfyk92ZGazNyrlI4hDnw+bQNh/3mrsLa902HCLKNS0s3Q/E7 0zk8pHc8FPoDoGm/4dJQfdtzgGxIdz9eIgJHpioq1JHvW0/T5NfqH9nSXq92Mo4rKT4wHSETAlGM QTQIxZgVP7JpRJKvTm243CdS/nJ4cNXITgDGOJDFP1QqM3F21Zz6gllQjEt3Pz7ycZYkfm46BAEa 0h8jINNxnCHFucWVMzLTlz6emX48o6h3dH7HVBuxIIEvxhRW0tdd2oJbEIhreAnC9XsQ/pCAJAIx MgYAn072haqcpiiS+p26SCLrZgDcmfIYIct6LN19lK8sPldV/ivd/fiJqvzsjjEr/b65kSdsXjZl D6AZ25zCDwRy/4CiR9O/SY9lLUx7H97SPjsUKXagncAXY9DkN/BQ0aSv97wlEYjr90AUY/FgjIwB ijHza6JJ3bES0cAXY2pbLak3glsdiOIn6+4aU/1GOjsoWzUybCUSlQBy0tmPz3ygh7N+YzoE/Z/m ROLXQGBuHDqhY8IKzcfIVeF0dtK4bOKbgL6Uzj68RqApf88pR8ZgQZMaGTtrZFUnQEY5nceNbFu3 
mc6QCYEoxhKRWFC+4No3W8ntrqN2qgtJfSDFkbGK+tG9Bc6dw+IHCpmd7j76JiL3QuD/ufNOEvxw 1rilXEDvIttqpx4UkR+ZzuElorg6v/2H96a/n/R/jnnM+POKq85KrQmuGYMkNzLWvkO8BAGZgRPT YAymBKIY+/Ko1QcQkAMGRXViUi/kmjEASG1kTENTwV0U/5cC+2Lhw2ndjezh+qLhovLddPbhQ29v DbU8YjoEnajHtn0PA3jHdA4vUeC7eUWVw9PZR3ZWUyUCcg3RSlkxid+eUgua4vetD2iSxZgokrvO 855922qnHjQdIhMCUYwBgATl8EbBzQuWF3du88uUI2OS+uGrdzoSxCcEWnn0RkhazK+JdlS15wFI 6zQl/7G+m6kz36ht1q+fFVPV75vO4TFhCBYMGv9w2qYpv71k2n4AaV/76jEzUtrR0kLCwSzelMR1 13nFCzoDcGLNnhcEYlQMCFAxpsHY3h4A2jWF4+Pb/CqRwI+MqST/5VBRX3g9gAsdjON5tmVVpLP9 w4JfK3B+OvvwH31x+pgVfzGdgk5t84rJfwTkOdM5PGZgc0v7+9PZgYjy+IFjKQbnjX0s6RFJhbIY S2JkLK7yWQRkiiJYjPmQBuePqtL2IWxVbuABO4UvB9UvO5jE+xQvzBpdk7ZF73Pqo7dAMC1d7fuV Jda3MnHMAKVCVBUcHWsrwfT84spb0tV8w/Ip/wCwLl3te5Lq/0v2pQKLo/Nqt/m6SyHB2SQsOIMo wSnGRGWr6QyZIpDovPrRPdr4msAXYwqrUzKvK6+JDgYwweE4niaC8nS1PXdVSR9VcEF9Gyl02bQx NatM56Az27xi0kpAq03n8BoFZueXVPVJWwdp/FzzJEVp/5sXJjU7QdVO6vvWX9o2I6nvuMqeEIxO VxrXCciBz0CAijHb0vdMZ8igSAtCn2vLCzTJLVb9RER/NLuusE2/NwAIWfJzBOi91Arb9nXcn5aN O1QhsXhsDoAUd/IKnARC1rdMh6A2sELfROrrWIPmLFtj81Jay3QacujwAgRo6lQrhEIJ+WlbX5Rb suBmgZSlIY+3aNumG2bFdQICtEmYiGwwnSFTAnMBadn6tukMmaRt320n8CNjALIE+tjs+uiU1r5g dm3BZxTa9jV6/vY/3xixtikdDc+pK/iSQHh8QJtpxcybal43nYJar3HZxDdV8LDpHF4jkGheyWNf SkfbDavvbBbFA+lo28M+n1uy4ObWPjmveOEUUesvCM66p1NSQZtuggdqiiIAVQ3MzrLBKcZUAvNH BQABbihfUXh2a5+vAk4ZOCIsivkVdYX3nOmJFXVjLhQRbhF+LMEeq32736ej6fKa6GCI/CIdbfuZ AvvittxnOge1XSiM+8At1dtO7V/kRysHp6PpsNi/A/BxOtr2KlHrDwNLHrvgTM/LL668B5D54A64 AACBtvq6q//NVf0AXJ/GOK4Thx2Y6/bAFGN3Rmu3AAjSIachsezS1j7ZArqkM4zHWID+cnZd9OHf PTm228meMLs+ejNgrQbQNbPRXM6W3067bonjF4/l64ZFrBAWAG27k0iAJfjZ3YW1203noLbbtGTy NkB/ZjqH90gHDWHBsGHljk/p2lB92z5AuLPiJ/VIqP33vKKFJSf7Ye7Yhd1ySxbNVeCXCNB155lJ q6+7wvFEKYL1uzu4tXpKYPZ6CMzdCRFoRR02ABhqOkumiMhEAL9uzXNVWVQcT4C7Itmxz1bUR5+E 4j2otohIPwWuh+IS0/lc6KAdOZyWKTzWxz3LoDosHW373OZ9Hfb/ynQISl6iU+SX1oH4FwU4x3QW jxm2s1fnMgDfc7rhcCL263godE9bN2Dwud4QWZZXXPkqVJ8WS7YCmq0q58HGWEB5jXGiVv9ObEsn 
SpD2wRVsAILzLw5SlQ0oAjPkedSnHn6qaNCZnlRWVmYBaPNB0QHRFYrbANwHkZ8o8CWAhdgpVMwa tXqn042W1425DqrcfCIJKvhuutbvUWZsWVzaBBHHC4pg0G/lFlVe53Sr79VO3Q4I1/Od3FCIfFkV P1WV+wBMAWeQnEqX1mw2kzf2sQtFcXUmArmFIFhLi4JVjFnB+uMCELUTd57pSXk3ru6MoP23QE47 GA6Hf+50ow+uGtnJgjUXQMjptv1PXv7gmRFp2dWSMmvz8okLwDOukhESS+afV7zA+ZuNduI/ARx0 vF0KkvBZIxd3POOz7MRdGcjiKmrbgdp0L1AX4ILg7MzyL6oytWzVyNNOR21ubse7VpQSAX5556jl HzndblYs67cAznO63SAQW+8pKyuzTecgJ4gK8G+mU3iS6oAWWI5Pn26suf1D5c6KlKLOWYmTrkv/ lyPrHuX2TOVxCxW8azpDJgWqGLNtfcV0BgPO7t+SVXy6J0RCiX6ZCkN+JDvbxUL/5XSrs2sLPgPB HU63GwiCv0wvrP276RjknIbqyX9TlSdM5/AiAe7IL17k+Lbg4Xj45wB2Od0uBUfiDNdfO8/qfDOA 3hmK4xohlX+YzpBJgSrGtPvu1wEEbv2EhjDjdD+3LeHCcEqaQH96W0n1PifbfGjlTf1EpMLJNgMk pkh823QIcl7Ykv8AEDOdw4sU+uDR7cEds7G+dC+A/3SyTQqaUN5pfyw6PUNB3KRpU/PZb5gOkUmB KsZmXbk+BiBQ1TYAQFFSURMdcKofC/TKTMYhP9HGQ7GQo9s8q0JCdvgRAD2cbDcwFA/NHLMycFOy g2Dj8onvAEjLOX4B0MNKxB5pzYYJbSFNhx8EdLOTbVJwKPSqU/3snOLHzgVQlME47qB4GatHxU3H yKRAFWNHBXERdAgW7j7ZD8qXjusAlS9kOA/5hvWDr5ZUH3ayxTn1hV8TIOpkmwHycZaV+JHpEJQ+ aul9AHabzuFFAonmllR+1ck2G1bf2SwKHqpOybqr77ilJz0iwVL7SwjgdboIXjSdIdOC90dWCdwf +ahp968Z3v74B0Ptm/8fOAJByVC83mVP54VONjn7qcKLAOW0nyQJ8JM7xqzkGhYf27xsyh5A+B5J kqj8LLew8iIn22zIiTwK4HUn26TA6BaJH/jS8Q/2Hbe0AwRn3A3bj2yR9aYzZFrgijGVRBBHxgCg e6eDOZ+YezxnZUmeqvzAVCDyNoV+t7R0ccKp9h5YXpwtCV0AoJ1TbQbMpkOx0G9Mh6D069Qp9AAg G0zn8Kh2YuGx/JFznfucWVyaEOD7jrVHAWP/MHfswoHHPpIV3z8TwGl3WvQrK66BGzQJXDG29dnr 3gaw33QOEwT4zr9Gx+auGtlOE/FKAGc+44LoRM/NjNYtcbLBdmH7RwAuc7LNIBHVbzk9ZZTc6fXF pS2q9ndN5/Cwi9E++4dONthQPekJAM872SYFhXQQ25p/XvHybODIqJgC3zKdypD9DSPeCdya58AV Y0fP3XnJdA5Dzs450Onr5euGReKJ7PkQjDAdiLzJEnH0QnB2bcGNIspzlJL3t+nRusWmQ1DmbF4x ZTGAv5nO4VUK/Ft+ceWNzrUoqirfc649Cha9tgV75mHkqnAkduAbAPqYTmTISwjg+ZiBK8YAQBG8 xYH/S+SH1p4eb0J1guko5E0K1E4bU7PKqfbmrhrZVUTmI6CfRw5IQCxHNyUgb7Bs62sAHJsqHDCW Qufn3zK3q1MNbl4xaSUE9U61R8EikFvz2n/0JkQDvCFM8KYoAoG9+AnsJh4AEAZwrukQ5F0hVUfv /sbjWb8FkOtkm0EiQPmMMSteNZ2DMm9TzcRXFMrz+JImuXo4+7eOtgjh2jFKgZ6HI9dpgaSKF0xn 
MCGQxVgiLoH8YxM54G/TonWObYIzu67wcwCmONVe4Aj2JMItAb6LSrYd+R4A7qCZvCn5RZWTnGqs Yfmk5wF51qn2iIIkHIkEct1lIIuxu4tXNADYYToHkdcoxLEDnh+piZ4jwrv6KbH13lmjVu80HYPM 2VJTulsBRzejCBoVPHhudNE5jjUocOxzkihAtm1cWhrIA9QDWYwBgAIvm85A5DFN7Wxd5kRDZWVl ViKEP0CDuXWvQ17bGomVmw5B5m1uOvshAP80ncPDusVD+geUlTlyTXToUGgJgGYn2iIKDA3udXlg izERedd0BiJv0eenFtYedKKlfiPWfh2Km5xoK6gska+VjVodN52DXGD1qLiq3GM6hsfdlPf8BV93 oqEdq0sPgNvcE7WNILDX5cEtxlTfM52ByEtErAYn2qmoLxoK0Z840VZgKf7o5I6W5H1HdvKTP5vO 4XE/6V+y4BInGhKg0Yl2iIJCJbjX5YEtxmwWY0RtYqt9VqptlC8d1wFqLwSQ7UCkQFJgXzwU5ygI nSAcxz0A9pvO4WHZIbUW9J9Q1T7VhhSa8uclUbAE97o8sMUYrNCHpiMQeYlACn5fW3B+sq9XhVjZ h+cBuNjBWIEjqt+9e/RTW03nIPd5r3bS+6rcWj1FQ8MHEws7JxEAACAASURBVPMAlWQbGFjy2AWA jHEyFJHviQT2ujy4xRhie00nIPKYrJDIivK6MW2exlO+bljk4fpoOQSfT0ewAHlu65prHzIdgtxr c07oQZVgntXjFFWdkFuy6CGMXNXm854GFFUOTahdDSCShmhEvmXH7cBelyd958fr5q4q6ROPxwNb hROl4BCAX1jt290/7bolZ5wSNWdl4dWw8T8KvS4D2fwsZsMeNqug/jXTQcjdBhQ+dqlt2esQ4MNj nSCCpyWh/7apZsoZi9tB4x/OORxv901V+Q8AKU9zJAqacCLe+73aqdtN5zAhsMXY/WuGt885mHPI dA4iD9sP4C+ArITIP+y47mgXjrc02Vb3sGWdq7Cvgsp4AMMQ4M8ap6jiP2dGa79rOgd5Q37xop8p 9Fumc/iAAlgP6BIReTFu6XvaFN8dDltZloTOUuByhY6GyGcAdDIdlsirIujabkN1yWHTOUwI9AVS RV00gUBP1SQij9iwv+P+od8YsbbJdBDyhr7jlnaIxPe/CuBc01mIiM4g3lg9ObBTe4NeiCRMByAi OgMV1S+yEKO2+GDpuEMquBtHRnaIiNzMNh3ApKAXY4H+4xOR+4nIA9OjdStN5yDv2bx8ch0gD5rO QUR0BoEeHGExRkTkUgp9Y1+Hfd8xnYO8S5qa/10AbvpCRG4W6OvxoBdjga7EicjVDkMxhdMTKRUN q+9sVsgdAFpMZyEiOoVAX48HvRiLmw5ARHQyIvqDmdG6f5jOQd7XWD3pZVHcZzoHEdEpcGQsqDTg /34icq2nO+/u+j+mQ5B/NFzzzi8ArDadg4iIPinQxYgE/N9PRK60V6zw7aWliwM9bYMcVlZmx2Hf DmCP6ShERMcJmQ5gUtCLkaD/+4nIbVTvnj56eaPpGOQ/W6tv2wLoV0znICI6TqCvxwP9jwcQNh2A iOhfVFE+I1q3yHQO8q/G6ikLoZhjOgcR0TE4MhZgQf/3E5FLqOCVSKTlHtM5yP8SOeGvAnjVdA4i oqMCfT0e2H/8A8uLs8GRMSJyhwOawMQ7R61uNh2E/G/L4tKmREg/D2Cf6SxERACyhg0rj5gOYUpg i7FIKNHFdAYiIgCA6t2zCmvfMh2DgmPLk1PeVcEM0zmIiADgo67tOpvOYEpwizGVwP7RichVHpoR rVtgOgQFz+blk6tUUG46BxFROBTc6/LAFmMqypExIjLt1f0d93/TdAgKrizt+jVAXzKdg4iCTcKh 
wF6XB7YYE9GupjMQUXApsE8S8rlvjFjbZDoLBdeG6pLDCdu+FVw/RkQGqaCb6QymBLYYs4FzTGcg ouCyVKdPL6rZYDoH0Zaa2zdw/RgRmaR2cK/LA1uMiUie6QxEFFi/mR6tW2w6BNG/bF4+uQqqD5rO QUTBJGrlms5gSmCLMWhwK3AiMklfbIqF/t10CqLj9dxx4OsA1prOQUTBIxLc6/LgFmNAYCtwIjLm Y9hy61dLqg+bDkJ0vPXrZ8VC4fBEALtMZyGiYFHVwF6XB7cYEww0HYGIAkVV9Y4ZhbWbTAchOpWN S0s3i+g0AGo6CxEFSICvywNZjJUvHdcBwADTOYgoUP57ZrRuiekQRGfSsHzKE1D80nQOIgqUc/tP qGpvOoQJgSzGpH3sIgT0305ERjzXZU+X75sOQdRajc1nfwuQZ03nIKLACFmHYkNMhzAhkAWJpfYl pjMQUWDsFis8sbR0cYvpIESttnpUPI7ERAA7TUchomCwEgjk9XkgizEbuNh0BiIKBIXq7dNHL280 HYSorbZW37ZFgbvA9WNElAEqEsjr80AWY6IsxogoA0R+NyNat9x0DKJkba6evFSh5aZzEJH/KZQj Y0GgCoGFK03nICLfe9NuyvoP0yGIUhUPd/4mBG+ZzkFE/iaQKwEV0zkyLXDF2Jz6MYOh6GY6BxH5 WkzFumPWuKWHTAchStUHS8cdQsKaAoDrHokonboPLHn8fNMhMi1wxRgQusZ0AiLyve/NHLPiRdMh iJzSWDPxJVHcZzoHEflbHPop0xkyLXDFmGrw/shElFFPd9nT5X7TIYic1nDNO78A8JTpHETkX2Lb gbtOD1wxJoLA/ZGJKGMOiBW+vbR0ccJ0ECLHlZXZojINwEHTUYjIp0QCN4MtUMXY/JpoR3BbeyJK EwF+wG3syc8aVkxqADhdkYjSZmjfcUs7mA6RSYEqxg5b1lUAwqZzEJEPCdZ33tPlAdMxiNKtsens XwN42XQOIvKlSDi2/wrTITIpUMWYqN5gOgMR+VICCczi9EQKhNWj4rbITAD8752IHGdZuN50hkwK VDGmAhZjROQ80V/NKKxdbzoGUaa8v3zSOgF+azoHEfmPrbjRdIZMCkwxVr5uWATQ4aZzEJHvfJid EK6hocDJjjTdC+Aj0zmIyF8EuBYjVwVmWVFgijHZ2+MqAIFaEEhEGSAom1pYy93lKHDeXjJtv0B/ ZDoHEflOp3Pabb3cdIhMCU4xZnOKIhE5Td7ZGmp5xHQKIlMamvpWAHjTdA4i8hdLQoGZqhicYkxZ jBGR0+Q7ZaNWx02nIDJm9ai4Qn5gOgYR+YsgOJvuiekAmVBVNSG0t9veXQC6mM5CRD6heGF6Qe01 IlDTUYhMyyuuXAOA67KJyCl7GzuFe2Bxqe93bQ3EyNjeLnsvAwsxInKQBb2XhRjREaL6Q9MZiMhX uuQdSAw1HSITAlGMiYVRpjMQka/8466CujrTIYjcomHFlBoAPN6BiBwjipGmM2RCIIoxBUabzkBE PiL4CUfFiD5JBb8wnYGI/EMtDcT1u++LsaqqCVkArjOdg4h8470uu7v8xXQIIrfZ3DH8JwXeNZ2D iHxCccOwYeUR0zHSzffF2L4ue68B0Ml0DiLyCcF/lpYu9v2CYqI2W1yaAPS/TccgIt/I2dGr49Wm Q6Sb74sxtXCT6QxE5Btbm1pCC0yHIHKrLHSbB+AD0zmIyCc0NMZ0hHTzfTEGwPd/RCLKmIe+WlJ9 2HQIIrfaUF1yWIFy0zmIyCdEfT+o4uti7MFVIzsB8P3wJhFlRAskMcd0CCK3iyTivwfAmxZElDIB rjlrZJWvlxv5uhjLjmffCMD3C/+IKP0U+viMMSu3mc5B5Hbv1U7dDuifTOcgIl/I6tgh7uuN+Hxd 
jNkIxpaYRJR+asuDpjMQeYZl8f1CRI5Q9ff1vK+LMeH5YkTkjJdmFdY+bzoEkVc0Lpu0BsA60zmI yA/E1/s/+LYYe6gm2gvAJaZzEJH3ieAB0xmIvEceMp2AiHzh0vOKq84yHSJdfFuMhUMYBUBM5yAi jxPs2ddhf5XpGERek+gUWgRgr+kcROR50iLxUaZDpItvizG/D2kSUaZI5TdGrG0ynYLIa7YsLm1S 6CLTOYjI+ywV3y498m8x5vPFfkSUGbbgEdMZiLwqZGOu6QxE5H3q4035fDmNb87Kkjy14w2mcxCR 5702o6B2qOkQRF6WV1z5CgC+j4goNYn4wMbaqZtMx3CaL0fGbDsRNZ2BiHxAwEOeiVIkKn8wnYGI fMAK3WQ6Qjr4shgTHw9lElHGtNihlkrTIYi8LhZrmQ/gsOkcRORxlj/XjfmuGFOFAPDtjitElCn6 11mjVu80nYLI67auvGMXgCdN5yAij1OMBtR3S6x8V4zNWVl0CYBepnMQkbepyKOmMxD5hQALTGcg Is/r1b9k4cWmQzjNd8WYIsEt7YkoNYI9XXd3qTUdg8gvOnYKLwew23QOIvK2sFq+m6rou2JMfHwO ARFliGJxaeniFtMxiPzi9cWlLRD5i+kcRORtCv9d5/uqGCtbNTKswHWmcxCR11k8qJbIYWqD7ysi SpHeOGxYecR0Cif5qhjrH4t8SoDOpnMQkad90GVPztOmQxD5zeZr3l4FYKvpHETkaTk7+3S+ynQI J/mqGAP8N4+UiDJLVB4rLV2cMJ2DyHfKymyIVpmOQUTeprb6an8IXxVjKurLw+CIKHNsSx4znYHI r6wE+P4iopQI4Kvrfd/s1V++dFwHq93h3QCyTWchIm8SQcO00bUDRaCmsxD5VV7Joo1QHWA6BxF5 VkssnNPtg6XjDpkO4gTfjIxZ7Q9fCxZiRJQCW/EXFmJE6SWwl5rOQESelhVu2TfCdAin+KYYUxuj TGcgIm8Tbr1NlH62xfcZEaVELMs31/2+KcYs8df8USLKuO1ddndeYzoEkd815ISeBrDDdA4i8jJl MeYmDz8zPkeBYaZzEJGn/ZW7KBJlwOLSBABOVSSiVFx1XvECXxxn5YtiTA82jwQQNp2DiLyLUxSJ MkfF5vuNiFIRjtmh602HcIIvijHb4noxIkqeAvs67+78lOkcREFhHYrVAzhgOgcReZjlj6mKvijG wPViRJQCC6grLV3cYjoHUVA0rL6zWQDeACGiFLAYc4V59aN7iOIS0zmIyMNUV5iOQBQ4ItWmIxCR l8llfcdV9jSdIlWeL8YOa3gkfPDvICJzNBJmMUaUYQKbxRgRpcKKxMTz68Y8X8SI6g2mMxCRp702 Y1T1FtMhiIJm0/IpjQDeNJ2DiDzMslmMGWfhWtMRiMi7VDlVisgYBd9/RJQ0hXi+DvB0MTa/JtoR iktN5yAi7wpZ4BRFIkNUlcUYESVNFJf3Hbe0g+kcqfB0MRYLydXg+WJElLwDObs7P2s6BFFQZVnd ngZw0HQOIvKsSFZ8/1WmQ6TC08VYQtXzQ5NEZNQz3NKeyJwN1SWHBVhjOgcReZfC2/WAp4sxAdeL EVFK/mY6AFHQKd+HRJQSb68b82wxVlZWZkHwKdM5iMjL7NWmExAFnfJ9SESpGYGyMs/WNJ4Nfva1 z1wERTfTOYjIsw7Y3fasNx2CKOhyOmW9CK4bI6Lkdc1de8GFpkMky7PFmGjI00OSRGSaPDvryvUx 0ymIgu71xaUtEKw1nYOIPMzy7rox7xZjosNNZyAi7xJRrlMhcgvFatMRiMjLZITpBMnybDEG4GrT AYjIu2xbuYMbkVtYfD8SUfLEw3WBJ4uxB1eN7ATgAtM5iMiz7FCH9i+ZDkFER7QLNa8DYJvOQUSe 
NWjQ+IdzTIdIhieLseyWrCvg0exEZJ5C35p23ZL9pnMQ0RFvL5m2H4J3TOcgIs+ymlraX2o6RDK8 WdBYGGY6AhF5l0DWmc5ARCfg+5KIkmepJ+sDTxZjKrjCdAYi8jABt7QnchmFshgjoqRZKp6sDzxZ jEE5MkZEyVPhHXgi11EWY0SUPIU36wPPFWPza6Idwc07iCh5Gspu95rpEET0Se0jh18FoKZzEJFn De47bmkH0yHaynPFWJNlXw4gZDoHEXnWB9y8g8h93l4ybT+AD03nICLPCkVi+z23iYfnijFR8eQQ JBG5hOBt0xGI6JS4oyIRJU+8t4mH94ox8ebiPCJyCVVe7BG5lED4/iSiFHivTvBcMQZgqOkARORd vNgjci8V5cg1EaVAOU0xnaqqJoQADDKdg4i8S5UXe0RupbB5s4SIUjEYZWWeqm88FXZf530DALQ3 nYOIvEtUNpvOQEQnF0qE3zedgYi8TDrkvnh+vukUbeGpYsy27CGmMxCRt8WA7aYzENHJ2Yjx/UlE qVH1VL0QNh2gLUTEU79cInIdu8feLrtMh2ir84qXZ8fsXd01bP3vzACJh7pqOCEAoAgdFiQOhUJy MJHQls1XvbsXZWW2ucSUDmVlZVavK1/sEmnXnBWCdLQTVgcFsgHADotGNPHxv54bi4eaDtvW7q+W VB82l7jteu48tHNnrxwFIKazEJFHqTUEwJOmY7SWpz7sZtcVzBfI7aZzEJFn7ZhRUNvLdIhjnVOw qC8snBsSHQjBQBUMVMVZAvTEkf/1ANApiaYPA7oNkK2qsl0s3aqKbQLZoqrvJGKxN7euvMNzhalf la8a2RPx8GBLrEFQ9AfQG0Dfo//fD0AvHC282ugAoLsA2QFgp0J3QLDJgmxMqP1eWGXjtGjdB879 S1KXV1y5E0f+uyciajMF5m2unvwF0zlay1sjY5ALTWcgIu9S6A5TfQ8bVh7Z2avzxQpcaQFXKnQY gIsAbXck2/+GdOouWTYguQByRfSYdhUiQDgrgrziyp0ieFMVbynwGixds/lg31ewelTcmQh0vLJV I8PnxCKX2ZaMEMXFCgwG5ELEtSeAY/5DcEwnQDoByAMAgQBH/nOABQu2ABV10SYI3lAb6yzoOoiu T3Tb889ZV66POZ6mNQQ7oCzGiCg5Frw1TdEzI2NlZWVWv2vX7APQ0XQWIvIowd9njKm9MSN9TagK 5R9MXGnbKBJoFIJhSG5kI9MOQvECRJ4BZG1zouXv22qnHjQdyqvm10Q7Nodxg2VjhEKvA+QqeON7 rBnAelGpTajWdNvbZV1p6eJEJjrOK658GsB1meiLiHzpQGP1pM6AOH97Kw08U4w9VF2UHw7bm0zn ICIPU9TNiNZG09V8/wlV7UMHE59RtccLpABA93T1lUFNAtSr6hNhO7H0vdqp3GDhDB6qifaKiI5X kU8DGAOgnelMDtglijqFLtnf6cBfvzFibVO6OsorrlwJ4KZ0tU9E/meJ5m9aPqXRdI7W8Mw0xXAo McRDtSMRuZEgLXf284oqh4vIF/RA/FYAXcRfn1XtFRgHkXHxUNjOK160VsVebGW1zGv4650fn/nl wfC7J8d2i2TH7wD08wCGK8RTuxW3Qg8VTARkYs7BnI9n10YfF7H/MKOg/jmnO1Jo3GfvISLKMFtD QwCwGHOSiAzxxFgjEbmZc2uhJlSFcg8mJovqdwBcqGlY7ONCFqDXisq1ejjrp3nFlY/ZIg+9v3zS OtPBTJldX3SVaOJuIDYRwTkHs6sIZgHWrIra6Otq4T+77u7ymFPTGAXCNYtElCJ7CIBq0ylawzPF mELP5cgYEaVCnSjGysqsvOfPn4QD8XsBDEo9lVdJBwB3Wap35RVXrlPFrzfnhBdhcWlG1hWZVFU1 
IbS368eTIPI1qH1loL+bBBeJYsHebnu/V1Fb8P9tXXPt42UpHqsgQCIQtzaIKI3kXNMJWss70yjU O79UInIn0dSKsbyxi0bkPX/BK4AsQKALsRNcKYJH8w7EX8stWjgBUF9WJ6qQ2XWFpXu77X0NIo8C uNJ0Jhe5ECKV/a5d83J5TfRTqTTkyE0TIgo0he2ZusE7xZhgoOkIRORxktxGCsOGlUfyihf+F2x9 GsDFDqfykwtFpCqveNFLucWV40yHcdKcmui4ipXRlwX6OAAes3JqQy0La+bUF/ysbNXIZGffeGHX USJyMYHlmbrBE8XY0Q/0XNM5iMjbBNK1ra/pN3pej529cp4C5N/gkc9MF7hMgCV5JZV1/QsfPc90 mFT8vrbg/Ir66Eq1sEQUl5rO4xGWqnyrXyJr5ZwVhW3fUVTR5vcpEdEnaR4mVIVMp2gNT1xY5CbC uQAipnMQkbcptE0Xef1vruoXzo48A555lBzFmJAVejW/pPI7w4aVe+ozvHzdsEhFXfR7IZFXodxm PSmKG+yQ/fTcVSV92vQ6YTFGRCmL5O9PnGM6RGt4ohiLI+SZoUYicrE23HE/r3hB55AdXwbF4HRG CoD2qvjprl456/OLF1xjOkxrPFxfNNza0+MlAD+GP84IM0YgQ+LxxPKHnxmf04aXdUlbICIKDBvw xLoxTxRjAmUxRkSpa8Md95hYD4LT0hyjwCUK69nc4spf5o+c68oCZ+6qke3m1Bf+ylb7GXBtoIP0 cvtQ82/a8AKOjBFR6jyy34QnijG1uZMiETmiY/nScR3O9KT8ooWFUNyWiUABYwlwj7bPfj63eMEQ 02GONfupwovi8aznVfVr8Mh3o6cI7qiojxac6WlnjazqBKBjBhIRkd+JNwZzPPGFw5ExInKIoF3T GW/uKOQHmQgTYEMF1rr8kkVfNB0EACrqondLQl8EMNR0Fl+zUXamp3TK9s521ETkbpbNkTHnCEfG iMgZonLa3f3yovMHQDAiU3kCrL2qPpRbtOivfcdV9jQRoHzVyJ5z6gqfAPA7AO1NZAgUwYjylcWn /T5PWHp+puIQkb8ppyk6iSNjROQMkdMXYwhFPpehKARARD8dieMfecWVGd2xcE5twWgrnvWKQsdn st+gE41/+vTP4MgYETnGE58nri/Gfvfk2G7gzkpE5JwznHulntjxz2f6AajLK170o3SfC1NVNSFU URf9sYrUAuibzr7oRALrtO8vEYsjY0TklG4Dx1S5voZwfTEWyj7c33QGIvKVQWf4+WUZSUHHswC9 N+9ArObc6Pxe6eigon50773d99YC+B488P3nR6J6+veX6pnen0RErRYPxV1fR7j+y0gsy/W/RCLy lGFlq0aGT/qTkavCAAZkNg59koyOh8Iv5RZVOnrQdkV94fXQ0Es8wNksBQaWlZWd9NrjyMHgekWm MxGRf1mirq8j3F+MKTxxejYReUanvrHISc+Qyu+woyc88LkYAP1EsCq/ZGEZTnHh3lqqkNm1BV+D 6kpwWqIbhM6+cXX3k/1ge+/OlwJyxqMniIhaSwEWY6lSaD/TGYjIX0Rk+MketxPxHpnOQqcUVpX7 8p6/4E/Jzvmfu2pk14r66J9F5FcAIg7no2S1ZJ1090xLba7XJCJHKdw/w871xRhUXP9LJCJvUehJ i7GQIK2bR1BSbklE4q8MKFx4dVteNLu24LJ4POtFAW5JVzBKTiQUOvm1hwiPlCAiR4m4f1DH/cWY B4YXichbBCcfGYuHLDvTWahV8mxL/pZXUjm9NU+eU1swXUTW4ow7Z5K7CEfGiMhZHlju5IFizP0L 74jIc86bs6LwhAt1jceaTYShVmkHRUVuceUf+o5betJ1ReVLx3WoqIvOU5EKAO0ynI9aKZFINB3/ 
2ICiRwdBlZvnEJGzxP2DOq4vxgTi+oqWiDwopOOOf6hjdss2E1Go9QS4I5LYvz63sPKiYx+fXT/6 Aqvd4bUAphqKRq1kt2Sf8D6zEeLh20SUDizGUrFgeXFnADmmcxCR/6jg5uMfe3vJtP2AHjKRh9pA MVgsrM0vWjgRAGbXRyeKhtYBGGo4GZ2BAvtmjVt64ntMMNZAHCLyv65njazqZDrE6bi6GDvUzuao GBGlh+L6uatGdj3xB/Jm5sNQEnJUZNENM+5/MxELLwJv3HmCBZzw/sodu7AbgGsNxCGiAOiY5e6D n11djIXitut3QCEiz4rE4tnRkzz+SsaTUNIat/QZPL+qCHv3ufrGJ/2fV49/wEpIEYCTH8RORJQi 2+UHP7u6GFPhtvZElD4C+9YTHlRdYyAKpeCDj3pizoJxeGcjJ1O4nULWnvCYoNREFiIKBsvlm3i4 uxgDzjadgYj8TG6eVz/6Ewc9ixVZBoBb3HtMU3M2Fj9xE556ehgStqu/2oLMjtu67NgH+o6r7Amg xFAeIgoCS1xdT7j6G0uhfUxnICJfy2qxrYnHPtCwvPQjAKvNxKFUqArWvHgJHll4M7bv7GY6Dp3o qbsLa7cf+0AkrhMBZBnKQ0SB4O56wtXFmKi4+pdHRH4gJ2yFbsH6uYkk5IxtO7pjzsJx+Pvay2Cr mI5DR6nISd5XckfmkxBRkNiK3qYznI6rizGIu395ROQDgqsr6sZceOxDm6on1kKw7FQvIfezExb+ vvYyPPp4MfZ8zI0WXeDJmWNq6o99ILd4wRAAVxrKQ0QBIRBX1xPuLsZc/ssjIp8QmXb8QxEN36nA uybikHPe/6AXKh79NNa/MgjKUTJT3rbDLXce/6DAustEGCIKGIGrZ9q5uhjjmjEiygiV6Q8/M/4T wycbqkt3RBLx66BYYSoWOaMlFkb1yuFY+Mcoduw8ydFylE7LQ7HQ9bNGrd557IPnFS/oDGC6oUxE FCQun6bo2tuE968Z3j7nYM4h0zmIKCjk6zMKan51sp/kF1feCOAuBYoA9MpsLnKSZdkYNvQd3DDi ZbRvd9h0HL/aLiLLkdC50wtr/36yJ+QXLfq6it6f6WBEFEgaQdf2G6pLXPmh79pirKImOgAWNprO QUTBIIKGLaGW88tGrY6f+lkqeWMfH6yJxMUQuRDQISIyGIpBANplLCylrH27w7hhxMsYNvQdWBZP MkhSM4C3ALwtKm8A9psq+s/pY+rfEoGe8lUjV4Xz2n+4AUBepoISUbCFwuG8jUtLN5vOcTLuPfHe snu7fBYlEfmIKvL7xrM/C6Dq1M8SbVyGNwG8+YmHy8qs/OcG5YrIBSr2MNuWT4nocHAUzbWamrNR 89Q1eOnVwSi48QUMzPvAdCS326bAWgGet1RfakmE3tn2/DWby8rKTqhkZ5yhofx2H05QFmJElEHa EusDgMVYW4hYffTU99WIiBwnqt/EaYuxUygrsxuABhz5X+3Rx6z8FwZdpYrxgE4BLz5dacfOrqj8 UxTnD9yCG0a8jLN77TIdyU02AagUS5ZseXr4uhMLr+SWU6rgm6lHIyJqvYTl3k0BXTtNcU59wSxV +b3pHEQULKr66ZnRuiWONlpWZuU9P7gQYn8ZihK4+LM3yEQU5w/cghtHvIzeZ+02HccUBbBMRB7c 8szw2pONfKUir2jRZyD6ZyfbJCI6E4XM3Fw9qcJ0jpNx7QVBRV20DMB9pnMQUeC8vTXccvHp144l b0BR5VC15Puq+nm4+DM4yEQU5w3YihuHv4w+vQMzUqYAlqlYP5o5ZsWLaelhQlUo70D8VQBD0tI+ EdGpqNzbuGLSj03HOBnXTlMEtDevU4jIgEF949l3AkjLHbRNKya/CqD0nJJFV1qKXwF6bTr6oeSp 
Ct7d2B8bNvXDeQO2YuS1L/l8pExfVEvumTm6dk06e8nfH5+uwkKMiAwQ925v79pqp6I++mcoPmM6 BxEF0gd2c/b5s8YtTfPxGip5xZWTAfkFgL7p7YuSJQCGDN6I6z71Cs7qsdd0HCdtheq/Ty+oe+y0 ux86oHd0fsd2ofC7AM5OZz9ERKfwx8bqyRNMhzgZ6f8lQQAAIABJREFU925XaLu3giUi3+trtTv8 7+nvRrSxesrCUCw8RAXlQHoviCk5CuD1twZi9rzPoOqJ0djywVmmI6VKAf29DfuiGdG6RekuxACg fTj072AhRkSGCEfG2q6iLvougPNM5yCiwGqxYV85q6D+tUx1mFu84FqBVQHgwkz1Sck5p+92DL/6 NZw/YAtEPFVDb4CNWTMKa5/KVIe5xQuGCKz14Fl8RGSK4K3G5ZNd+d3q3pExwPO3HonI07IshOaV rxsWyVSHm6tve7bn9v2XKvTbAFoy1S+13fsf9ELVX0ej4tHxePWNc2Hbbv46BQDERfTn4XDLJZks xDByVVhgzQMLMSIySd1bV7hyZKx83bCItafHYbg0HxEFyvdnFNT+JNOd9i9ZcIkFa44ors5039R2 XTofwDVX/hOXXrQBWZG0bMSZPMULttjTMznK+y95xQvvBeRHme6XiOg4dmOncBYWlyZMBzmeK4ud uatK+sTj8Q9N5yAiAnBYVa+ZGa37R8Z7nlAVyj8Q/4oCPwXQPuP9U5tlZ7fgksHv4YpL30GvnntM x2kC5Ltd9nT+TWnp4oxfgOSOXThMbFkDICvTfRMRHS+ciPd+r3bqdtM5jufKYqy8bswlFqxXTecg IjpCG0Ox8FV3lVTvMNF77tiFA8W25nMbfG85u9cuXD70bVwyZCMi4UyPlumLti1TZxXWvpXhjgEA /UbP6xHOznoRqgNM9E9EdDyFfdHm6tveMJ3jeK6c5B6WUE/TGYiI/o/k2RH7z+VLx3Uw0fvmZVM2 9ty+b5QAPwLgsjlwdCofbu+B5fUj8OvyUtSuvho7dnXJRLdxAD/cGo6NMFWI9Y7O7xiORJawECMi N7FguXLdmCtHxubUFkxQkSrTOYiIjqVAbSzc8rkvj1p9wFSGAYULr7YteRTABaYyUPJ6n7UbQwY1 YMgFm9Ct636nm39bLJk6fXTNC0433Fpnjazq1LF9/M8KFJjKQER0UoLPNy6f/CfTMY7nypExFeHI GBG5jgDRSCLrmfK6MbmmMmyqmfJCLJxzOQS/A88l85xtO7pj1TNX4MFHPofZ8z6Np56+Epu39k51 N0aFyIN2c/YVJguxASUL8zp0iD/DQoyI3EggHBlrrTm1hT9Q0R+azkFEdArbxMaM6YW1S02GyC9a WKgijwDoazIHpS4rK4b+Z2/HOf2245x+23B2r93Izm7V6QZbLdW7pkXratOd8XTySxZ+WlVmA+hl MgcR0Smp3Nu4YtKPTcc4niuLsYq66AMAvmI6BxHR6cmiUMz6mqmNPQCgf2FV97DEH1JBqakM5DwR RdcuB3B2r13o3Ws3+vTahT69dqNjh6Zjn/a4JORL04tqdpvKeW50fq9YKPSAQG41lYGIqDVE9IGG 5VO+ZjrH8VxajBVWAjrJdA4iolbYq8B/x8ItvzK1luzh2oK+f6q58cdvvpU/1batkIkMlBldOh9A bv/tiUg4Nm/dS4Pvfb9u0gcmcgwa/3BOU6z91wX4BoCM7ExCRJQarWysnjLFdIrjubQYi9YBGGM6 BxFRG+wQ0f9G3JqTiZGKR2qi5yRESiH6OQDXAJBtO7rjT0/eiN17eG0cEDYUz8HCn+JqV22tvm1L ujvsN3pej0gkMkMF3wDgyvUXREQnI0BdQ/XkqOkcx3NrMfYygMtM5yAiSkITRBYiob+fUVi73smG 
y8rKrP7XrY1CcbdCxwI4YRSspSWMJ+uvxRtvcVfxgEkIsAyChxqufqcWZWW2k43njl04TNS6G6qT wQPIicibXm6snnyF6RDHc2cxVht9H4L+pnMQEaVCgHdxZF3PH6cV1bySbDvlq0b2tOJZdwL4IoCB rXnN+lcGoe5vVyMe56zFANoIxe/jsdgjW1fesSu5JlQGFD4+1LbsCQqUCnC+sxGJiDJuS2P15HNM hzieO4uxumgTgHamcxAROegjQFYpsEotrMehrLdmjVt66FRPLq8b00XEKrYUn1dgLJL4TPxoew/8 +cmR2P1xTkrBybOaASwTyGIrFlqxsb5076me2Hfc0g5he9+FsK0rBHoTgFEAemcsKRFR+jU3Vk92 3ci+64qxh58Zn2M3Ne8znYOIKM1sAJsAfV8hH1siH6uqJZCuCh2EIyMRKZ8Febglgj8vuxHvbeJk g4CzAbwrwNsAPrZFbFHtCqAbgP4ABsClZ48SETmlXaSp89tLpu03neNYYdMBjmcfbO7JrwMiCgAL wLmAnCsAVI+c36wOn+OcnRXDrbesRO1TV2PdKxc62jZ5igVgkAKDAECU54UTUfA0N0d6AnBVMebG sqe76QBERH5iiaJo9PMoGbMGluXovg5ERETeIVndTEc4nuuKMQ2J635JRER+cMXQd3DrLSuRnRUz HYWIiCjjVNR1dYbrijHLtl33SyIi8otz87fi9gkr0Kljk+koREREmSVwXZ3humJMLY6MERGlU5/e uzD11mrkdDpoOgoREVEmua7OcF0xJtCupjMQEfld9677cPuEWo6QERFRgHCaYmtwAw8iogzo3m0v Jt5Sj6xI3HQUIiKi9FO4btDHdcWYbYvrfklERH7Vp/cu3FLyN/cdOklEROQ0buBxZgL0MJ2BiChI Ljj3fdww4mXTMYiIiNKNxdgZCTfwICLKtOs+9SqGXNBgOgYREVE6ua7OcF8x5sKFdUREfieiGF/0 DPr03mU6ChERUVpYLMZapYvpAEREQRQOxzFh3FPonHPIdBQiIiLHKSwWY63ANWNERIZ06XwQt01Y gY4duOU9ERH5jftm4LmxGMsxHYCIKMi6d92HWz+zEtlZMdNRiIiInOS6GXiuKsbmrhrZDkDIdA4i oqDr23snbr1lJbKyeAYZERH5RviiCVVZpkMcy1XFmJUIdTSdgYiIjsjt/xG+MHEZcjodNB2FiIjI EQdjBzuYznAsVxVjzSosxoiIXKRXzz2449ZqdO+213QUIiKilMXjHV1Vb7iqGINtuapSJSIioGuX A/jCrdXo22eH6ShEREQpybJtFmOnYgGu+uUQEdERHTo04wsTqzHiqn+ajkJERJQ0GwlXDf64qhhD SFz1yyEiov9jWTZuun4dJox/Cu2yW0zHISIiajMVd83Ec1UxJhwZIyJyvUHnbcZdk5eh11l7TEch IiJqE1V1Vb3hqmJMlSNjRERe0L3bXtw1aRmuGfY6xHQYIiKiVrLUXYM/rirGxGWVKhERnVo4HMeY G1/EFyYtQ4/u3G2RiIjcT8Vdgz+uKsZsTlMkIvKcfmfvwIzblmDEVf+EJWo6DhER0SkpR8ZOzRJt bzoDERG1XTicwE3Xr8PtPJOMiIjcTGyOjJ2SaMR0BCIiSt45fbfji1OfwE3Xr0M4nDAdh4iI6BNE JWw6w7HcVYxBQqYTEBFRaqyQjRFX/ROzpv4V5+ZvNR2HiIjofyngqnrDVcWYre765RARUfK6dd2P SZ+tQ+ktK9E556DpOERERLDUXfWPu4bpVCzl4m8iIl+5YOD7yD/nQzzz3GV4bt1FsJWb4RMRkRkq 7hr8cVVlqKKuykNERM7IisRx0/XrcNeUJ5Hbb5vpOEREFFCcpnga4rJfDhEROatPr12Yems1Sm9Z 
ia5dDpiOQ0REAWO5bPDHVdMUbUVIOHuFiMj3Lhj4Ps7N34r1rwzG6mevQEuLq76OiIjIp1QtFmOn YllqKdcSEBEFQsiycfXlb+DC8xvw9HOX4eVXLwBXDRMRUXq5a2TMVWFs5db2RERBk9PpEErGrMGd U55E/77bTcchIiIf4wYep+eqXw4REWVO3947ccet1fjszavROYfryYiIyHlu26PCVdMUASRMByAi InNEFEMuaMD5A7dg7YsXY82LlyAed9X3JhEReZi6rN5w1ciYAIdNZyAiIvMi4ThuGP4PfOnOP2Ho he9BeAYlERE5wIK6qt5wVTEGcdcvh4iIzOqccwjji5/G7RNq0Pus3abjEBGR54mr6g13FWMqzaYj EBGR++T2/wjTb1+C8UVPo2OHJtNxiIjIq2x31RvuWjMmOMx9jYmI6GQEwNAh72HweZvx3PqL8OwL lyCR4HoyIiJqPeU0xdNyVaVKRETuk5UVww3D/4FZdzyBgXkfbjOdh4iIvITF2KlxmiIREbWCQt/o 1vXjolW//2YfERQDeMN0JiIicj/lmrFTU9EW0xmIiMjVdqvKt7vu6Xr5zIL6GgBoWD55RWPT2Zcq dBYAnhpNRESnJu4a/HHVmjELaOaSMSIiOokYgLl2uOV7s0at3nnCT1ePim8GZuffMrcKh9t9W6H3 AMjOeEoiInI5d01TdFUxZkMOCHfwICKiYwhkSVztf/titO7dMz234a93fgzg2wNLHnvEVvu/FBif gYhEROQRAhw0neFYrirGYNs7IWI6BRERucObauGbM0bXVLf1hRuXT3wHwKfziitvguB+KC5NQz4i IvKYhNg7TGc4lqvWjIVVdpnOQERExu1S1Xu67OlyyczRtW0uxI7VWD35qcar37lCoXcA4M6LREQB F0roiVPdDXLVMNT9a4a3zzmYc8h0DiIiMiIG0Qfat4R/dFtJ9T6nGx84pqpLIhz/AQRfARBxun0i InK/CLq221Bd4pp1Y64qxgCgoi56AEBH0zmIiCiTdLXY8pXphbX/THdPA0seuyBh27+GoCjdfRER kavsbaye3NV0iGO5a83YEbvAYoyIKCi2AvLdGQW18zPV4dH1ZMW5xZXjBHgAQH6m+iYiIqNcNUUR cNmasaNc90siIiLHxaB4oCXcMnhGQU3GCrFjba6evDQWzrlIRH8IwFXnzhARUVq4rs5w3ciYAjtd N3eSiIicVA/YX50RrX/TdJAPlo47BKDsnOLHHrWgvwL0ZtOZiIgoTYTF2BkJ8JHpDERElBbvA/KV GQU1T5gOcrz3qye+B2BcfsnCT6vKbwH0N52JiIgcpu7bVdd10xRF5T3TGYiIyFE2gNntY6GL3ViI Hath+ZQnYuGcQQL5OYCE6TxEROQkdV2d4bqRMQg2mI5ARESOedW2MXNWYe3zpoO01tGpi9/OK170 uMAuV8hVpjMREVHqVMR1dYbrRsZgsRgjIvKBJgA/7LKny1VeKsSO1Vg96eWGpr4jVPQeAAdM5yEi otRIwnJdneG6kbGWpvC7keyY6RhERJS85fG49eW7i1c0mA6SstWj4puBX/e/ueqPoUT8NwA+YzoS ERElJ5SwXDdN0ZUbF1bUFe4AtKfpHERE1CY7VPDVmWNqHzMdJF3yiyonqeDXAM4ynYWIiNpke2P1 5N6mQxzPfdMUAYgLF9cREdFpiCwOxUIX+bkQA4CGFZMXqaWDBDLbdBYiImoLedd0gpNx3TRFAFDo G4B8ynQOIiI6ow9U9e6ZBbVLTAfJlM3LpuwBMCu/ZOFyVfkdgL6mMxER0RkI3jId4WRcOTIGkedM RyAiojMQWZwliaEzo3WBKcSO1bB8yhOhWHjI0VEyNZ2HiIhOw7bXmo5wMq4cGZME1qg7y0QiIoI2 
WoqZ0wpqa00nMW1jfeleALMGlCxcaKv1MKDnmc5EREQnEQqtMR3hZFxZ8kyL1r4OwR7TOYiI6BMU wGyrfftLpkXrAl+IHWvT8il/j4U7XXr0sGjbdB4iIvqEPY1XvfW26RAn48rdFAGgoq5wBaCFpnMQ EREAYKMlMn3amJr/n707j4+qOvsA/nvOncnCDooIhCQo7lZFtBbQypLMJGGpbSXKZlUIaBe3ty5v W+u01rZaW5VaLQSwokkwtFVBskyGxQVcXlHrUouiEAjiguxbmLnnef8ALShLMnNmzkzm+X4+fj4q M8/9JSR37nPPuecssR0k2eWNqBoE5llgnGo7ixBCCACEhY0140bajnEoSTkytg8n5bxOIYRIMxFm umd7++1nSiPWMo0Lxy5323vOBeEPACK28wghRNrTybseRVI+MwYAiug5zfI8tBBC2MKEf5GLSVP8 9StsZ0k1TfNKdwO4JXdExRPENAuMs21nEkKIdKWUfs52hsNJ2pGxdU7z8/LcmBBCWBEm4ru7bOr8 zTJ/UBqxGKxdOH5F466e5zH4NgB7bOcRQog09PnqXb2ScvEOIImfGQOA8gbf4wDG284hhBBpg7Ec pCeXFYbetR2lrcnxP9bPIWcGCENtZxFCiHTBwKNra8ddaTvH4STtyBgAMGGu7QxCCJEmdjHTbZ23 dP62NGLx0VQ/cVVj3djhDJ4KYLvtPEIIkQ4I6gnbGY4kqZuxj5y9dQA+sp1DCCHauDpSntOn+Orv Li2d59oO07YRr60dP0NH6FQAT9lOI4QQbdxHjR1UUm/FktTTFAGgvMH3GwA/t51DCCHaHtoI1jeW +Roet50kXeWWVE0k5vsAHGM7ixBCtDnMv2msG3+77RhHktQjYwDghJ0HAOyynUMIIdqYCiesTpdG zK61NWMf87iR00Gosp1FCCHamJ1e8k6zHeJokn5kDADKG3z3AbjBdg4hhEh1BLxPzD+e5GtI6mkb 6Si/uPJiBv4C4AzbWYQQog34Y2PtuJ/aDnE0ST8yBgARFbkXQLPtHEIIkcJ2AfjfTps7nymNWHJa Uzvu2Q4dPOcS4WcAy4wQIYSI3h5o94+2Q7RESoyMAcCMoP9uIr7Fdg4hhEhBz0Qi6ifXFtetsR1E tEzOyOreTiT8OxBNtJ1FCCFSD/22sXZsSqw5kTLN2KwXRnfUu/f8B0Av21mEECJFfEig6yYX1i+0 HUREJ7fk8ZEEZxqY+9rOIoQQKWL9rt2eUz9bWrrDdpCWSIlpigAw6cL528F8q+0cQgiR/GgjATdv b7/9TGnEUtvamgnPuO2dMwC6GcBG23mEECLpEd2SKo0YkEIjYwDADJoZ8j0PYLDtLEIIkYS2gviP Kiv7/kkXzpdNhduYU0bP6rg7nH0jATcB6Gw7jxBCJBsiPL+mZuzFALHtLC2VUs0YAEyv952qFF4H kGU7ixBCJImdYMyKMO661h/81HYYEV85/upuHid8HTPdCKCT7TxCCJEkmlljwNr6ce/YDtIaKdeM AUB5g+82AL+znUMIISxbB+a/OhFP+dUltZ/ZDiMS60TfnOMijqcMwDUAcmznEUIImxi4dW3tuHts 52itlGzGAkuGeHqHM5aB8E3bWYQQIuEIK8A0TXfdWDX1vBVh23GEZYGAyn355BFEuA6M4UjRz3Yh hIjBy40dPIMxr9S1HaS1UvaEPWOx/wxyeQWATNtZhBAiAXYRUMGkHiwrqHvTdhiRnPoWVZ6lCT8G eDxA7WznEUKIBNhDLvqvCY77j+0g0UjZZgwAZoQKbyKmlNjQTQghovQhmB4K7/XM/uHIhZtthxGp IXdERVfSahLA1wI4wXYeIYSIF2a6fm3d2Gm2c0QrpZsxZtCskP8pBo+2nUUIIQxbRswPdNrS5Z+l 
pfNSbtqFSBKBgMp98aRhpNT1AI9Ain/uCyHEV9Q01o4dmUqrJ35Vyp+UZ9cUd3e97huQzaCFEKkv zMBjjqPumTSsbqXtMKJtyfdVnsoObgEwAYDXdh4hhIhRU2Rv+Jz1i37wue0gsUj5ZgwAZgQLLyai RQAc21mEECIKYQbP1Yw7r/E1vG87jGjb+pZU5DFwEzNNgWwTI4RITRpAYWPtuMW2g8SqTTRjAFDe 4LsDQMB2DiGEaIVmAma50HdPLQyttR1GpJe+JRV5LuM2Al0FWQxLCJFS+JeNtePvtJ3ChDbTjAUC AdX7wuVPgTHKdhYhhDiKvQyeQR7P3WVDa5tshxHprXfx4zkepttAVAYgw3YeIYQ4iqcaa8d+L5Wf EztQm2nGAGDWC6M7urt3v0Sg021nEUKIQ2AQ/Z0i+NnkovpVtsMIcaATRlXnuuHwb0A0AW3s+kAI 0WasdMKeCz4MlW61HcSUNneynbW46BTt6pcBdLadRQghDvAiMd882dewzHYQIY6kT9Hj5ytS9wAY YjuLEEIcYDtDf2tt7YR/2w5iUptrxgBgRrBwNBE9CUDZziKESG8M/rfSdNtkf3CB7SxCtEafoqrR ivj3AE6znUUIkfZcaPpOY/3YhbaDmNYmm5Upvob5BPzUdg4hRFr7jJlv+MgTPlsaMZGK1tWNnd94 wXtnMvgHADbYziOESF/MdFNbbMSANjoy9oXyoO8BEK6znUMIkVZ2AbgnU+PeK/zBnbbDCGFCD9+c 9tke52Zm3AxQO9t5hBBphPi+xprxN9mOES9tcmTsC+uXD7oR4H/aziGESAsMonmkPKeXFQZ/JY2Y aEs+CV6xc03N+IDreE8G82MA2sQqZkKI5EbAgsb23ptt54inNj0yBgDTF4xqp7KaFwH4lu0sQog2 ivGKUuqGSQV1L9qOIkQi5I2oGkRa38+g821nEUK0UYzlbkdPQdO80t22o8RTm2/GAGB6Q0FnRWoR GANsZxFCtCkfgfCr9S8MmhkIBLTtMEIkFlNuUeWlRLgXoFzbaYQQbQjhX67rGdZUX7rJdpR4S4tm DABm1xR3dz3uEhDOsJ1FCJHy9oLxV9Uu6xeTLpy/3XYYIWzqNWpBuwx32y3MdAuAbNt5hBApb6Xy 4uLV88d9YjtIIqRNMwYA5aHhPcDOUgCn2s4ihEhZz0DjujJ/cLXtIEIkk97Fj+d4mH4rm0YLIaJH q6Aj326sn5g2K7im3clyekNBroLTAPDJtrMIIVLKa6Rx42R/8DnbQYRIZn1LKr6tme4H0N92FiFE Slnpcanwg+DYdbaDJFKbXk3xUKYWhtZGNF8E0Ou2swghUsInIExev2zQ+dKICXF0q2vGP9d4wXvn gXkygLSYZiSEiBW/5oXnonRrxIA0HBn7wl+WDOmQEcl4EkCB7SxCiKQUBuNhTfqXUwtDW22HESIV /Xd/MroVQJbtPEKI5MPAc56wZ/SHodK0/KxN22YMAP60fGB2xx0d54Bwqe0sQogkQnhSk3Pz1OG1 H9iOIkRb0Kd47okK+l4Al9jOIoRIKn93O3iuaOvL1x9JWjdjAMAMKm/w30LEdwFwbOcRQlj1LhHd NLmgvs52ECHaorwRFUPBdB8YZ9vOIoSwigl0z5oLVv4Mab41TNo3Y1+YGfIXMbgSjK62swghEm4T M/+6y5YuD5aWznNthxGiTQsEVO7LJ00g0B8AHGc7jhAi4bYR8RVrasY/bTtIMpBm7ADTFxWfqLT7 OIBv2c4ihEiIMDH92fE233nV0KVbbIcRIp3kX/JIF92ceQcBPwLgtZ1HCJEAjOXQkQmNwStke5j9 pBn7isCSIZ5e4cz/IeI7IR8OQrRlIXbohinD6t+xHUSIdJYzsuIkj1Z3MfMY21mEEHETIdAfj/l0 
2+0rVkwN2w6TTKQZO4xZoaKBmvlvsh+ZEG3OG0x085SC+pDtIEKI/+pbPNenSd8jz5MJ0easJOgr 19ROeMl2kGSUdvuMtdSkgroXddeNZzLTbQD22M4jhIjZehCmdt7c+TxpxIRIPqtrLw82fvO9c5m5 FMAa23mEEDELE+huL7qcLY3Y4cnIWAvMCA0/mdjzEMDDbWcRQrQOA9sU4ffb2m2//6ZBL6bt0rlC pJJeoxa080a23wjgVgAdbecRQrQOg4PawY+bnhn/vu0syU6asVaYEfIXEPMfAJxjO4sQ4qgiAGaD 3F+WFSz6xHYYIUTr9R7+6DHejIybGXwjgAzbeYQQR/VvZg6srRs/z3aQVCHNWCsFAgGVc+HyiQAC zMi3nUcI8XUMPOU46rZJw+pW2s4ihIhdvq/yVDi4m4HRtrMIIQ5pDYPvWHvB+4+n+75hrSXNWJQC gYDKGbh8BCu+HaDzbecRQgAAXmXmn07xNTxrO4gQwrz84se/xXDuBXiw7SxCCADAmwz+49rdvSqx dGjEdphUJM1YjJhBsxb5/cz8EwB+AI7tTEKkoXcZdHtZQf0/icC2wwgh4okpt6jyUlL0azBOtZ1G iDTkAlSrQH9eXXt50HaYVCfNmEGzgoW9XKiJRDwFwAm28wiRBtaCcNd6Z+/swNClckdOiHQSCKjc l076PhH9DsCJtuMIkQaaCFRBpB9eXTO+0XaYtkKasTiZsdh/Brk8BsBlgNy5E8Kwz5jpj15v8wNX DV0qW08IkcYGDJju/ey4DlcRKACgp+08QrQxa4h4vtY0b+233lsuz4OZJ81YAkxvKPgGERUSaAgY FwHoYjuTEClqE4A/6D2Z06aOWrDLdhghRPLo4ZvTPstxrgfopwC62s4jRIraQsDzYFriOtSwbuHl b9sO1NZJM5Zg1dVjnG1dN58FpgEM+gYUzgTjbADH2M4mRBLbAeABj2fvvVcNXbrFdhghRPLKHVHR FS7dTITrALS3nUeIJPY5gH8R8dsMegtMKxo7OG9iXqlrO1g6kWYsSUyv8/f0KP0NV+FMYsonUB6D cwHkAuhmO58QluwA8HBE495r/cFPbYcRQqSOvqMre+gwbgZwDaQpE+lrE4C1BKwF8RoGNSpWb2sd fquxfuIG2+GENGMp4S9LhnTIdrNzI+TmKeZcMOUwcBwDxynQsQw+FqBjAT7WdlYhTGBgGwF/1p69 908dunSj7TxCiNTVa1TlsZ4wbiLCjwB0sp1HCEM2grARjI0ANhLoUwY+Ieb1TGotI9LY7OrGT4JX 7LQdVByZNGNtSHX1GGdrt03HsvIcSy51J3a7M3AcgM4M1ZmIuwDcBVCdAe4M4MB/5K6hsI+wGYwH ws3eaT8cuXCz7ThCiLYjd0RFV8W4npmugzxTJpLDTgBb9/1DWxm8VTG2gmgLwFsYvBWktrLWnyqi T7XGRicTG1dnejbKVMK2Q5oxAQAILBni6es6nV1Q5zA5XTyMzq7WXQB0JlL7mzh0JqIumvf/O2jf /yd0AaML5OdJRI02gvV92RHPgxNKarfZTiOEaLv6FT/eKQz1EwA3AJAZJSJaDGALgM0AtoKxBYSt TLQFzFsU8Vbwvn9nqK1w9BZEnK3scbfoiHdVR//pAAAgAElEQVRLU3P3bbJJsgDk4lkYNL2hoLOj Mrow687k7mvkQNRFE3cB8bEE1QvMx4PQE4ye2Ddq57EcW9i1noD7mz17//qjoUt32A4jhEgf3YdU d2iXFbkWhOsB9LadR1gVAfApgI8A+hjgDQRs0MBG7G+uHOKt0LSFvJ4t2I2tH4ZKt9oOLdoGacaE NYFAQOVd/MpxOhzOcRX1A6MfMfoBOAmEftjXrIk2iV4H6z/pbpuemHreirDtNEKI9HXGmOqM7Tvd 
y4j5RgD9becRcfMpQO8zeBUB7xNolVZ6lWLv+jU1Yz4BiG0HFOlJmjGRtKY3FHR2tDqbFZ0LcH8A 52LfBtoympaaNIAaRfSnSQX1S2yHEUKIr8orrhwGwk1glECukVJVhIB3AbyuiV9zgNdpr/dfMpIl kpWcaERK+dPygdntt3c8SykaCPAQABdC9mhLdruZMcfxqPsmDatbaTuMEEIcTb6v8lQ4dCODJwLI tp1HHNFGAC8Q8CxpXh7u5H2raV7pbtuhhGgpacZESmMGzQr6zmDii0F0EYCLARxvO5cAAHwAYKb2 7J0py9MLIVJRv+Lq7hGKTGbGZAAn2M4jAAAbGPwcMT3HjGfX1o/9t0wxFKlMmjHR5swOFZzgQhWA MQpAAYAs25nSiAtgCTHP6LSlyz9LS+fJ0rtCiNQXCKjcF08aphw1hZkvAeC1HSmNRAB6maEXQCG0 duG416T5Em2JNGOiTfvT8oHZHXZ1GqygCxhUAMa5kJ978xhNpLhCufSXq/3BdbbjCCFEvOSXVB+v OfwDAk2BjJbFy4cECmnWoeyMPXUr50/abjuQEPEiF6UircxcVJKnOTKCGCMADIU8CxCLXQA9rVj/ bd3ywaFAIKBtBxJCiIQJBFT+SycVMtGVAI8GqJ3tSClsNwiLiXkhEWpW14xvtB1IiESRZkykrS9G zUjzKBAuAZBrO1MKcAEsAegxlZ355KQL58vdSiFE2ssZU52tdkQKFNFEZv4OgAzbmVLAJ2AOMrBA Rr9EOpNmTIj9ykNFZ4F1CYFGMHggAMd2piShASxjpspMFZn3g4JFn9sOJIQQyarXqMpjMyI8BkRj mXEh5FrrCy6AF4mxMKL0wqaaCW/ZDiREMpAThBCHMLPO342V9oNoJAA/0m/5/GYGniXm+Q7TfHkO TAghWu9EX1WfiAejmfVoAg1B+o2YfQ5wPTE9E2FPfVN96SbbgYRINtKMCXEUgUBA9R64vD8URoEw sg0vArIJRIvAeEbDfXpqYUg2yBRCCEN6jVrQzhPZPlyBRjJ4NNruNiwfEvEz2sWC7ht3PLtixdSw 7UBCJLO2eEEpRFyVLynOITcygpmKGRhKQCfbmaLkAvwaQItAVNN5U6flshS9EEIkwJhqp882dxA5 XEKMAgD9kbpT47cx0xKQrnHBNetrJzTZDiREKpFmTIgYBJYM8fRxs87XrIcxMJyAgUjefc2Ywe8S 6FlAhcLNzpIfjly42XYoIYRIdzn+6m6OExkKpuEAXwzgNCTvNdoeMF4EYRFBL1qzu/erWDo0YjuU EKkqWX/RhUhJX67QyDwEoPMA7g/gOBtZGNhGhDfAWM6EZSpCyycX1ct8fSGESHI5/upuSkUGK9Bg DR5IwDmwNwvjUwZeJ+BV1rxUd/Iua5pXuttSFiHaHGnGhIizhxcN6+2wpz+B+wN0MjHlM3M+CD1h ZlrKLia8Txrvg7CSmP8FrV6f5K//gAhsoL4QQgirmHL8j5+oSPUnUucAfDIIJ4H5JEP7m7kANgBY A8IaaFrJyn1Dq4zXm54pXW+gvhDiMKQZE8KS6a8O8Hq3dO2jXZXDDnqC0Y2BYxRxB0BlMfOBG1Jv BVGYNbYp4s804VNH88cAPprka/jI1tcghBDCrj6FVb3guL1JoQexcxxDdyeoTiD2ElPn/76SdwO0 hxk7iPTngNpEpDe4hKbuH+9YJwttCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEII IYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGE EEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBC 
CCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYRoKbId QKSfE0ZV57pu5Hww9QXrfCjKB+N4AF0B9gDU8YCX7wSwF8AOgNcD1MRAExGtogi/tmbQe+8hENBW vpAkM2DAdO/nx3UcBKA/E50J5tMBdAPQEUD2/pdtJmCnBm8gprcB/jcTv7i2dsK/rQWPg76jK3vo vTSIwKcw4QSATwCoB4B2ADoBcPa/dDeAPQC2EbCOgTUgrFGs3nYQeWlV7YRttr6GZHGir6pPWPHF RHw2iM4BoyeAzgDa73sFb2dQMxEaGfhAaVrpwn1hXceM1zCv1LWZPREeWTIkKxzxXExQ5xHQXxP6 EXMXEHUCADC2MGEbMW9m4G0Fel0zv/bR8sFvBhJ07qquHuNs7rp5oGLnAhCfC8IpYHQDocv+jPt+ DwhNzFhFwPsa+gV03fzy1PNWhBORMV7KQ8N7KK3O1kQnMnACgU8AUw4InQBkfvk92I+ArZp5F4E+ A/gTgD4C8B4xvxdmeutaf/BTO19J8ni4tijf4+FvE+NsJpzN0D0J1BmEdgAAxnaA9oC5EYRVzLyS HfX8hucHvpGon/lEmhUs7KVJnc+kTySmfBDy958n913TfHEuAP77uwbsZGADMdaBsI6YPtQevNZl Y6f/lJbOa/PnTXEwacZE3O27MMb3QRgGYCCAXgbLbwNjBQFBgvvk6rqJKw3WjlrOmOpsZ3t4KAjn MNCHgE4g6gxGewLCINoCxmYGfwbwu154g6tqSz9r/ZGY8vxzS+Dw5WCMANA1ysgfgHg+yClvXHj5 u1HWsGfIEk9+uw0FDLoUzBcBONlAVU3AO8xoUMxPrK4f/4qBmjHpV1zdPUxuIZhPJ1B3ELpq5k4E ZAK8m4FtCmorA58w8duevZ76D0OlW1t7nPyS6uOByFXM+B6AAYjus2IrCAsUuHx1zfjnonh/VMrr fX2JeDgTnQxGNyjqBOYvbvTsALAN4G0ANRHojV1hFbqupLa5NcdgBpUv9hWRxgQGRtK+Br+1PmLw P4lUdVlB/fNRvP+oykP+i5j5SgKNBvjYKErsAFEtaT19UmHDYiKw8ZAGBQIB1evClwaQdguJ1AXM fC4IOWaPQu8BehmI6lVWVs2kC+dvN1u/dcpDw3uQ9vg0+DRFOJaJujBzZwJnENNuJmwDsJWJPwar N53szGA0mcuXFOdQWF/NhEsA7h9l3E1ENJ9AMyYV1L0YZQ3rHg0NP6aZPd9TzMOYMAhArsHyO0B4 DeCQZn5qamHoLYO1RZKSZkzERf6QR7I4K2M8iMYBuBj/HYmIt3cBftzjujM/CF5h5Q5mXnFVf4Cf Qeuazj3MdOvaurHTWvLiAQOmez/v3nEcE24GcEZUQQ9NA3iaFd+1duH4FQbrxkWO/7F+jvJcD/Bl ALrH92i0isHlGdB/tTFilltUdR0R3wMgsxVv2wKocY21l9e25MV5I+aeBq1/CmB8K49zZIR/gdX/ tjRHNJhBM0OFDwE0Fa36bONGhzwlVxfUHnV0OBAIqJzBL17O4FsBnBV92oMR6AUm/MxEU8YMKm8o vISIbgHwLQPx9hfGO+zg5inDg3H7O4zGtJrizCxPpIRIjQFQGGXTGa09AOqZ6MGy4fWLEt2sljf4 bgPwKwAZrXjb54pozKSC+iUtOkao6CxA3wzGZQC80eQ8jFehcWuZP7jYYM24qa4ek7G1y5ZSAo1j QgHMfi+O5AMmrnA0pk/yNXyUoGOKBJNmTBiVf8kjXbg544cAXQegh8UozQQ8ARe/WxMc959EHjiv uKIGoOIo3hp2wp7uRxvJyC+q8LOi+8E4NcqILaEBesgL9+fJOFUv1195Bin+GUCXIXGN/he2gPnB 
rIw996ycPykhd8X3/V5lfgbAE8Xb32usHXfKkV6QO6KiK2n1a4CvifIYLcJAvXY8k5qeKV1vuvbM YOFgJnohqjcTzSsrqC890kvK630DoPhhgM6P6hgtzEERXDO5qH5TNG+fESw8h4imAbjIcLIDPRPR mGR7ut6MYOE5pNSPwHwpcPBUQ0veIqJbJhfU1yXiYNPr/D2Vw00AVOvfTa+XFdafe6RXzK4p7q69 7l0MTIruGC1MAprPFJlSVrDok3gdIxZ/WTKkg9f1TiFNN5ofZW2VMIB/MvPvp/ga3rCYQ8SBNGPC jDHVTt6O8LUA3Ynk+GD8QpiIH9aEwNqF4zcn4oB5xZWvAzgnmve62j2pqX7iqkP9WX5J9fFau38l 4u/EFLAVGFjHrL+/rm7C/yXqmEfSwzenfZbjBAC6AXFsGlqoCUzXNdaNfTLeB8oZWXGS49J7Ub59 e2PtuMNOo8srqRoH5gcAJGpEYRMDV66tHbfAZNHykO9SMOZF814CvTC5sP6QDcy0muLMbK/7ewA/ QSIaf0YTgydM8TU829K3JDwj8DEzX96ajKbMDBYOZ6LbABQk+tgt9HREY0q8m9X9jffrUb7947LC YM/D/eHMBt/VTLgXHPW099b6FIQJZQXBhgQd76j2jbT7rwT4bsR91kWruATMZnJvT9YGVrRe3O52 iPTRp6TqvLwd4VcA+jOSqxEDAC8zXUeaVub5q0bYDhOt3JLKQubIG4lsxACAgD6K1LP5RZVjE3nc Q+lbUvHtLMfzNkA/hf1GDAByQPzPvOKKil6jFrSzHaa1evjmtM8tqXoEzBVIXCMGAN0IeDKvqGJS Ao8ZldmhghOyM9xlAG5AokZgCTlEVD+zwd+i89X0et+p2V73JSQyI3A8EdXNCPlGJuh4KA8VnTWj wVfPRCEkbyMGAN/xEFZMr/ddYDtIaz1eU9xpZtBXxcCsBDZiAHAcGAtnNvjHJfCYhzVjsf+MmSHf 8wDPRnI1YgDgMFAGdlaWh3yX2Q4jzJBmTMQkr6TqR4p5GUBHnPKQBLpD8YK8ksp7MGRJMlzItxBT fnHlr4hRB3vTPrOZUJFfVHGlpeMjr6TqR5opBCDfVobDo3Fed/vyPN+cvraTtFSO/7F+WR7Pq8R8 paUIDojK84sqf2Dp+Ec1M+Qvclm9BsYAC4fPZPA/Zob8RUd6UXlD0feUwquIciQ+RlnE+OeskH9o vA80o8FfBtb/R4Av3scygpCjFBaX1/uG2Y7SUrNDxafv9rqvMeFySxG8DH5sRoP/iNOF421m0D+R XH4FwGCbOVqgMxhzyxt8Dz2yZEiW7TAiNtKMiajkD3kkK6+k8jEwP4jWPTxsE4Fxc372hn/0K64x tzhBnOQPeSQrr7iqkoFfwv7vKjHRzLySyu8n9KiBgMotqfzr/p+zRD0w3XqMs9nxPJs7ouIE21GO Jm9E1SBHOS/G+ZnDliAmTM8tqrzQco6vmdngu5qZF2DfEv62ZDJz1cxFJXmH+sPykO86QM/Dl1sM WOHVjOqHa4vy43WAmSHfDwg8A6nzOfOFdlBYMGORb5DtIEczK+Qf6rK7DMCJlqMoAv9tVrDwvEQf OLBkiKc85H+QiecASKWZDtdG3IyFc+p9Ns8DIka2L/BECtq3UmLmk2BMsJ0lGgyMDmPzk/lDHkna u0m9hz96DGdnhQBrdykPxQHznFx/pcnVG4+AKe/lk/9KjKmJOV5sCOhDmhafMKra5DLHRuUVVX0X mhchsdMSjySTCFX5lzySNNOby0OFtzMwE8kxFbYL60hFYMmQL7Mwg8pDhfeC8QCS4jOcj/V49JxA IGA8y6wXRnfUjBatMJuk2pGLJx4NDT/GdpDDmdngH6eZ65A8jxhka6K5iWwuAkuGeHpHvBVg/lGi 
jmkUY1izQu2sF0Z3PPqLRTJKghO5SCX5Qx7J0tmZT4FwxOkzyY+KOTvzMYCTbhGbE31zjvNkeBcD nITTJKgdKczNGVOdffTXxiavqPLPAMrifRzD8txI5KlEfH9aK6+o4nsgfgJAst2EyEFz5gO2QwBA eYPvDjD9Gsm1uNXgXm7GlC/+Y2aD734w/Y/NQIdwUa/By41fyLp79oyIcg+35EHI2cvqIdsxDmVG yDeewXOQfKOOJ+5VuCcRBwosGeLp7WZWAmR1eqQBF7m79/y9unpMolcXFgZIMyZagQlZmY8S4Led xJBL84rm/tx2iAMpco6POJ4lMLiHURyc6dkRCcTzAHklVT8CUWrepQT6q51uUl185RdXXgKiuUjS qZ4MTOzrr/imzQzlwcKfAQjYzHA4xLj9L0uGdJgZ9P8RhOts5zkUAgLTGwqMTusk0DdM1rOHxswK FQ20neJAM0K+y4nxKBK/NUiLMDB1xmJ/3Gdh9ApnPAjmMfE+TiIQ4NvWZVtCmlhhljRjosXyiqt+ xoRUv3t0MOJf5ZZUFtqO8QUi/APA6bZzHA0DN5xQMvfkeNTu46+6CMz3xaN2ohDzlfnFlZfYzrFf OwaeQJI2YvuRVvitrYMzeACI7rJ1/BY4PiOSsZyJb7Id5Ai6KagbjVbkhG7gHE+kme+0HeIAxxDj MSRpI7afQy7H9XeyvMF3LVFqTINvKSa+qTzku9R2DtE60oyJFskvqSwC8GvbOeJAEeOvSbQ0+XG2 A7RQhsv8R9NF+xU/3kkproL5xqGZCa+A+TEAfwbxfWB+DPtWqXwXgDZ8PGhgWvch1R1M142Cg+Sb hnQINLxvUaWtEeGkm1Z6CKkwSnTN9FcHmPvdpZRaSOEoeNiM0PC43MCKghfJ8Uzk0YyeWefvF4/C +0cqk2J6tHGMPz/0zIhEbk0gYpQKv4zCslNGz+q4O4wZ1Hab9xMyItvvAHCr7SCphUfmFVf1b6wd G+3Go18ThnMXwL1N1SPC85ppWrMbrv0keMXOw70ux1/dzXEi3wPjJzA0RZSAPu2ywrcCuN1EvXSg FaYCSNXpqQLoQZu6jQRgZCN01thJpp7eI2wGYwWATwFsJeJtWqvNSrECuLPW1AWEvkQ4P057bBGx cxWA/41D7baK2OEpAG4xWXRaTXGmZncWknu2QCyO92ZF7kHqPXOdtqQZE0e1J9zutwTuE4/aDLwP YCERXmAX/4GHP+r+8Y4dG3r18tKuTdneTE8faJzKii4CYwSAQy7zbCDHDSf6qh78IDh2XTzqt118 CwAjG0L39Vd8U4N/aKIWgM3EfNOa2vF/a8mLm+pLNwGYCfCsvOK5VwF8H0wsHED0437Fj/9hVe2E bTHXSgeMUoypvg7zSl3bUUR0iOhSGGrGAGyK4b3rCXiaCUvgYkWZP7i6pW+csdh/BkV4ChOuNLyA yAhIM9ZalzPjViKwqYJZGZGfgek0U/UORIQ1DFrIjOeI6D/hPc66Y3e129mU0+R02tYpmz2UQ9o9 BUoNZuaRiNd2AsxXzw4V33d1Qe2/41JfGCXNmDiivkWVZxm8QP4SA89B851r68ctAuigk+xaAFiB MIBdAD4H8AaAuQgEfpL30infgeI7wDjbcKSMiMM3A8n5cPxRNIH5aQJegFKrI27kc0d5OjI4h5i/ yUSXEXBSnI49pk9h1f+saxj7UayFtKLfwczo69s6Qv51DeOiyETcWIvZfYsqX9WEegDHx5ily16o a4DErAxm2GcAFhHwHoDNDOrGhD4E7h+H378vHJu7Y++31gLL4lTfpDDAy4jwMpg+YaATAb0BnMXA +UiOmQR7QXiBNV5WhM8Y6Ayi3mA+B8AAxGXVSPIFAgEVCARinvpLRJ+iddfgnzMwhzWemOILvhLt 
BfyUYfXvALj+0dDwX4fZM5vBo6OpcwhnzgoW9prka4j5fGnJJwBCAFYBtAXgY8HIAXAeCPFabKPP 7Hr/WUD9v0wUe7i2KJ9Y32ai1kEYr7DCnetfGFRzhJ/93dh3g+FNAPOYceOsRX4/mAMMXGA4kXLZ /RmQmlsQpRtpxsQRaTK+4fA2Yp7aWDd+7r7/HN/ydwYCuhF4EkOWLMjL3nArgF/B7APIk0/0zfnN B8ErPjVYM34I/2LoX6xtn1F7mJGE1wEsAPiX+cVzSxl8P2JvLr7KUR4eCyCm58dyix8fDGCYgTzv EXkK1zWUfhxLkdV1497MHVExkjQ9j9ifJ5qMlGrGaJlm3LPuWyufwWEuKvqMmHumo/VdDJi6SP2S gvIhuZux7QQ8oMLOtKtLaj871AtmhIafDHbuJsDWIi7bwXS/9jZPmzp06cZDvWBmve9MJvwJBMML GPGxPS968RwAr8VcSfEr1JKWbt+F8DSvs/cfVw1dugeAkVUZflCw6PPq6jHf29pt65NgjDJQkrSi QQD+bqBWAvFSVnRP2bBg3eEa3JkNBf011C/j8TPPHu0HYKQZ83r0z9jsM7S7iPn6SYUNs/Z9b4It fuO+19fXBQKBYO8Ll/8YjD/AbLbLpi8qvmPq8NoPDNYUcZAMd+5Ektr/MP33DJZsUux+c82XjViU lg6NNNaOu4uB7wJoNhMNAJAdcTwTDdaLlz0A/6SxvWfA2poJzxx9ShfxmtqxTxB5+gN4MQ55Yr7z Rqx+YSDHTriRojU1sTViX1i7cPwKgH8Tax0CTrK9bHsLfU6MKxtrL79oXd3Y+YdrxABg3cLL315T O+47IBoP4LDP4kWDweebrGcUYXEkos6aXBi8/XCNGABMKVj03pTC4HdBmAwgnMCEAGExKc83ynz1 vzxcIwYAk/3BtycXBv0E3ATDC9go5vNM1OHOn/8fgB2HfQHhORB8Zb7gBVMKghVfNGImlZbOc7Wz 92oAZkazNCXztiVf9QkTxpYVNgydMjxYe6SRxsmFodenFAa/y6DLABj9e2BNRs4JD9cW5TNwpYla +9BGgr5wsq9hZizTKAOBgC4rCE4DlB9H+nlvPY9ifbXBeiJOpBkTh+USboK5aSybXYeHra6buNJQ PaytHbcAhPEweyGR3EP6RKuh1eDG2vEPtva5mjU1pR9neXf7AbxsONU5eb45faN9c55vTl+Qkb3r 7mgMXtHi50JawouufwQQ88+sdnC5gThxw4RXPC71X1M37tGvThs+ksaasZW875lBg894kZEL+Tj4 Q+dNnX3XFtetaekbygqCswBcH79IB2Ome9a/MKhw8vCaxpa8ngg8uTB4H4BfGs2hca6JOlPPWxEG 0yG2uaB6EH27rCB4cVlBsMHEsY6YY+jSjcxsZnSbOFWasee1S/2nFARbdfN0SmF9NYN+AIOfy6Rg 5Jzg8ejrYW7Rjp3MunByYcjYAlZlhXVLmei7MHkDh/V45qTaxF4cgjRj4pBOGT2rIwHG9qrQTFc2 PTP+fVP1vtBYM+4fIDa5PO05fUbMPdNgPWMIeAtuZHBj/eVRT/9ZOX/Sdh2h7wE47B3zaLDjLYj6 vcp7BWJv+v/duLun8WWKV9WWNLOJ/aeYkmYvu0OYm8Fdvh3t4jVra8ctAFPAYJ7uvUZVJtP+UhqE qWWFwVtKS+e1uuksKww+TIzYZgMcnQvC5Cm++lujeVZrckHwtwDqTIUhwqmmanXe0ulXAH5BjFow phH0uWWF9UVlBfXPmzpGS7A3XAFgr4FScVmEyix+RHf9fPjUovoN0bx7SmF9NQxOzWZG3vQFo2La 5qC6ekwGWvVcxJER4UdTfA1vmKr3hSkF9SE2uo0Q5c0K+i4yV0/EgzRj4pD2hNuNAdDeTDWuXFc3 
dr6ZWl/ntvf+nAFjqyA6rh5pqpZBK7TiixvrJ0b14XigdQ1jP2Lw/5gI9QXFHGUzxkTEsU8NJXoI S4dGYq5zCLt3OU8i9ql4Z5zom5N0e8gxYXrjBe+NX1VbEtN0X9qz516A15rKlbmXTzBVK0YuMV1Z VhCcEUuRCPiXAOLy87mvPE/cPwoXFSKwhr4F5kY4je0NVVo6zy0rDN412RcsKfMFrzc5EtEa+6d8 mphV0NNAjbhh4L7JBQ2Tpp63IqbRGZWd9VvsW/DDBKL2e6OefQEAW7psGQGgu5EwjNrJBcFHTdQ6 lI88e3/PYGOrILJCMl7TiANIMyYOjdnUdL09ruM1ukfIVzXNK91NzL8yVpCMLCRh0kovPMVrF47f bKrg2gvef5yAt0zVY0JUd97y/E/0R8xL+/IuythTEVuNw/tsaekOgJ+OsQxFlHOhkUCmEM1aWzP2 2iM9G9ZSa5ZetYeA35uIBQCsVEwXXqYQ6IbJvvrHYq1zja/hfQD/MBDpa4j52jJfQ1WsdaYWht4i 4CkTmQD0CiwZ0hYXCFthoEb36uoxJheeMocxbUph8CYTy8hPunD+dsS4sNNBNMd0TiAy9giC6zJu MlTrkAJDl0YIjsn9KYcarCXiQJox8TXdh1R3AGGwiVoM/lvTM6XrTdQ6EtqztwKGpt4xePAZY6pN rmgUiw2K2L+qtvSwiwVEJRDQAD9ssGLP/JLqVq/USI428KwY1ax56qotsdc5whGAF2KuocxsJm3I 3xvbO1Nb83zY0Xjg/TsMjaxosPVRRAbum1xY/6CxemSs0Tmw6J2TfQ0zzRWkf5oqdNzO9h0N1Uoe hLcNVFGfdf8s1hVa42HO5MLgDSYLkvJUo5V7ExyOYkR9Tth3Y4CGm8gB8D+m+oP/MVPr8NYv+9ZT AEw9A93/kSVDuhiqJeJAmjHxNdnt9g6BoeVVHSaTF/yHtf/OvKHREWq3c6fb30ytmOyFoktX14xv 0cP4reUBV8Dgyk3MutXfM+bYF+6gBCyD7moV80UYQ33DRBYDXqbdzRNNb6y8qrb0MzCeM1FLER9j ok7UCE9+tGzQT02W9Dp762B2quKcyYX1dxish73NnloYypiRzSY3S04KrDmWTai/5AGyTNQx6Fnd 9fPJJjdWBoB9C8nwqyZqMRD1OSEn7IKz59gAAB5mSURBVL0AQGcTOQAnIdc0gUBAE9PfDJVzwjpj oKFaIg6kGRNfo2BssYE3VteNe9NQraNjMnbnmcHWL5wJfEPjwrHL41V/Ve2EbUxkbL8bhm7V96zX qAXtAAyK+cBE8Viu/+BDeHTsd8SZTzMQJVZN0O531yy9yvgS4ADAysxKnczUzUSdaBDwst6dOcHE psUHumro0i0ATC1iFIrHxfMPRy7cDFMZI5E2NzJGTFtN1PHuzUimkbEPtWfvpbE+I3Z49JKhQlGf E5hMXdNw4+SCumfN1GrB0ZS5axrSSMqFycQ+0oyJr2FWpvZEWmioTosc89m2Zdi3w33MmHGGiTox aFpTOz7ud+CU5qXGaoFa9exXhrvjG4h9mWHGrj1GNgM9kv3P68XawOSYyBIbnmtiEZjDIWYT07gA jnmj7RiOTX+ZOmrBrrjUJjL0/eH74nbxbCij9qDNjYxBaSM/F9rjSaZrr8eOtB9drIj4HTN1KOpz AjGMXNMQaKHpGyBHUlZQ9yaATw2Vk2YsiSXTCUEkg0BAwdSokOJFRuq00IoVU8Mw84A1iKyfuIxO ITuciIeNjbwxtW4FPM36bAOH/TxeozyHEOtxOp4yelabGy04EBEMXXiZmSadbBj6XdsZjsZURsfl DibqiNTmsplmjJmjPicwwcRnDRgqodc0AEDmRhZtX9OII5BmTBzkhFdO7QdDS9p7tdfMXeBWISMr 
BDKnwl4wsWt6ZtwqAGYWB2G0arUrIjLxAfmxgRotFXPTt7M5o4eJIMnKDSsjd3HZ0DOryUYxxXWh GRNMZXQdSs4VA0VCMZOZ5e05unPC7Jri7gB6mYjgsmtsBeKWYrCpY+YbqiPioC0uPStioFmfbqjU p8ZXAGwBYrzPBvaapyTfC8YcYqDyLcDIcv6tW+2K+ZRY93pmoHN+cZWxJdWPfCyOadNRAPCSE3ON ZOZlZ6drYP0HorZ5Ic+EbYmb5BSdVMgYb9NqijM9Wn39d5XCHdP9e9NaXmCniYcvSUV3Toh4IqdT jJ8z++3ptqXLhyYKtQYzryIykr/rtJrizOtKamPaU1LEhzRj4iCaKZdMrHbNZqYrtZYmXmXktAV0 6D6kusO+PabavDWG6nTsV1yT2fINhKlPrAckoA+Db421TsIwJ9sqakZ9GOm+M88bt0fS2oJUOJ+k QsYW+9Pygdntd3c6Tbn6FFLow5p6MyGX9q3O1wWMriB0AHDA0t8uDjlTXBqxVosQ71RmmqHoKMox 9Pf2XmnpvIQ8PnAgJl5lqJmkTG+4B4C1JooJs6QZEwdRhF4mzlus8J6BMq2mXLzPhu6pZ2Y2Hw9g lZlqSYxptantppojW48B8FELX25k6kgqYRX9Q+gpYenQCIormwFk2o6SjEizZjN3ueMmFTIeTiAQ UL0vfOlMaPcikBoM8PnYib4AOyACMwD6ynh8an6pKWODJ7Kzd8TerGMF7s0m/pKJVsZepPUU8fum bgI4yjke0owlJWnGxEGYuLeJX3xbz0ZoL39C2synqxdeI8/OJTtWaDS19a/Hadly1icUVHd2EUm7 B/xZcTo8pxuGNGMiQQKBgOo9cPkQUvg+Y/klYPQCEWQYKzkEhi6NlDf49rfBiceaehvpxZitXNNM Hr7o05khM98/YjPrAQjzpBkTB2M+xsQ5k9nOVJeO7bw7d+wws68qO217StkXyMUmU0v5hBW16Bao 9rhdzRxRCJGOpjcUdCaiScTLfwjgRGm9xCFR9JtFH0hbmr5LBC5vwC4YWFjNpfS4pklF0oyJgzFl mRnRZysnrnfmle7NK64MI/b9q9r88z1f8uhdMDSaSHBaNCLCDmVDp9/lE2kyupGwEOlm+qsDvGrL MVeB8RswutvOI5IdZxsZlCPeHnuRqBlpxuCmyTVNCpJmTByMKNvE9A5mqyeuHQBiHnkhqLSYaqUi 2KkNjYw57LbsnKLT80OBQdtsZxAiVc0KFQ3kzfpRBk6ynUWkCjLyWUNMNhe22QEYuPGgKC2uaVJR Ojy/IFrFzEUyE8Im6kTJyNKtGtrMfMckF3Z4t6larD17W/bCtGzGmJVeYzuEEKmGGVQeKrxds35O GjHRKgRDnzVk85om5j0uAYBgYN8RERcyMibiQhnaODpKZvZyYhhrUpIZaRiZmgoADN2iRphJK0q/ e0GvrV04frPtEEKkkurqMc7M0NaHAEyxnUWkHmaYWRuUtc09Is1cT2neZaSOMC7trobE0ZCROzAM ZakZYwJgZJU+UuZGjJIZwelkqhYr1bKfH2Xm5yyVEOgPtjMIkWq2dN06A9KIiSgpNjOqxCCbq/+a OTan3+duqpBmTHwFG5nix5aWUO3he6wdDP1cKzhpcReJAGPNGCIta2AZjpGfs1RBwD1rasc+YTuH EKlkZoPvpwRcbTuHSF2sjE3xsznbx0wzpnRaXNOkIpmmKA7CwB4TQ/pE2spdpGxPVkdmQ9Oi96pP zBRKbky6O5mZp8idujgbW3ZMt9nQtjO7AWwwUcg0JmwkxjuK+G+ra8Y/ZzuPEKmkPFR0FrP+re0c IsUx7TGyKBkZaohaaVpNcSbgmtk1W6fHNU0qkmZMfAVvMXGRTEy5BsK0/riu7mFoW90dH4ZKtxqp 
lOSIcKqh/VG3vDOvtEULeDhhvQPKMXBIeq2xduyFBgoJIZIEM2hmSP8VJrYo+boIgV4C+E0AqwB6 jzV/rLxqB+lIeM+ezMM+1+nNCp8HRjAOmUTcmNmsmQAr1zSZ3nAPQ5N9eLfrfGSikDBPmjHxFWRk lIGJ+5mo0/rj6tMNlVpvqE7y03SakUEqQovvujV2yvw0b0fEBRBjR8adY3u/ECLZlC/2FREw0GxV WsTADK+nOXjV0KVRXaCXNxRsl6c7UgsxfcwU+91GIli5piHgNEOlNl5XUptWjwekEmnGxFfwx2am j5GlZoxOMzElgYC1BuKkBjJ0smd83OLXzit1UVz5GYDjYzskpBkToo0hFzeaWuEVwFuk8ePJ/nqZ KpyGNGGDiR8lZvSZVlOcmeiGhuAYuaYBpdE1TQqSWzziYKRMDWN3yi+pjulCOzpspLFg4E0TdZJd 7+LHcwDkmajFhJWtfEvLm7fDIAObewshkkd5aHgPEIYbKvfMXs/eQZP9QWnE0hWxqWeKnUzH7Wuo ViuYuaYBc1pc06QqacbEQRS775uqxYgMNlWr5egcE1WY6F8m6iQ7L1OBqVpKU6uaMUOjjx36llQY aSaFEEmAPX6YuTZ5TXf9/Hs/Grp0h4FaIkU5Shm7piFFF5mq1WIMI9c0IHrDSB0RF9KMiYNkepvf gZExcQAMv5E6LZQ7ouIEwMyzao5m281YQqYQM5lrxpjoP615vWa8Y+K4WqtzTdQRQtjH4IsNlHHZ oSumnrcibKCWSGEdN3ZcBRha3p7ZZ6JOSz0aGn4MCANM1GL71zTiCKQZEwdZOX/SdgCNhsol9MSl XPUdQ6U2r+7oMdIoxKB3fknl/8bzAPlDHskCUGSqno607mRPit42dGRpxoRoIwiIeREmBp6bMqze 9jlcJIHS0nkuQO8aKaYwvLp6jIllgFukWatRiHmRq32luDnr/wzUEXEizZg4FFPD2Xl9iyrPMlTr qJj4CkOlFmNeqWuoVtSYcVd+UcXlcauflTEewDFGihGtXtcwtlXPGyrNZpoxoiFG6ggh7COcEnMJ wlMmooi2Qpu5pmF03dJlS8K2UiFFE80UwrKpoxbIhs9JTJox8TVMvNRULQ2eYqrWkeQXV14MmJlb TYwGE3UMICb6Wx9/VXzmqRNdZ64YL2vtO1bv6flvANsMHHxwnm+OhQerhRAmBQIBBUaXWOsQ0wcm 8oi2gYieNVdLTTVV60hm1fnPBmOokWLJc00jDkOaMfE1GrzYWDGiK/uOruxhrN5hMBAwVEorr6fW UC0TMpXip/oWPRbz3eID5RZXjgJgbNSSNF5o9ZuWDo0QYOJDkuB4bjRQRwhhUb9vvtwBBvZWIdYt 3vNQtH3sOIsMVrt0+qLiE83VOzTt4TtgZp8hgFSNkToibqQZE1/TVDP+baDlG/geRXs3zAFDtQ5p /1S+IYbKLf1wQWmy7cfRzSWnIWdkxUkmip0yelZHAqaZqLUfh0kvjOaNmtjUh2RZn+K5cf+AFELE z14g00QdrVS2iTpf58mKT10RT2VDa5sAes9QOa/S7l2Gah3SrGChD4zvGir3RllBnSxrn+SkGROH QAzCP4xVA03tWzw3Lot55Pnm9GWiB03VY/CjpmqZREAfx6Vn+4yYe2ZslZiaI+1mAcg3kWu/19bX TmiK5o0eOKZGIbMc6IcRCMg5TYgU5XFcI8+1kNbG97gsDw3vAfB003VFovA8g8Uum9HgLzVY70uP LCk5XhPNNlWPmf9mqpaIH7lwEYdGVGGymoauir2ROFi/4urucDzzYWoRCmBzs+saa0LjoKfSelle 8dzi6N7OlFc8dxozjzEbi5+O9p0f1lz+HoAXjaQACvNePvnXJmoJIRLvwxcH7QagY62jCd82EOdL 
D9f7jgM7QYBPNllXJJI2eU0DAs+eXu+7wGTN6Q0Fnd1I5CkAvQ2V3M3esNGvW8SHNGPikBoXXv4i AJMPQXdTWi/KG1E1yESxnJEVJ4URWQrAWINHxNM+CV6x01S9OOkE6AX5xVW/zxlT3eKpOD18c9rn FldWAfxjw3k0sXoslgIMesRUGAA/zyuquBNgM3PtW6FfcU1mn6Kq0XkllZV5xZUf5BVXrs4vrrwk 0TmESFWBQECD0apVWQ+FQN99ZMkQI1MKZ9f7+ngUnoPBZ2xF4pUVht4F8KrBku2VQl15yFdootjs el8fBRViwFiDR0Qzpg5dutFUPRE/0oyJwyBm4AHDRY+D5sV5RVW/GDBgujeqCmOqnfySqmscl16F gf1oDrBVk/GvN14cBt/q7Ii8lVtcVbZ/v7BDGjBguje/qOLyLMfzNoEui0OWujV1Y9fEUiAD7hMw s6riPkS/yCuuXJgzstrU3cXDG7LEk19U4c8rrpwdxpaPFfHTYIwFcAKAfAYekqmTQrQCGbkJ2Nt1 vTGvFjtjkW+Qq/AyEPty+8I+JtxvuGQXMGrKG3y/+9PygVE9p8gMmhn0T3QVvQbgPIPZ9pDW9xis J+LIYzuASF66g2emsyPycwAmV0PMBPGdG4/rODm3uPIhciPzGoNXrD7am/L8j/Vk8oyhHZFrGDjN YJ79+I9rF47fbL5uXJ1I4BmcnXlvblHVEgJeheKPwaQZujsRnb2RMQxm//4OoplifoZiVe2EbfnF lfcz8EsTmfahYseNrMwrrrwf2v1LY/3EDaYq9x1d2UOHuYBBhYQNJQzqfoSX98x59fSeTcB6U8cX oi0jojeY+eJY6zDTb2Y0FPxrSmGovrXvra4e42zrtuV/WONOABmxZhHJ4SNn7xO9IxkBAP0MlvUA uK3jzo4/KA8VPszQT0wpWHTUxUIervcd53X4uzNDdA2IjWzLcyAGHp7ka4h5lFkkhjRj4rCa5pXu ziuuuhfgP8ShfB4Bd8Px3J1XXLUK4LfB+BCgzUw6rJiICccCyANwBoDTCByHGAAI//Fy11S+g9SJ iL8D4Dv7vkUMAiFe364DvLGu7vIFwNjYK2U234fmzOuA2PcYOkB7AD+Hcm7OK64MEdECcunF1c09 3sHSoZGjvjsQUL1f7tfLYZWvFJ0J5oEMXKDDOAX7vsMt4tHh7pBmTIgW0Vo/S0TXGyjlJainZzb4 ftG0bNCfAoFAi55FK6/3DduqtvweTOcbyCCSSGDo0sjMBt/vGJgVh/I9wfRrgvPr8obCRhC9CcYH IN7EWu0FACI+BkAu9l3TnMFM8ZpOv87JzrojTrVFHEgzJo6oQwdn2o4dkSux7+QRJ9wPQL99V7f7 GokEPvHDCjx1VW1Jc8KO2EYw6dsBMtLyrXnqqi15RZW/BSEeTXEGgBJmLmHFyMve0IziyvVEWK8Z uxTRNgYcgDsxI4OA9gB1xsucCyADBDBH/2W6rupo7CsRoo1j4sUE2gPAxDNfmQz8offg/2/v3oPj rM47jv+esyvfJN+gGQYb28FmOglpS5NAWwyT+iLLyBM3KQQFSa5bgi9J6YTATKckTYtKSxgyU7ch TQuDSTMtTRN5piE46LJyiifGhqYhNAQwFBu0whiIMbZsC2FJe57+4ZhAio1W++6+K/H9/OM/rPec 3+7OXp5zznvOrs/c1dNwh7v3zDo866dNTVsKJ/+gvf3KzJGZ/e/3jJYpqlWm30rqeCdUn307F39j 7iW71km6uHy92AK5FkiS3GTJfE2Ovveoa6+59L6jFe0UJaEYw2k9saVpaH7jPRtN4QeamPcYbnqu o/UHaYcYb8y0I9+x5ntJtpmfnt204NjIFUrwBuZTmCxpobsWmt5aaP3iJ1hyX54hWF1ijQET3MYV 
2/o35xruddNVCTa7UNKXzUz9s/sH7+ppOCTpqKRp/ep/j6QpclGDvQu0tbXFzd0NGzzox5LGdu96 FXPp6+tX5ramnQPFmYg/rpGwvs41O2X627RzlMF/5gfPvjHtEOPQ6xrRhsRb3dJUUAhXS3o98bbT 5AV+4gFFKFj8Whmbnyppjk5syjFPyczAYRxZtzL3uBK9R7lKuH5Ykx26Nu0YKB7FGEYlX5v9vEtF 3whdxZ4dzuqTo7p3CG9hppt7cy1PlaPt/P1X7Tb3q1WJO94qxBVYAgsUYeOKbQ+6lEs7ByaudfW5 2yRvTztHgl5UTeaKq5dun1iDme8SFGMYnS1NBQVvlvRk2lESsDeTzS7dv7WF8zeK19Fbmy3rZie9 Xa3fMtOfl7OPSrKMBtPOAIw7GbtBUjUPZEyYAaN3IzN5fH3K1ZL/d9pZErA/ZMLS9Us796UdBGND MYZR67u/9VC2MLJUpp+knWWsXHpmRHHJs1ub+tLO8g72ybU57RC/ZHdmONuiLU2Fd/7T0vR2tNzq rlvL3U8lWKH0Q2yBd5sNy7qfkPTXaec4hcfM7Pa0Q6A0G1dvfS3KV8i1K+0sY+baV3Bfcs2yrqfT joKxoxhDUfbm1v6sUMguk/RQ2lmK5fLcSFaLX+hcMx5Gjwr5ruYNJrst7SA/tzeYNz67ram/Uh32 dbV8waXPShrVltRV6tiZB46wrT0wBi/sXHyrS/emneMtTI9MssIydz+QdhSUbuOKbf1DNUMr5epJ O8sY7Azy3/50Q88zaQdBaSjGULR93U2v1tVll7j09xofSzWi3P+mr65m1fhammje29l8o8n/WNJw ikF2xxH7yHMdrflKd9zX2fJVKXxU0kuV7jsRpq2PPLIxzdcOGLfa2trilKg1km9PO4skyWzL5IJ+ 9w/rv38w7ShIzrVLtx+beXhmo5vfrPEy+Oe6Pc4+uJSDnScGijGMyRNbmob6Oluuj24fl1TNM02P muIl+a7Wv6jE8rpy6O1s/Sdza5BU8ULSpK2FmL30+Z7m1D7w851XddYo+xuSvpNWhjHaG7K6Pu0Q wHi2dmVu4GjtsVWSvptijCF3u3Hd8u5Prl2ZG0gxB8qkqWlLYUN9z03m3iBpb9p5TmN3MFu2viF3 3cYLH2Ggb4KgGENJnu9qvu/1wsj7TLpZqqqNCl6W2Z/k67IX9XaueTjtMKXq7WreHkfsApdXaoex QXO7obez+WP7upterVCfp7Sns+lAvrPlcjc1SHok7Tzv4BUz/6saxQ89d1/Ly0k0aIVMIqO1JqvE qG/JfUSPxbfhIZnHZirboI1bJpG2LST0WN9GtITajmN4DU/hhsUPDa6rz/2+u39O0mtJtTtKP4qK F25o6L7N7E0rQUo5CT4BFhL6TPDx8pngFZmxWtfQ8/3B4cwH3O1GnTiLrlocNPM/i7MPXnBNffcD aYdBsijGULKXc2sHejtbblIsLJJ0i6QU19LbHnN92gaPvzff0fy18Tob9nae72ne39fZctnPly0e KlM37vJvB/P393Y1/51kVbUMta+jpSff2XyRTJ+Q6wFVzzLZIZnuN9lVhbrs/N6O1rY9nWuOJNX4 jLrwvBJYqhndf5RAnHdSch+mUHQbGRv5H0mlHlURXYVHS2zjlBLKeNxifCyJPG/HVHhSpb+vjmfc fppEnpPM5Bsaer5iIXu+zLao3O991z5zX/9CdujijSu2/f/HYmGsK0IGZhysfbG0cNLkQtwrqeSB Mlcs+2eCJfKZUHobo/XZVZ3HNzR03xazQwslfVFSya9XCfrc/XOToxasq+/5MrNhExOHkSJx51zZ PjV7dKRJpstdWqETh2yW0ysm+4/oau+bntmedgG2oPGbj0r6zRKbyec7W957qv+cs/qbv1IzYjdJ 
/keS6krsS5IG3ezbsvgPffe3VvvM0xsWrvrWr44oXm2uj0r6tQp27ZKekfSgTB1TsoO5p++7pqyj qPMb7znfFG6RvEGyaUVc6pJ+Yu5f6e1q/UaZ4r3hvMb29wz7yJdkukLS7CIvf1auO/LTs5vG8j6+ q7thmTK6Sa7FkrJFXDri0g4z3bq+PlfWG/k351Ys92B/OYaMxyXb7sFv3rA8V9bd3zZ3N6z2oC9I ukhSpohLh2XaIVnb+vruHWWKJ0n6+rbG8wuK17t7k0kzEmz6cbnfka0Zvvt0Zzbd3tE4eeqk+K9y v1yjf46eddN1G+pz30si6N1dKy8oZOItJlum4r5no6Qfy33T+oaef08iy+ncnVsxJ8q+JNPHJc0s 7mr7X/f4j/t3XfLVtra2VO7nam+/ctLhM/ovD65PuHSZpNqydmg6ZLLvRvN2n3lwGwXYxEcxhrI6 q+FfaqeEmgZZXCzZRZI+JGl6ic2+YtJDLn/YZDt7B8/eWU2HN1eiGDvpvMZ7Zgx7WOPmHzPZRyRN KaKPQ2a2zV1dhZi5txqWI5ZibuM952SVaZD8dyR9UCeKs2Kej1M5Jukpk56Ua7fkj8aMfth3f2u5 ZidP68MfvrPm4JnTF8Vgc6V4RghW5+61LtWabMTlR4NCf7TCgEe9bBnbk0rWtrYw77/ed27GfZ4H P9Pda4NUG6UZJ2Zc/XBwO1qQDYSMXgkZ35vUss47t66eFqYOn2fyOYpxuqSZUaqVwhQpvh6kAUn9 CuGoy/bHwZo9G1dvrejyt9FmjOZHQgwvZCYN7an0ga53P/h70wuvDS5Sxs4OBa+LFmbJ47Rqeh43 7bp4at3AjFVBvtKl5ZIWFtmEu+kxc+WChe9cU99V1E7Bd/bUz8zEcIGbz/dgMxTtje83Mx03+RGZ 7R8etqc+09jVW2S2UWlvv3LSsTOOnTeiOCfEONst1JlUG121CnHYZMfc7XCQBjz6SzHEPRtXbKvY zrhvypl5ddbhhTUhnBPlZ9qJz4NaRZtu5lGmfpeOhIIGPMQDmeGavZ9a1VlVu1b+8wNLphRGJi93 +aUnftP4hSq6wPwlpkMW9bAHf8iids04PGtHU9OWoWQSYzygGENlXdmemf/a8IIQbV40m2/u8002 XdJsN2VOjnC6+2GZhs3taFQ8FBR6pUJeNqm3t6OpqnfWq2Qx9mZzVm+dVjN89AI3//UgWyTZrKg4 08yiuR11835FHXDTUzEWdu+bMfm5tGcRy2rJA9lzp+xbFC0z183myuOcoDBbJ2dr7MS/0f21IDvu 7sf8xOzDS8G8r+DxheC+L9/9B2kuUQFQhM1dK89QiB/0YIvkmiP5WXKbqmDTJD8u92Oy8KrkeY/+ 9OQQH2d3RIyVu+yOrssWZGt8nmJcYMHmST4zRptlwTL6RaHWL/dhMz8iWb/c8jF4b6bgvZ9a0fPi W+5HxLsOxRiQsLSKMQAAAIwvbOABAAAAACmgGAMAAACAFFCMAQAAAEAKKMYAAAAAIAUUYwAAAACQ AooxAAAAAEgBxRgAAAAApIBiDAAAAABSQDEGAAAAACmgGAMAAACAFFCMAQAAAEAKKMYAAAAAIAUU YwAAAACQAooxAAAAAEgBxRgAAAAApIBiDAAAAABSQDEGAAAAACmgGAMAAACAFFCMAQAAAEAKKMaA 5I1USRsAAACoYhRjQMLctK3kNuQ9SWQBAABA9aIYAxLWV5v9omR/Ktme4q+2Peb6/PS6muuSTwYA AIBqYmkHACayc1f924IY9QELdpa717pUGxRmS5LcD0fTgMkG3Ao/s5H4RD639rmUIwMAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA APwflgAxUsonrkcAAAAASUVORK5CYII= "
+ preserveAspectRatio="none"
+ height="54.204151"
+ width="65" />
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer2"
+ inkscape:label="BADGE"
+ style="display:none"
+ sodipodi:insensitive="true">
+ <g
+ style="display:inline"
+ transform="translate(-340.00001,-581)"
+ id="g4394"
+ clip-path="none">
+ <g
+ id="g855">
+ <g
+ inkscape:groupmode="maskhelper"
+ id="g870"
+ clip-path="url(#clipPath873)"
+ style="opacity:0.6;filter:url(#filter891)">
+ <circle
+ transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
+ id="path844"
+ style="color:#000000;display:inline;overflow:visible;visibility:visible;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;enable-background:accumulate"
+ cx="252"
+ cy="552.36218"
+ r="12" />
+ </g>
+ <g
+ id="g862">
+ <circle
+ style="color:#000000;display:inline;overflow:visible;visibility:visible;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;enable-background:accumulate"
+ id="path4398"
+ transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)"
+ cx="252"
+ cy="552.36218"
+ r="12" />
+ <circle
+ transform="matrix(1.25,0,0,1.25,33,-100.45273)"
+ id="path4400"
+ style="color:#000000;display:inline;overflow:visible;visibility:visible;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;enable-background:accumulate"
+ cx="252"
+ cy="552.36218"
+ r="12" />
+ <path
+ sodipodi:type="star"
+ style="color:#000000;display:inline;overflow:visible;visibility:visible;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;enable-background:accumulate"
+ id="path4459"
+ sodipodi:sides="5"
+ sodipodi:cx="666.19574"
+ sodipodi:cy="589.50385"
+ sodipodi:r1="7.2431178"
+ sodipodi:r2="4.3458705"
+ sodipodi:arg1="1.0471976"
+ sodipodi:arg2="1.6755161"
+ inkscape:flatsided="false"
+ inkscape:rounded="0.1"
+ inkscape:randomized="0"
+ d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
+ transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
+ </g>
+ </g>
+ </g>
+ </g>
+</svg>
--- /dev/null
+includes:
+ - 'layer:basic'
+ - 'interface:mysql'
+ - 'interface:openvim'
+ - 'interface:http'
+
+options:
+ basic:
+ packages:
+ - python-yaml
+ - python-bottle
+ - python-mysqldb
+ - python-jsonschema
+ - python-paramiko
+ - python-argcomplete
+ - python-requests
+ - python3-git
+
+ # These are for openstack as a VIM
+ # - python-novaclient
+ # - python-keystoneclient
+ # - python-glanceclient
+ # - python-neutronclient
+
+ # mysql client needed to install database
+ - mariadb-client
--- /dev/null
+name: openmano
+summary: OpenMANO
+maintainers:
+ - Adam Israel <adam@adamisrael.com>
+description: |
+ Installs and configures OpenMANO
+tags:
+ - nfv
+ - telco
+ - osm
+series:
+ - xenial
+subordinate: false
+requires:
+ db:
+ interface: mysql
+ openvim-controller:
+ interface: openvim
+provides:
+ openmano:
+ interface: http
--- /dev/null
+from git import Repo as gitrepo
+from shutil import rmtree
+
+import os
+import subprocess
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+from charmhelpers.core import templating
+from charmhelpers.core.unitdata import kv
+from charmhelpers.core.hookenv import (
+ config,
+ log,
+ open_port,
+ status_set,
+)
+
+from charmhelpers.core.host import (
+ chownr,
+)
+
+from charms.reactive import (
+ when,
+ when_not,
+ set_state,
+ is_state,
+)
+
# Charm-local persistent key/value store; survives across hook invocations.
kvdb = kv()

# Filesystem location where the openmano repository is checked out.
INSTALL_PATH = '/opt/openmano'
# System user that owns the install and runs the openmano service.
USER = 'openmanod'
+
+
@when('openmano.installed', 'openmano.available')
def openmano_available(openmano):
    """Advertise the openmano REST endpoint on the http relation.

    Runs once the charm is installed and a consumer has related over
    the ``openmano`` interface.
    """
    # TODO make this configurable via charm config
    rest_port = 9090
    openmano.configure(port=rest_port)
+
+
@when('openvim-controller.available',
      'db.available',
      'db.installed',
      'openmano.installed',
      'openmano.running',
      )
def openvim_available(openvim, db):
    """Register the related openvim controller as an openmano datacenter.

    Only the first endpoint of the first advertised service is used; the
    unconditional ``break`` at the end of each loop stops after one
    registration.

    :param openvim: openvim-controller relation endpoint object.
    :param db: mysql relation endpoint object (required state only; its
        values are not read here).
    """
    for service in openvim.services():
        for endpoint in service['hosts']:
            # Renamed from `host`: the old name shadowed the
            # charmhelpers.core.host module imported at file scope.
            hostname = endpoint['hostname']
            port = endpoint['port']
            user = endpoint['user']

            openvim_uri = '{}:{}'.format(hostname, port)
            if kvdb.get('openvim_uri') == openvim_uri:
                # This controller is already registered; nothing to do.
                return

            # TODO: encapsulate the logic in create-datacenter.sh into python
            try:
                cmd = './scripts/create-datacenter.sh {} {} {} {}'.format(
                    hostname, port, user, kvdb.get('openmano-tenant'))
                out, err = _run(cmd)
            except subprocess.CalledProcessError as e:
                # Exit code 153 means the datacenter already exists; any
                # other failure is a real error and must propagate.
                if e.returncode != 153:
                    raise

            kvdb.set('openvim_uri', openvim_uri)
            if not is_state('db.available'):
                status_set('waiting', 'Waiting for database')
            break
        break
+
+
@when('openmano.installed',
      'db.installed',
      'openvim-controller.available')
@when_not('openmano.running')
def start(*args):
    """Start the openmano service and report the public endpoint.

    Runs the service-openmano helper as the openmano user, creates a
    tenant via scripts/create-tenant.sh on first start (caching its id in
    the unit kv store), then sets the 'openmano.running' state.
    """
    # TODO: if the service fails to start, we should raise an error to the op
    # Right now, it sets the state as running and the charm dies. Because
    # service-openmano returns 0 when it fails.
    cmd = "/home/{}/bin/service-openmano start".format(USER)
    out, err = _run(cmd)

    # First start only: create the tenant and remember its id so that the
    # openvim handler can pass it to create-datacenter.sh later.
    if not kvdb.get('openmano-tenant'):
        out, err = _run('./scripts/create-tenant.sh')
        kvdb.set('openmano-tenant', out.strip())

    status_set(
        'active',
        'Up on {host}:{port}'.format(
            host=hookenv.unit_public_ip(),
            port='9090'))

    set_state('openmano.running')
+
+
@when('db.available', 'openmano.installed')
@when_not('db.installed')
def setup_db(db):
    """Setup the database.

    Initializes the mano database through scripts/init_mano_db.sh,
    renders openmanod.cfg with the connection details, and records the
    db uri in the unit kv store so repeated runs become no-ops.
    """
    db_uri = 'mysql://{}:{}@{}:{}/{}'.format(
        db.user(),
        db.password(),
        db.host(),
        db.port(),
        db.database(),
    )

    if kvdb.get('db_uri') == db_uri:
        # We're already configured
        return

    status_set('maintenance', 'Initializing database')

    try:
        # HACK: use a packed version of init_mano_db until bug https://osm.etsi.org/bugzilla/show_bug.cgi?id=56 is fixed
        # cmd = "{}/database_utils/init_mano_db.sh --createdb ".format(kvdb.get('repo'))
        cmd = "./scripts//init_mano_db.sh --createdb "
        # Note the argument order: -d is the database name, -P the port.
        cmd += "-u {} -p{} -h {} -d {} -P {}".format(
            db.user(),
            db.password(),
            db.host(),
            db.database(),
            db.port(),
        )
        output, err = _run(cmd)
    except subprocess.CalledProcessError:
        # Eat this. init_mano_db.sh will return error code 1 on success
        pass

    context = {
        'user': db.user(),
        'password': db.password(),
        'host': db.host(),
        'database': db.database(),
        'port': db.port(),
    }
    # Render the service config into the cloned repo, owned by the
    # openmano user.
    templating.render(
        'openmanod.cfg',
        os.path.join(kvdb.get('repo'), 'openmanod.cfg'),
        context,
        owner=USER,
        group=USER,
    )
    kvdb.set('db_uri', db_uri)

    status_set('active', 'Database installed.')
    set_state('db.installed')
+
+
@when_not('openvim-controller.available')
def need_openvim():
    """Flag the unit as blocked on the openvim-controller relation."""
    message = 'Waiting for OpenVIM'
    status_set('waiting', message)
+
+
@when_not('db.available')
def need_db():
    """Flag the unit as blocked on the mysql relation."""
    message = 'Waiting for database'
    status_set('waiting', message)
+
+
@when_not('db.available')
@when_not('openvim-controller.available')
def need_everything():
    """Flag the unit as blocked on both the db and openvim relations."""
    message = 'Waiting for database and OpenVIM'
    status_set('waiting', message)
+
+
@when_not('openmano.installed')
def install_layer_openmano():
    """Install openmano from git.

    Creates the openmano user, clones the configured repository/branch
    into /opt/openmano, and symlinks the CLI helpers into the user's
    ~/bin, then opens the API port and sets 'openmano.installed'.
    """
    status_set('maintenance', 'Installing')

    cfg = config()

    # TODO change user home
    # XXX security issue!
    host.adduser(USER, password=USER)

    # Always start from a clean checkout.
    if os.path.isdir(INSTALL_PATH):
        rmtree(INSTALL_PATH)

    gitrepo.clone_from(
        cfg['repository'],
        INSTALL_PATH,
        branch=cfg['branch'],
    )

    chownr(
        INSTALL_PATH,
        owner=USER,
        group=USER,
        follow_links=False,
        chowntopdir=True
    )

    # The logs dir is created after the clone; the second chownr makes
    # sure it ends up owned by the openmano user as well.
    os.mkdir(os.path.join(INSTALL_PATH, 'logs'))
    chownr(INSTALL_PATH, USER, USER)
    kvdb.set('repo', INSTALL_PATH)

    # NOTE(review): os.mkdir/os.symlink raise if the targets already
    # exist; this relies on the 'openmano.installed' state preventing
    # re-entry -- confirm on charm upgrade paths.
    os.mkdir('/home/{}/bin'.format(USER))

    os.symlink(
        "{}/openmano".format(INSTALL_PATH),
        "/home/{}/bin/openmano".format(USER))
    os.symlink(
        "{}/scripts/openmano-report.sh".format(INSTALL_PATH),
        "/home/{}/bin/openmano-report.sh".format(USER))
    os.symlink(
        "{}/scripts/service-openmano.sh".format(INSTALL_PATH),
        "/home/{}/bin/service-openmano".format(USER))

    open_port(9090)
    set_state('openmano.installed')
+
+
def _run(cmd, env=None):
    """Run *cmd* and return its decoded output.

    Args:
        cmd: command to execute; a string is split on whitespace (note:
            arguments containing spaces are therefore not supported), a
            list is passed through unchanged.
        env: optional environment mapping for the child process.

    Returns:
        Tuple of (stdout, stderr), both decoded as UTF-8.

    Raises:
        subprocess.CalledProcessError: if the command exits with a
            non-zero status; the stripped stderr is attached as output.
    """
    if isinstance(cmd, str):
        cmd = cmd.split() if ' ' in cmd else [cmd]

    log(cmd)
    p = subprocess.Popen(cmd,
                         env=env,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # BUG FIX: use the return code recorded by communicate() and treat any
    # non-zero status as failure. The original checked 'retcode > 0',
    # which silently ignored processes killed by a signal (negative codes
    # on POSIX).
    retcode = p.returncode
    if retcode != 0:
        raise subprocess.CalledProcessError(
            returncode=retcode,
            cmd=cmd,
            output=stderr.decode("utf-8").strip())
    return (stdout.decode('utf-8'), stderr.decode('utf-8'))
--- /dev/null
#!/bin/sh
# Create (or re-create) the "myov" datacenter in OpenMano and attach it
# to a vim tenant.
#
# Arguments:
#   $1 - openvim host
#   $2 - openvim port
#   $3 - vim tenant id (passed to datacenter-attach)
#   $4 - openmano tenant (exported for the openmano CLI)
HOME=/home/openmanod
OPENMANO=$HOME/bin/openmano
export OPENMANO_TENANT=$4

OPENMANO_DATACENTER=`$OPENMANO datacenter-list myov`
if [ $? -eq 0 ]; then
    # The datacenter already exists: the current approach is to delete
    # the existing one and create a new one. This script will eventually
    # go away in favour of a python API to OpenMano.
    OPENMANO_DATACENTER=`echo $OPENMANO_DATACENTER |gawk '{print $1}'`

    # Delete netmap
    $OPENMANO datacenter-netmap-delete --all -f --datacenter $OPENMANO_DATACENTER

    # detach
    $OPENMANO datacenter-detach -a $OPENMANO_DATACENTER

    # Make sure the datacenter is deleted
    $OPENMANO datacenter-delete --force myov
fi

# BUG FIX: the original only ran datacenter-create inside the branch
# above (i.e. when the datacenter already existed), so on a fresh
# install the datacenter was never created and the attach below operated
# on the error output of datacenter-list.
OPENMANO_DATACENTER=`$OPENMANO datacenter-create myov http://$1:$2/openvim`
OPENMANO_DATACENTER=`echo $OPENMANO_DATACENTER |gawk '{print $1}'`

$OPENMANO datacenter-attach myov --vim-tenant-id $3
$OPENMANO datacenter-netmap-import -f --datacenter $OPENMANO_DATACENTER
--- /dev/null
#!/bin/sh
# Create the "mytenant" tenant in OpenMano and print its id (first
# field of the CLI output). If creation fails because the tenant is
# already there, fall back to looking it up instead.
OPENMANO=/home/openmanod/bin/openmano
if ! OPENMANO_TENANT=`$OPENMANO tenant-create mytenant --description=mytenant`; then
    OPENMANO_TENANT=`$OPENMANO tenant-list mytenant`
fi
echo $OPENMANO_TENANT |gawk '{print $1}'
--- /dev/null
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+DBUSER="mano"
+DBPASS=""
+DBHOST="localhost"
+DBPORT="3306"
+DBNAME="mano_db"
+CREATEDB=""
+
+# Detect paths
+MYSQL=$(which mysql)
+AWK=$(which awk)
+GREP=$(which grep)
+#DIRNAME=`dirname $0`
+DIRNAME=/opt/openmano/database_utils
+
# Print command-line usage help for this script to stdout.
function usage(){
    echo -e "Usage: $0 OPTIONS"
    echo -e "  Inits openmano database; deletes previous one and loads from ${DBNAME}_structure.sql"
    echo -e "  OPTIONS"
    echo -e "     -u USER  database user. '$DBUSER' by default. Prompts if DB access fails"
    echo -e "     -p PASS  database password. 'No password' by default. Prompts if DB access fails"
    echo -e "     -P PORT  database port. '$DBPORT' by default"
    echo -e "     -h HOST  database host. '$DBHOST' by default"
    echo -e "     -d NAME  database name. '$DBNAME' by default.  Prompts if DB access fails"
    echo -e "     --help   shows this help"
    echo -e "     --createdb   forces the deletion and creation of the database"
}
+
# Parse command-line options. Long options (--help / --createdb) are
# routed through the getopts '-' pseudo-option.
while getopts ":u:p:P:d:h:-:" o; do
    case "${o}" in
        u)
            DBUSER="$OPTARG"
            ;;
        p)
            DBPASS="$OPTARG"
            ;;
        P)
            DBPORT="$OPTARG"
            ;;
        d)
            DBNAME="$OPTARG"
            ;;
        h)
            DBHOST="$OPTARG"
            ;;
        -)
            # OPTARG holds the long option name (text after '--').
            if [ "${OPTARG}" == "help" ]; then
                usage && exit 0
            elif [ "${OPTARG}" == "createdb" ]; then
                CREATEDB="yes"
            else
                echo "Invalid option: --$OPTARG" >&2 && usage  >&2
                exit 1
            fi
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2 && usage  >&2
            exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument." >&2 && usage  >&2
            exit 1
            ;;
        *)
            usage >&2
            exit -1
            ;;
    esac
done
shift $((OPTIND-1))
+
#check and ask for database user password
DBHOST_="-h$DBHOST"
DBPORT_="-P$DBPORT"

# Pass the credentials to the mysql tools through a mode-0600
# --defaults-extra-file so the password is never visible in 'ps' output.
TEMPFILE="$(mktemp -q --tmpdir "initmanodb.XXXXXX")"
trap 'rm -f "$TEMPFILE"' EXIT SIGINT SIGTERM
chmod 0600 "$TEMPFILE"
cat >"$TEMPFILE" <<EOF
[client]
user="${DBUSER}"
password="${DBPASS}"
EOF
DEF_EXTRA_FILE_PARAM="--defaults-extra-file=$TEMPFILE"

# Keep prompting until the credentials allow a connection.
while ! mysql $DEF_EXTRA_FILE_PARAM $DBHOST_ $DBPORT_ -e "quit" >/dev/null 2>&1
do
    [ -n "$logintry" ] && echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)"
    [ -z "$logintry" ] && echo -e "\nProvide database credentials"
    read -e -p "mysql user($DBUSER): " KK
    [ -n "$KK" ] && DBUSER="$KK"
    read -e -s -p "mysql password: " DBPASS
    cat >"$TEMPFILE" <<EOF
[client]
user="${DBUSER}"
password="${DBPASS}"
EOF
    logintry="yes"
    echo
done

# BUG FIX: build the -u/-p arguments for migrate_mano_db.sh *after* the
# credential loop. The original computed them before the loop, so any
# interactively re-entered user or password was silently ignored by the
# migration step below.
DBUSER_="-u$DBUSER"
DBPASS_=""
[ -n "$DBPASS" ] && DBPASS_="-p$DBPASS"

if [ -n "${CREATEDB}" ]; then
    echo " deleting previous database ${DBNAME}"
    echo "DROP DATABASE IF EXISTS ${DBNAME}" | mysql $DEF_EXTRA_FILE_PARAM $DBHOST_ $DBPORT_
    echo " creating database ${DBNAME}"
    mysqladmin $DEF_EXTRA_FILE_PARAM $DBHOST_ $DBPORT_ -s create ${DBNAME} || exit 1
fi

echo " loading ${DIRNAME}/${DBNAME}_structure.sql"
mysql $DEF_EXTRA_FILE_PARAM $DBHOST_ $DBPORT_ $DBNAME < ${DIRNAME}/mano_db_structure.sql

# BUG FIX: corrected "migrage" typo in the status message.
echo " migrate database version"
${DIRNAME}/migrate_mano_db.sh $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ -d$DBNAME
+
--- /dev/null
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#HTTP Server parameters (MANO API). IP address and port where openmanod listens
+# IPtables/firewalld must allow this port
# for CentOS/RedHat firewalld is configured at '/etc/firewalld/services/openmanod.xml'
+# edit this file and reload firewalld with 'firewall-cmd --reload' if port is changed
+http_host: 0.0.0.0 # IP address, (by default, 0.0.0.0 means that it will listen in all interfaces)
+http_port: 9090 # General port (by default, 9090)
+#http_admin_port: 9095 # Admin port where openmano is listening (when missing, no administration server is launched)
+ # Not used in current version!
+
+# Ports to be used. Comma separated list. Can contain a {"from":<port>, "to":<port>} entry
+#e.g. from 9000 to 9005: [{"from":9000, "to":9005}], or also [9000,9001,9002,9003,9004,9005]
+#e.g. from 9000 to 9100 apart from 9050,9053: [{"from":9000, "to":9049},9051,9052,{"from":9054, "to":9099}]
+http_console_ports: [{"from":9096, "to":9110}]
+
+#Database parameters
+db_host: {{host}} # by default localhost
+db_user: {{user}} # DB user
+db_passwd: {{password}} # DB password
+db_name: {{database}} # Name of the MANO DB
+
+#other MANO parameters
+# Folder where the VNF descriptors will be stored
+# The folder will be created in the execution folder if it does not exist
+vnf_repository: "./vnfrepo" # Use an absolute path to avoid misunderstandings
--- /dev/null
+#!/bin/bash
+
+sudo add-apt-repository ppa:juju/stable -y
+sudo apt-get update
+sudo apt-get install amulet python-requests -y
--- /dev/null
+#!/usr/bin/python3
+
+import amulet
+import requests
+import unittest
+
+
class TestCharm(unittest.TestCase):
    """Amulet integration test for the layer-openmano charm."""

    def setUp(self):
        """Deploy and expose one layer-openmano unit (15 min timeout)."""
        self.d = amulet.Deployment()

        self.d.add('layer-openmano')
        self.d.expose('layer-openmano')

        self.d.setup(timeout=900)
        self.d.sentry.wait()

        # First (and only) unit of the deployed service.
        self.unit = self.d.sentry['layer-openmano'][0]

    def test_service(self):
        """The exposed unit should answer HTTP on its public address."""
        # test we can access over http
        page = requests.get('http://{}'.format(self.unit.info['public-address']))
        self.assertEqual(page.status_code, 200)
        # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
        # more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
        # - .info - An array of the information of that unit from Juju
        # - .file(PATH) - Get the details of a file on that unit
        # - .file_contents(PATH) - Get plain text output of PATH file from that unit
        # - .directory(PATH) - Get details of directory
        # - .directory_contents(PATH) - List files and folders in PATH on that unit
        # - .relation(relation, service:rel) - Get relation data from return service
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# This file is standalone vmware vcloud director util
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: mbayramov@vmware.com
+##
+
+"""
+
+Standalone application that leverage openmano vmware connector work with vCloud director rest api.
+
+ - Provides capability to create and delete VDC for specific organization.
+ - Create, delete and manage network for specific VDC
+ - List deployed VM's , VAPPs, VDSs, Organization
+ - View detail information about VM / Vapp , Organization etc
+ - Operate with images upload / boot / power on etc
+
+ Usage example.
+
+ List organization created in vCloud director
+ vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list org
+
+ List VDC for particular organization
+ vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list vdc
+
+ Upload image
+ python vmwarerecli.py image upload /Users/spyroot/Developer/Openmano/Ro/vnfs/cirros/cirros.ovf
+
+ Boot Image
+ python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF image boot cirros cirros
+
+ View vApp
+ python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF view vapp 90bd2b4e-f782-46cf-b5e2-c3817dcf6633 -u
+
+ List VMS
+ python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vms
+
+ List VDC in OSM format
+ python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vdc -o
+
Mustafa Bayramov
+mbayramov@vmware.com
+"""
+import os
+import argparse
+import traceback
+import uuid
+
+from xml.etree import ElementTree as ET
+
+import sys
+from pyvcloud import Http
+
+import logging
+import vimconn
+import time
+import uuid
+import urllib3
+import requests
+
+from vimconn_vmware import vimconnector
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+from prettytable import PrettyTable
+
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+__author__ = "Mustafa Bayramov"
+__date__ = "$16-Sep-2016 11:09:29$"
+
+
+# TODO move to main vim
+def delete_network_action(vca=None, network_uuid=None):
+ """
+ Method leverages vCloud director and query network based on network uuid
+
+ Args:
+ vca - is active VCA connection.
+ network_uuid - is a network uuid
+
+ Returns:
+ The return XML respond
+ """
+
+ if vca is None or network_uuid is None:
+ return None
+
+ url_list = [vca.host, '/api/admin/network/', network_uuid]
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code == requests.codes.ok:
+ print response.content
+ return response.content
+
+ return None
+
+
+def print_vapp(vapp_dict=None):
+ """ Method takes vapp_dict and print in tabular format
+
+ Args:
+ vapp_dict: container vapp object.
+
+ Returns:
+ The return nothing
+ """
+
+ # following key available to print
+ # {'status': 'POWERED_OFF', 'storageProfileName': '*', 'hardwareVersion': '7', 'vmToolsVersion': '0',
+ # 'memoryMB': '384',
+ # 'href': 'https://172.16.254.206/api/vAppTemplate/vm-129e22e8-08dc-4cb6-8358-25f635e65d3b',
+ # 'isBusy': 'false', 'isDeployed': 'false', 'isInMaintenanceMode': 'false', 'isVAppTemplate': 'true',
+ # 'networkName': 'nat', 'isDeleted': 'false', 'catalogName': 'Cirros',
+ # 'containerName': 'Cirros Template', # 'container':
+ # 'https://172.16.254.206/api/vAppTemplate/vappTemplate-b966453d-c361-4505-9e38-ccef45815e5d',
+ # 'name': 'Cirros', 'pvdcHighestSupportedHardwareVersion': '11', 'isPublished': 'false',
+ # 'numberOfCpus': '1', 'vdc': 'https://172.16.254.206/api/vdc/a5056f85-418c-4bfd-8041-adb0f48be9d9',
+ # 'guestOs': 'Other (32-bit)', 'isVdcEnabled': 'true'}
+
+ if vapp_dict is None:
+ return
+
+ vm_table = PrettyTable(['vm uuid',
+ 'vapp name',
+ 'vapp uuid',
+ 'network name',
+ 'storage name',
+ 'vcpu', 'memory', 'hw ver','deployed','status'])
+ for k in vapp_dict:
+ entry = []
+ entry.append(k)
+ entry.append(vapp_dict[k]['containerName'])
+ # vm-b1f5cd4c-2239-4c89-8fdc-a41ff18e0d61
+ entry.append(vapp_dict[k]['container'].split('/')[-1:][0][5:])
+ entry.append(vapp_dict[k]['networkName'])
+ entry.append(vapp_dict[k]['storageProfileName'])
+ entry.append(vapp_dict[k]['numberOfCpus'])
+ entry.append(vapp_dict[k]['memoryMB'])
+ entry.append(vapp_dict[k]['pvdcHighestSupportedHardwareVersion'])
+ entry.append(vapp_dict[k]['isDeployed'])
+ entry.append(vapp_dict[k]['status'])
+
+ vm_table.add_row(entry)
+
+ print vm_table
+
+
+def print_org(org_dict=None):
+ """ Method takes vapp_dict and print in tabular format
+
+ Args:
+ org_dict: dictionary of organization where key is org uuid.
+
+ Returns:
+ The return nothing
+ """
+
+ if org_dict is None:
+ return
+
+ org_table = PrettyTable(['org uuid', 'name'])
+ for k in org_dict:
+ entry = [k, org_dict[k]]
+ org_table.add_row(entry)
+
+ print org_table
+
+
+def print_vm_list(vm_dict=None):
+ """ Method takes vapp_dict and print in tabular format
+
+ Args:
+ vm_dict: dictionary of organization where key is org uuid.
+
+ Returns:
+ The return nothing
+ """
+ if vm_dict is None:
+ return
+
+ vm_table = PrettyTable(
+ ['vm uuid', 'vm name', 'vapp uuid', 'vdc uuid', 'network name', 'is deployed', 'vcpu', 'memory', 'status'])
+
+ try:
+ for k in vm_dict:
+ entry = []
+ entry.append(k)
+ entry.append(vm_dict[k]['name'])
+ entry.append(vm_dict[k]['container'].split('/')[-1:][0][5:])
+ entry.append(vm_dict[k]['vdc'].split('/')[-1:][0])
+ entry.append(vm_dict[k]['networkName'])
+ entry.append(vm_dict[k]['isDeployed'])
+ entry.append(vm_dict[k]['numberOfCpus'])
+ entry.append(vm_dict[k]['memoryMB'])
+ entry.append(vm_dict[k]['status'])
+ vm_table.add_row(entry)
+ print vm_table
+ except KeyError:
+ logger.error("wrong key {}".format(KeyError.message))
+ pass
+
+
+def print_vdc_list(org_dict=None):
+ """ Method takes vapp_dict and print in tabular format
+
+ Args:
+ org_dict: dictionary of organization where key is org uuid.
+
+ Returns:
+ The return nothing
+ """
+ if org_dict is None:
+ return
+ try:
+ vdcs_dict = {}
+ if org_dict.has_key('vdcs'):
+ vdcs_dict = org_dict['vdcs']
+ vdc_table = PrettyTable(['vdc uuid', 'vdc name'])
+ for k in vdcs_dict:
+ entry = [k, vdcs_dict[k]]
+ vdc_table.add_row(entry)
+
+ print vdc_table
+ except KeyError:
+ logger.error("wrong key {}".format(KeyError.message))
+ logger.logger.debug(traceback.format_exc())
+
+
+def print_network_list(org_dict=None):
+ """ Method print network list.
+
+ Args:
+ org_dict: dictionary of organization that contain key networks with a list of all
+ network for for specific VDC
+
+ Returns:
+ The return nothing
+ """
+ if org_dict is None:
+ return
+ try:
+ network_dict = {}
+ if org_dict.has_key('networks'):
+ network_dict = org_dict['networks']
+ network_table = PrettyTable(['network uuid', 'network name'])
+ for k in network_dict:
+ entry = [k, network_dict[k]]
+ network_table.add_row(entry)
+
+ print network_table
+
+ except KeyError:
+ logger.error("wrong key {}".format(KeyError.message))
+ logger.logger.debug(traceback.format_exc())
+
+
+def print_org_details(org_dict=None):
+ """ Method takes vapp_dict and print in tabular format
+
+ Args:
+ org_dict: dictionary of organization where key is org uuid.
+
+ Returns:
+ The return nothing
+ """
+ if org_dict is None:
+ return
+ try:
+ catalogs_dict = {}
+
+ print_vdc_list(org_dict=org_dict)
+ print_network_list(org_dict=org_dict)
+
+ if org_dict.has_key('catalogs'):
+ catalogs_dict = org_dict['catalogs']
+
+ catalog_table = PrettyTable(['catalog uuid', 'catalog name'])
+ for k in catalogs_dict:
+ entry = [k, catalogs_dict[k]]
+ catalog_table.add_row(entry)
+
+ print catalog_table
+
+ except KeyError:
+ logger.error("wrong key {}".format(KeyError.message))
+ logger.logger.debug(traceback.format_exc())
+
+
def delete_actions(vim=None, action=None, namespace=None):
    """Route 'delete' subcommands; currently only networks are handled.

    Resolves the network name to a uuid when the CLI did not pass one,
    then asks the connector to delete it.
    """
    if action != 'network' and namespace.action != 'network':
        return

    logger.debug("Requesting delete for network {}".format(namespace.network_name))
    network_uuid = namespace.network_name
    # if request name based we need find UUID
    # TODO optimize it or move to external function
    if not namespace.uuid:
        for org in vim.get_org_list():
            org_net = vim.get_org(org)['networks']
            for network in org_net:
                if org_net[network] == namespace.network_name:
                    network_uuid = network

    vim.delete_network_action(network_uuid=network_uuid)
+
+
def list_actions(vim=None, action=None, namespace=None):
    """ Method provide list object from VDC action

    Args:
        vim - is vcloud director vim connector.
        action - is action for list ( vdc / org etc)
        namespace - must contain VDC / Org information.

    Returns:
        The return nothing
    """

    org_id = None
    myorgs = vim.get_org_list()
    # Resolve the org name given on the CLI to its uuid. The for/else
    # runs the else branch only when the loop finished WITHOUT break,
    # i.e. no org matched.
    for org in myorgs:
        if myorgs[org] == namespace.vcdorg:
            org_id = org
            break
    else:
        print(" Invalid organization.")
        return

    if action == 'vms' or namespace.action == 'vms':
        vm_dict = vim.get_vm_list(vdc_name=namespace.vcdvdc)
        print_vm_list(vm_dict=vm_dict)
    elif action == 'vapps' or namespace.action == 'vapps':
        vapp_dict = vim.get_vapp_list(vdc_name=namespace.vcdvdc)
        print_vapp(vapp_dict=vapp_dict)
    elif action == 'networks' or namespace.action == 'networks':
        # --osm switches to the OSM-formatted (filter-based) listing.
        if namespace.osm:
            osm_print(vim.get_network_list(filter_dict={}))
        else:
            print_network_list(vim.get_org(org_uuid=org_id))
    elif action == 'vdc' or namespace.action == 'vdc':
        if namespace.osm:
            osm_print(vim.get_tenant_list(filter_dict=None))
        else:
            print_vdc_list(vim.get_org(org_uuid=org_id))
    elif action == 'org' or namespace.action == 'org':
        print_org(org_dict=vim.get_org_list())
    else:
        return None
+
+
+def print_network_details(network_dict=None):
+ try:
+ network_table = PrettyTable(network_dict.keys())
+ entry = [network_dict.values()]
+ network_table.add_row(entry[0])
+ print network_table
+ except KeyError:
+ logger.error("wrong key {}".format(KeyError.message))
+ logger.logger.debug(traceback.format_exc())
+
+
+def osm_print(generic_dict=None):
+
+ try:
+ for element in generic_dict:
+ table = PrettyTable(element.keys())
+ entry = [element.values()]
+ table.add_row(entry[0])
+ print table
+ except KeyError:
+ logger.error("wrong key {}".format(KeyError.message))
+ logger.logger.debug(traceback.format_exc())
+
+
+def view_actions(vim=None, action=None, namespace=None):
+ org_id = None
+ orgs = vim.get_org_list()
+ for org in orgs:
+ if orgs[org] == namespace.vcdorg:
+ org_id = org
+ break
+ else:
+ print(" Invalid organization.")
+ return
+
+ myorg = vim.get_org(org_uuid=org_id)
+
+ # view org
+ if action == 'org' or namespace.action == 'org':
+ org_id = None
+ orgs = vim.get_org_list()
+ if namespace.uuid:
+ if namespace.org_name in orgs:
+ org_id = namespace.org_name
+ else:
+ # we need find UUID based on name provided
+ for org in orgs:
+ if orgs[org] == namespace.org_name:
+ org_id = org
+ break
+
+ logger.debug("Requesting view for orgs {}".format(org_id))
+ print_org_details(vim.get_org(org_uuid=org_id))
+
+ # view vapp action
+ if action == 'vapp' or namespace.action == 'vapp':
+ if namespace.vapp_name is not None and namespace.uuid:
+ logger.debug("Requesting vapp {} for vdc {}".format(namespace.vapp_name, namespace.vcdvdc))
+ vapp_dict = {}
+ vapp_uuid = namespace.vapp_name
+ # if request based on just name we need get UUID
+ if not namespace.uuid:
+ vapp_uuid = vim.get_vappid(vdc=namespace.vcdvdc, vapp_name=namespace.vapp_name)
+ if vapp_uuid is None:
+ print("Can't find vapp by given name {}".format(namespace.vapp_name))
+ return
+
+ print " namespace {}".format(namespace)
+ if vapp_dict is not None and namespace.osm:
+ vm_info_dict = vim.get_vminstance(vim_vm_uuid=vapp_uuid)
+ print vm_info_dict
+ if vapp_dict is not None and namespace.osm != True:
+ vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vapp_uuid, isuuid=True)
+ print_vapp(vapp_dict=vapp_dict)
+
+ # view network
+ if action == 'network' or namespace.action == 'network':
+ logger.debug("Requesting view for network {}".format(namespace.network_name))
+ network_uuid = namespace.network_name
+ # if request name based we need find UUID
+ # TODO optimize it or move to external function
+ if not namespace.uuid:
+ if not myorg.has_key('networks'):
+ print("Network {} is undefined in vcloud director for org {} vdc {}".format(namespace.network_name,
+ vim.name,
+ vim.tenant_name))
+ return
+
+ my_org_net = myorg['networks']
+ for network in my_org_net:
+ if my_org_net[network] == namespace.network_name:
+ network_uuid = network
+ break
+
+ print print_network_details(network_dict=vim.get_vcd_network(network_uuid=network_uuid))
+
+
def create_actions(vim=None, action=None, namespace=None):
    """Route 'create' subcommands to the connector.

    Args:
        vim - is Cloud director vim connector
        action - action for create ( network / vdc etc)
        namespace - parsed CLI arguments (network_name / vdc_name).

    Returns:
        The return xml content of respond or None
    """
    if action == 'network' or namespace.action == 'network':
        # BUG FIX: the original message had no {} placeholder, so the
        # network name passed to format() was silently dropped.
        logger.debug("Creating a network {} in vcloud director".format(namespace.network_name))
        network_uuid = vim.create_network(namespace.network_name)
        if network_uuid is not None:
            # BUG FIX: corrected "Crated" typo in the user-facing message.
            print ("Created new network {} and uuid: {}".format(namespace.network_name, network_uuid))
        else:
            print ("Failed create a new network {}".format(namespace.network_name))
    elif action == 'vdc' or namespace.action == 'vdc':
        # BUG FIX: same missing placeholder as above.
        logger.debug("Creating a new vdc {} in vcloud director.".format(namespace.vdc_name))
        vdc_uuid = vim.create_vdc(namespace.vdc_name)
        if vdc_uuid is not None:
            # BUG FIX: corrected "Crated" typo in the user-facing message.
            print ("Created new vdc {} and uuid: {}".format(namespace.vdc_name, vdc_uuid))
        else:
            print ("Failed create a new vdc {}".format(namespace.vdc_name))
    else:
        return None
+
+
def validate_uuid4(uuid_string):
    """Function validate that string contain valid uuid4

    Args:
        uuid_string - candidate UUID string

    Returns:
        True if the string parses as a UUID, False otherwise (including
        for non-string input, which previously escaped as TypeError).
    """
    try:
        uuid.UUID(uuid_string, version=4)
    except (ValueError, AttributeError, TypeError):
        # ValueError: malformed hex string; TypeError/AttributeError:
        # non-string input such as None -- the original let these escape.
        return False
    return True
+
+
def upload_image(vim=None, image_file=None):
    """Function upload image to vcloud director

    Args:
        vim - vcloud director vim connector.
        image_file - path of the image file to upload.

    Returns:
        True if the connector returned a valid catalog uuid; False on a
        vimconnException or when the returned uuid is missing/invalid.
    """
    try:
        catalog_uuid = vim.get_image_id_from_path(path=image_file, progress=True)
        if catalog_uuid is not None and validate_uuid4(catalog_uuid):
            print("Image uploaded and uuid {}".format(catalog_uuid))
            return True
    except vimconn.vimconnException as upload_exception:
        print("Failed uploaded {} image".format(image_file))
        print("Error Reason: {}".format(upload_exception.message))
    # Reached both after an exception and when no valid uuid came back.
    return False
+
+
def boot_image(vim=None, image_name=None, vm_name=None):
    """ Function boot image that resided in vcloud director.
    The image name can be UUID of name.

    Args:
        vim - vim connector
        image_name - image identified by UUID or text string.
        vm_name - vmname

    Returns:
        True if the instance booted with a valid uuid; None when the
        catalog cannot be resolved; False on any boot failure.

    NOTE(review): on the success path this reads the module-level
    'namespace' variable (set in __main__) rather than a parameter --
    confirm it is always defined before this function is called.
    """

    vim_catalog = None
    try:
        catalogs = vim.vca.get_catalogs()
        # Resolve the catalog id; both branches currently make the same
        # call whether image_name is a uuid or a plain name.
        if not validate_uuid4(image_name):
            vim_catalog = vim.get_catalogid(catalog_name=image_name, catalogs=catalogs)
            if vim_catalog is None:
                return None
        else:
            vim_catalog = vim.get_catalogid(catalog_name=image_name, catalogs=catalogs)
            if vim_catalog is None:
                return None

        print (" Booting {} image id {} ".format(vm_name, vim_catalog))
        vm_uuid, _ = vim.new_vminstance(name=vm_name, image_id=vim_catalog)
        if vm_uuid is not None and validate_uuid4(vm_uuid):
            print("Image booted and vm uuid {}".format(vm_uuid))
            vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vm_uuid, isuuid=True)
            if vapp_dict is not None:
                print_vapp(vapp_dict=vapp_dict)
            return True
    except vimconn.vimconnNotFoundException as notFound:
        print("Failed boot {} image".format(image_name))
        print(notFound.message)
    except vimconn.vimconnException as vimconError:
        print("Failed boot {} image".format(image_name))
        print(vimconError.message)
    except:
        # NOTE(review): bare except swallows every error (including
        # KeyboardInterrupt/SystemExit); consider narrowing.
        print("Failed boot {} image".format(image_name))


    return False
+
+
def image_action(vim=None, action=None, namespace=None):
    """ Dispatch image subcommands to their handlers.
        - upload image
        - boot image.
        - delete image ( not yet done )

    Args:
        vim - vcloud director connector
        action - string (upload/boot etc)
        namespace - contain other attributes image name etc

    Returns:
        The return nothing
    """

    requested = [action, namespace.action]
    if 'upload' in requested:
        upload_image(vim=vim, image_file=namespace.image)
    elif 'boot' in requested:
        boot_image(vim=vim, image_name=namespace.image, vm_name=namespace.vmname)
    else:
        return None
+
+
def vmwarecli(command=None, action=None, namespace=None):
    """ Entry point: connect to vCloud director and dispatch the command.

    Prompts interactively for any of user/password/host/org that were
    not supplied on the command line, builds a vimconnector, and routes
    the command (list/view/delete/create/image) to its handler.

    Args:
        command - top level command string (may also come from namespace).
        action - sub-action string (may also come from namespace).
        namespace - parsed argparse namespace.

    Returns:
        The return nothing
    """
    logger.debug("Namespace {}".format(namespace))
    urllib3.disable_warnings()

    vcduser = None
    vcdpasword = None
    vcdhost = None
    vcdorg = None

    # Python 2 compatibility for the interactive prompts below.
    if hasattr(__builtins__, 'raw_input'):
        input = raw_input

    # BUG FIX: the original guarded this prompt with
    # 'namespace.vcdvdc is None', so a missing username was never
    # prompted for; check the username argument itself.
    if namespace.vcduser is None:
        while True:
            vcduser = input("Enter vcd username: ")
            if vcduser is not None and len(vcduser) > 0:
                break
    else:
        vcduser = namespace.vcduser

    if namespace.vcdpassword is None:
        while True:
            vcdpasword = input("Please enter vcd password: ")
            if vcdpasword is not None and len(vcdpasword) > 0:
                break
    else:
        vcdpasword = namespace.vcdpassword

    if namespace.vcdhost is None:
        while True:
            vcdhost = input("Please enter vcd host name or ip: ")
            if vcdhost is not None and len(vcdhost) > 0:
                break
    else:
        vcdhost = namespace.vcdhost

    if namespace.vcdorg is None:
        while True:
            vcdorg = input("Please enter vcd organization name: ")
            if vcdorg is not None and len(vcdorg) > 0:
                break
    else:
        vcdorg = namespace.vcdorg

    try:
        vim = vimconnector(uuid=None,
                           name=vcdorg,
                           tenant_id=None,
                           tenant_name=namespace.vcdvdc,
                           url=vcdhost,
                           url_admin=vcdhost,
                           user=vcduser,
                           passwd=vcdpasword,
                           log_level="DEBUG",
                           config={'admin_username': namespace.vcdamdin, 'admin_password': namespace.vcdadminpassword})
        vim.vca = vim.connect()

    except vimconn.vimconnConnectionException:
        print("Failed connect to vcloud director. Please check credential and hostname.")
        return

    # list
    if command == 'list' or namespace.command == 'list':
        logger.debug("Client requested list action")
        # route request to list actions
        list_actions(vim=vim, action=action, namespace=namespace)

    # view action
    if command == 'view' or namespace.command == 'view':
        logger.debug("Client requested view action")
        view_actions(vim=vim, action=action, namespace=namespace)

    # delete action
    if command == 'delete' or namespace.command == 'delete':
        logger.debug("Client requested delete action")
        delete_actions(vim=vim, action=action, namespace=namespace)

    # create action
    if command == 'create' or namespace.command == 'create':
        logger.debug("Client requested create action")
        create_actions(vim=vim, action=action, namespace=namespace)

    # image action
    if command == 'image' or namespace.command == 'image':
        logger.debug("Client requested image action")
        image_action(vim=vim, action=action, namespace=namespace)
+
+
+if __name__ == '__main__':
+ defaults = {'vcdvdc': 'default',
+ 'vcduser': 'admin',
+ 'vcdpassword': 'admin',
+ 'vcdhost': 'https://localhost',
+ 'vcdorg': 'default',
+ 'debug': 'INFO'}
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-u', '--vcduser', help='vcloud director username', type=str)
+ parser.add_argument('-p', '--vcdpassword', help='vcloud director password', type=str)
+ parser.add_argument('-U', '--vcdamdin', help='vcloud director password', type=str)
+ parser.add_argument('-P', '--vcdadminpassword', help='vcloud director password', type=str)
+ parser.add_argument('-c', '--vcdhost', help='vcloud director host', type=str)
+ parser.add_argument('-o', '--vcdorg', help='vcloud director org', type=str)
+ parser.add_argument('-v', '--vcdvdc', help='vcloud director vdc', type=str)
+ parser.add_argument('-d', '--debug', help='debug level', type=int)
+
+ parser_subparsers = parser.add_subparsers(help='commands', dest='command')
+ sub = parser_subparsers.add_parser('list', help='List objects (VMs, vApps, networks)')
+ sub_subparsers = sub.add_subparsers(dest='action')
+
+ list_vms = sub_subparsers.add_parser('vms', help='list - all vm deployed in vCloud director')
+ list_vapps = sub_subparsers.add_parser('vapps', help='list - all vapps deployed in vCloud director')
+ list_network = sub_subparsers.add_parser('networks', help='list - all networks deployed')
+ list_network.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')
+
+ #list vdc
+ list_vdc = sub_subparsers.add_parser('vdc', help='list - list all vdc for organization accessible to you')
+ list_vdc.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')
+
+ list_org = sub_subparsers.add_parser('org', help='list - list of organizations accessible to you.')
+
+ create_sub = parser_subparsers.add_parser('create')
+ create_sub_subparsers = create_sub.add_subparsers(dest='action')
+ create_vms = create_sub_subparsers.add_parser('vms')
+ create_vapp = create_sub_subparsers.add_parser('vapp')
+ create_vapp.add_argument('uuid')
+
+ # add network
+ create_network = create_sub_subparsers.add_parser('network')
+ create_network.add_argument('network_name', action='store', help='create a network for a vdc')
+
+ # add VDC
+ create_vdc = create_sub_subparsers.add_parser('vdc')
+ create_vdc.add_argument('vdc_name', action='store', help='create a new VDC for org')
+
+ delete_sub = parser_subparsers.add_parser('delete')
+ del_sub_subparsers = delete_sub.add_subparsers(dest='action')
+ del_vms = del_sub_subparsers.add_parser('vms')
+ del_vapp = del_sub_subparsers.add_parser('vapp')
+ del_vapp.add_argument('uuid', help='view vapp based on UUID')
+
+ # delete network
+ del_network = del_sub_subparsers.add_parser('network')
+ del_network.add_argument('network_name', action='store',
+ help='- delete network for vcloud director by provided name')
+ del_network.add_argument('-u', '--uuid', default=False, action='store_true',
+ help='delete network for vcloud director by provided uuid')
+
+ # delete vdc
+ del_vdc = del_sub_subparsers.add_parser('vdc')
+
+ view_sub = parser_subparsers.add_parser('view')
+ view_sub_subparsers = view_sub.add_subparsers(dest='action')
+
+ view_vms_parser = view_sub_subparsers.add_parser('vms')
+ view_vms_parser.add_argument('uuid', default=False, action='store_true',
+ help='- View VM for specific uuid in vcloud director')
+ view_vms_parser.add_argument('name', default=False, action='store_true',
+ help='- View VM for specific vapp name in vcloud director')
+
+ # view vapp
+ view_vapp_parser = view_sub_subparsers.add_parser('vapp')
+ view_vapp_parser.add_argument('vapp_name', action='store',
+ help='- view vapp for specific vapp name in vcloud director')
+ view_vapp_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view vapp based on uuid')
+ view_vapp_parser.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')
+
+ # view network
+ view_network = view_sub_subparsers.add_parser('network')
+ view_network.add_argument('network_name', action='store',
+ help='- view network for specific network name in vcloud director')
+ view_network.add_argument('-u', '--uuid', default=False, action='store_true', help='view network based on uuid')
+
+ # view VDC command and actions
+ view_vdc = view_sub_subparsers.add_parser('vdc')
+ view_vdc.add_argument('vdc_name', action='store',
+ help='- View VDC based and action based on provided vdc uuid')
+ view_vdc.add_argument('-u', '--uuid', default=False, action='store_true', help='view vdc based on uuid')
+
+ # view organization command and actions
+ view_org = view_sub_subparsers.add_parser('org')
+ view_org.add_argument('org_name', action='store',
+ help='- View VDC based and action based on provided vdc uuid')
+ view_org.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')
+
+ # upload image action
+ image_sub = parser_subparsers.add_parser('image')
+ image_subparsers = image_sub.add_subparsers(dest='action')
+ upload_parser = image_subparsers.add_parser('upload')
+ upload_parser.add_argument('image', default=False, action='store', help='- valid path to OVF image ')
+ upload_parser.add_argument('catalog', default=False, action='store_true', help='- catalog name')
+
+ # boot vm action
+ boot_parser = image_subparsers.add_parser('boot')
+ boot_parser.add_argument('image', default=False, action='store', help='- Image name')
+ boot_parser.add_argument('vmname', default=False, action='store', help='- VM name')
+ boot_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')
+
+ namespace = parser.parse_args()
+ # put command_line args to mapping
+ command_line_args = {k: v for k, v in vars(namespace).items() if v}
+
+ d = defaults.copy()
+ d.update(os.environ)
+ d.update(command_line_args)
+
+ logger = logging.getLogger('mano.vim.vmware')
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ ch = logging.StreamHandler()
+ ch.setLevel(str.upper(d['debug']))
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+ logger.setLevel(getattr(logging, str.upper(d['debug'])))
+ logger.info(
+ "Connecting {} username: {} org: {} vdc: {} ".format(d['vcdhost'], d['vcduser'], d['vcdorg'], d['vcdvdc']))
+
+ logger.debug("command: \"{}\" actio: \"{}\"".format(d['command'], d['action']))
+
+ # main entry point.
+ vmwarecli(namespace=namespace)
--- /dev/null
+# Container image for OSM RO (openmano), installed from the osm-download
+# ReleaseTHREE debian repository. Needs two mysql databases (see ENV below).
+FROM ubuntu:xenial
+
+# MAINTAINER is deprecated since Docker 1.13; use a label instead
+LABEL maintainer="Gennadiy Dubina <gennadiy.dubina@dataat.com>; Alfonso Tierno <alfonso.tiernosepulveda@telefoncia.com>"
+
+#avoid extra information from packages
+RUN echo 'path-exclude /usr/share/doc/*\n\
+path-include /usr/share/doc/*/copyright\n\
+path-exclude /usr/share/man/*\n\
+path-exclude /usr/share/groff/*\n\
+path-exclude /usr/share/info/*\n\
+path-exclude /usr/share/lintian/*\n\
+path-exclude /usr/share/linda/*\n'\
+> /etc/dpkg/dpkg.cfg.d/01_nodoc && \
+    echo 'APT::Install-Recommends "false";\n\
+APT::AutoRemove::RecommendsImportant "false";\n\
+APT::AutoRemove::SuggestsImportant "false";\n'\
+> /etc/apt/apt.conf.d/99_norecommends
+
+
+RUN apt-get update && apt-get install -y curl mysql-client software-properties-common \
+    && add-apt-repository -y "deb http://osm-download.etsi.org/repository/osm/debian/ReleaseTHREE stable SO UI RO IM osmclient openvim" \
+    && curl "http://osm-download.etsi.org/repository/osm/debian/ReleaseTHREE/OSM%20ETSI%20Release%20Key.gpg" | apt-key add - \
+    && apt-get update \
+    && apt-get install -y python-osm-ro \
+    && rm -rf /var/lib/apt/lists/* \
+    && mkdir -p /bin/RO
+
+
+VOLUME /opt/openmano/logs
+
+EXPOSE 9090
+
+# Two mysql databases are needed (DB and DB_OVIM). Can be hosted on same or separated containers
+# These ENV must be provided
+ENV RO_DB_HOST=""
+ENV RO_DB_OVIM_HOST=""
+    # if empty RO_DB_HOST is assumed
+
+# These ENV should be provided first time for creating database. It will create and init only if empty!
+ENV RO_DB_ROOT_PASSWORD=""
+ENV RO_DB_OVIM_ROOT_PASSWORD=""
+    # if empty RO_DB_ROOT_PASSWORD is assumed
+
+# These ENV can be provided, but default values are ok
+ENV RO_DB_USER=mano
+ENV RO_DB_OVIM_USER=mano
+ENV RO_DB_PASSWORD=manopw
+ENV RO_DB_OVIM_PASSWORD=manopw
+ENV RO_DB_PORT=3306
+ENV RO_DB_OVIM_PORT=3306
+ENV RO_DB_NAME=mano_db
+ENV RO_DB_OVIM_NAME=mano_vim_db
+
+
+CMD RO-start.sh
+
--- /dev/null
+# Docker-compose deployment of OSM RO with its two mysql databases:
+# mano_db for RO itself and mano_vim_db for the embedded ovim.
+version: '3'
+services:
+  osm-ro-db:
+    image: mysql:5
+    container_name: osm-ro-db
+    restart: always
+    environment:
+      # root password is randomized; RO only uses the mano user below
+      - MYSQL_RANDOM_ROOT_PASSWORD=true
+      - MYSQL_DATABASE=mano_db
+      - MYSQL_USER=mano
+      - MYSQL_PASSWORD=manopw
+  osm-ro-ovim-db:
+    image: mysql:5
+    container_name: osm-ro-ovim-db
+    restart: always
+    environment:
+      - MYSQL_RANDOM_ROOT_PASSWORD=true
+      - MYSQL_DATABASE=mano_vim_db
+      - MYSQL_USER=mano
+      - MYSQL_PASSWORD=manopw
+  osm-ro:
+    build:
+      context: ../
+      dockerfile: docker/Dockerfile-local
+    image: osm/ro
+    container_name: osm-ro
+    restart: always
+    environment:
+      # credentials must match the MYSQL_* values of the two db services above
+      - RO_DB_USER=mano
+      - RO_DB_PASSWORD=manopw
+      - RO_DB_NAME=mano_db
+      - RO_DB_HOST=osm-ro-db
+      - RO_DB_OVIM_USER=mano
+      - RO_DB_OVIM_PASSWORD=manopw
+      - RO_DB_OVIM_NAME=mano_vim_db
+      - RO_DB_OVIM_HOST=osm-ro-ovim-db
+    ports:
+      - "9090:9090"
+    volumes:
+      - /var/log/osm/openmano/logs:/var/log/osm/openmano/logs
+    depends_on:
+      - osm-ro-db
+      - osm-ro-ovim-db
+    links:
+      # NOTE(review): links is legacy; services on the default compose network
+      # already resolve each other by name - confirm before removing
+      - osm-ro-db
+      - osm-ro-ovim-db
--- /dev/null
+# Development docker-compose: a single mysql instance (root password osm4u)
+# plus the RO container built locally from docker/Dockerfile-local.
+version: '2'
+services:
+  osm-ro-db:
+    image: mysql:5
+    container_name: osm-ro-db
+    restart: always
+    environment:
+      - MYSQL_ROOT_PASSWORD=osm4u
+  osm-ro:
+    build:
+      context: ../
+      dockerfile: docker/Dockerfile-local
+    image: osm/ro
+    container_name: osm-ro
+    restart: always
+    environment:
+      - RO_DB_ROOT_PASSWORD=osm4u
+      # NOTE(review): the RO image documents RO_DB_HOST (not DB_HOST) -
+      # confirm which variable the container startup script actually reads
+      - DB_HOST=osm-ro-db
+    ports:
+      - "9090:9090"
+    volumes:
+      - /var/log/osm/openmano/logs:/var/log/osm
+    depends_on:
+      - osm-ro-db
+    links:
+      - osm-ro-db
+
--- /dev/null
+# Image used to run the RO unit tests with tox (see docker/tests.yml);
+# the source tree and the log directory are mounted as volumes.
+FROM ubuntu:xenial
+
+VOLUME /opt/openmano
+VOLUME /var/log/osm
+
+# keep apt from prompting during the build
+ENV DEBIAN_FRONTEND=noninteractive
+
+
+RUN apt-get update && \
+    apt-get -y install python python-pip mysql-client libmysqlclient-dev && \
+    pip install tox
+
+ENTRYPOINT ["tox"]
--- /dev/null
+# This file is intended to be used by the developer in the local machine
+# in order to run the tests in isolation
+# To do so, cd into osm_ro and run:
+# docker-compose -f ../docker/tests.yml run --rm tox -c <folder to be tested, eg. wim>
+version: '2'
+services:
+ test-db:
+ image: mysql:5
+ container_name: test-db
+ restart: always
+ environment:
+ - MYSQL_ROOT_PASSWORD=osm4u
+ - MYSQL_USER=mano
+ - MYSQL_PASSWORD=manopw
+ - MYSQL_DATABASE=mano_db
+ tox:
+ container_name: tox
+ depends_on:
+ - test-db
+ build:
+ context: ../
+ dockerfile: docker/tests.dockerfile
+ restart: always
+ environment:
+ - RO_DB_ROOT_PASSWORD=osm4u
+ - TEST_DB_HOST=test-db
+ - TEST_DB_USER=mano
+ - TEST_DB_PASSWORD=manopw
+ - TEST_DB_DATABASE=mano_db
+ ports:
+ - "9090:9090"
+ volumes:
+ - ..:/opt/openmano
+ - /tmp/osm/openmano/logs:/var/log/osm
+ entrypoint:
+ - tox
+ working_dir: /opt/openmano/osm_ro
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: "0.1"
+instance:
+ name: complex2-instance
+ description: Example of multisite deployment
+ datacenter: TEST-dc #needed a default datacenter
+ scenario: complex2
+ vnfs:
+ VNF2vms: # vnf name in the scenario
+ datacenter: TEST-dc #by default is datacenter above
+ VNF3:
+ datacenter: openstack
+ networks:
+ dataconn1:
+ sites:
+ - datacenter: openstack
+ netmap-create: null
+ netmap-use: net-corpA
+ - datacenter: TEST-dc
+ netmap-use: data_net
+ dataconn2:
+ sites:
+ - datacenter: openstack
+ netmap-create: null
+ netmap-use: net-corpA
+ - datacenter: TEST-dc
+ netmap-create: datacon
+ default:
+ sites:
+ - datacenter: openstack
+ netmap-use: default
+ - datacenter: TEST-dc
+ netmap-use: default
+
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: "0.1"
+instance:
+ name: complex4-instance
+ description: Example of IP parameters in networks
+ scenario: complex4
+ networks:
+ dataconn1:
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 10.11.1.0/24
+ gateway-address: 10.11.1.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 10.11.1.100
+ count: 150
+ interfaces:
+ - vnf: VNF1
+ vnf_interface: in
+ ip_address: 10.11.1.2
+ - vnf: VNF2
+ vnf_interface: in
+ ip_address: 10.11.1.3
+ dataconn2:
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 10.11.2.0/24
+ gateway-address: 10.11.2.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 10.11.2.100
+ count: 150
+ interfaces:
+ - vnf: VNF1
+ vnf_interface: out
+ ip_address: 10.11.2.2
+ - vnf: VNF2
+ vnf_interface: out
+ ip_address: 10.11.2.3
+
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: complex
+ description: Complex network scenario consisting of 4 VNFs interconnected
+ vnfs:
+ linux1: # vnf/net name in the scenario
+ vnf_name: linux # VNF name as introduced in OPENMANO DB
+ node1:
+ vnf_name: dataplaneVNF1
+ node2:
+ vnf_name: dataplaneVNF2
+ node3:
+ vnf_name: dataplaneVNF2
+ networks:
+ dataconn1:
+ interfaces:
+ - node1: xe0
+ - node2: xe0
+ dataconn2:
+ interfaces:
+ - node1: xe1
+ - node2: xe1
+ dataconn3:
+ interfaces:
+ - node1: xe2
+ - node3: xe0
+ dataconn4:
+ interfaces:
+ - node1: xe3
+ - node3: xe1
+ data-sriov1:
+ interfaces:
+ - node2: xe2
+ - node3: xe2
+ bridge1:
+ interfaces:
+ - linux1: eth0
+ - node2: control
+ - node3: control
+ default:
+ external: true
+ interfaces:
+ - node1: mgmt
+ - node2: mgmt
+ - node3: mgmt
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: complex2
+ description: Complex network scenario consisting of 2 VNFs interconnected
+ public: false
+ vnfs:
+ VNF2vms: # vnf name in the scenario
+ vnf_name: dataplaneVNF_2VMs # openmano vnf name
+ VNF3:
+ vnf_name: dataplaneVNF3
+ networks:
+ dataconn1:
+ interfaces:
+ - VNF2vms: in
+ - VNF3: data0
+ dataconn2:
+ interfaces:
+ - VNF2vms: out
+ - VNF3: data1
+ default:
+ external: true
+ interfaces:
+ - VNF2vms: control0
+ - VNF2vms: control1
+ - VNF3: mgmt
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: "0.3"
+scenario:
+ name: complex3
+ description: Complex network scenario consisting of 2 VNFs interconnected and IP profiles in the networks
+ public: false
+ vnfs:
+ VNF2vms: # vnf name in the scenario
+ vnf_name: dataplaneVNF_2VMs_v02 # openmano vnf name
+ internal_connections:
+ datanet:
+ ip-profile: Null
+ VNF3:
+ vnf_name: dataplaneVNF3
+ networks:
+ dataconn1:
+ type: e-lan
+ implementation: underlay
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 10.1.1.0/24
+ gateway-address: 10.1.1.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 10.1.1.100
+ count: 150
+ interfaces:
+ - vnf: VNF2vms
+ vnf_interface: in
+ ip_address: 10.1.1.2
+ - vnf: VNF3
+ vnf_interface: data0
+ ip_address: 10.1.1.3
+ dataconn2:
+ type: e-line
+ implementation: underlay
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 10.1.2.0/24
+ gateway-address: 10.1.2.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 10.1.2.100
+ count: 150
+ interfaces:
+ - vnf: VNF2vms
+ vnf_interface: out
+ ip_address: 10.1.2.2
+ - vnf: VNF3
+ vnf_interface: data1
+ ip_address: 10.1.2.3
+ default:
+ type: e-lan
+ implementation: overlay
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 10.1.3.0/24
+ gateway-address: 10.1.3.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 10.1.3.100
+ count: 150
+ interfaces:
+ - vnf: VNF2vms
+ vnf_interface: control0
+ ip_address: 10.1.3.2
+ - vnf: VNF2vms
+ vnf_interface: control1
+ ip_address: 10.1.3.3
+ - vnf: VNF3
+ vnf_interface: mgmt
+ ip_address: 10.1.3.4
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: "0.3"
+scenario:
+ name: complex4
+ description: Complex network scenario consisting of 2 VNFs interconnected by overlay networks and IP profiles in the networks
+ public: false
+ vnfs:
+ VNF1: # vnf name in the scenario
+ vnf_name: linux_2VMs_v02 # openmano vnf name
+ internal_connections:
+ datanet:
+ ip-profile: Null
+ VNF2:
+ vnf_name: linux_2VMs_v02
+ networks:
+ dataconn1:
+ type: e-lan
+ implementation: overlay
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 10.1.1.0/24
+ gateway-address: 10.1.1.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 10.1.1.100
+ count: 150
+ interfaces:
+ - vnf: VNF1
+ vnf_interface: in
+ ip_address: 10.1.1.2
+ - vnf: VNF2
+ vnf_interface: in
+ ip_address: 10.1.1.3
+ dataconn2:
+ type: e-lan
+ implementation: overlay
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 10.1.2.0/24
+ gateway-address: 10.1.2.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 10.1.2.100
+ count: 150
+ interfaces:
+ - vnf: VNF1
+ vnf_interface: out
+ ip_address: 10.1.2.2
+ - vnf: VNF2
+ vnf_interface: out
+ ip_address: 10.1.2.3
+ default:
+ type: e-lan
+ implementation: overlay
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 10.1.3.0/24
+ gateway-address: 10.1.3.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 10.1.3.100
+ count: 150
+ interfaces:
+ - vnf: VNF1
+ vnf_interface: control0
+ ip_address: 10.1.3.2
+ - vnf: VNF1
+ vnf_interface: control1
+ ip_address: 10.1.3.3
+ - vnf: VNF2
+ vnf_interface: control0
+ ip_address: 10.1.3.4
+ - vnf: VNF2
+ vnf_interface: control1
+ ip_address: 10.1.3.5
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: complex5
+ description: Complex network scenario consisting of 2 dataplane VNFs using image name instead of image location
+ public: false
+ vnfs:
+ VNF1:
+ vnf_name: dataplaneVNF4
+ VNF2:
+ vnf_name: dataplaneVNF4
+ networks:
+ dataconn1:
+ interfaces:
+ - VNF1: in
+ - VNF2: in
+ dataconn2:
+ interfaces:
+ - VNF1: out
+ - VNF2: out
+ default:
+ external: true
+ interfaces:
+ - VNF1: control0
+ - VNF1: control1
+ - VNF2: control0
+ - VNF2: control1
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: vnf_additional_disk_based_image
+ description: Just deploy vnf_2_disks
+ public: false # if available for other tenants
+ vnfs:
+ vnf_2_disks: # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id: 0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e #preferred id method
+      vnf_name: vnf_additional_disk_based_image #can fail if several vnfs match this name
+ #graph: {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ networks:
+ internal:
+ # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+ type: bridge
+ external: true #this will be connected outside
+ interfaces:
+ - vnf_2_disks: mgmt0
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: vnf_additional_disk_empty_volume
+ description: Just deploy vnf_2_disks
+ public: false # if available for other tenants
+ vnfs:
+ vnf_2_disks: # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id: 0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e #preferred id method
+      vnf_name: vnf_additional_disk_empty_volume #can fail if several vnfs match this name
+ #graph: {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ networks:
+ internal:
+ # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+ type: bridge
+ external: true #this will be connected outside
+ interfaces:
+ - vnf_2_disks: mgmt0
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: vnf_floating_ip
+ description: vnf_floating_ip
+ public: false # if available for other tenants
+ vnfs:
+ vnf_floating_ip: # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id: 0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e #preferred id method
+      vnf_name: vnf_floating_ip #can fail if several vnfs match this name
+ #graph: {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ networks:
+ internal:
+ # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+ type: bridge
+ external: true #this will be connected outside
+ interfaces:
+ - vnf_floating_ip: mgmt0
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: vnf_no_additional_devices
+ description: Just deploy vnf_2_disks
+ public: false # if available for other tenants
+ vnfs:
+ vnf_2_disks: # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id: 0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e #preferred id method
+      vnf_name: vnf_no_additional_devices #can fail if several vnfs match this name
+ #graph: {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ networks:
+ internal:
+ # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+ type: bridge
+ external: true #this will be connected outside
+ interfaces:
+ - vnf_2_disks: mgmt0
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: vnf_no_port_security
+ description: vnf_no_port_security
+ public: false # if available for other tenants
+ vnfs:
+ vnf_no_port_security: # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id: 0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e #preferred id method
+      vnf_name: vnf_no_port_security #can fail if several vnfs match this name
+ #graph: {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ networks:
+ internal:
+ # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+ type: bridge
+ external: true #this will be connected outside
+ interfaces:
+ - vnf_no_port_security: mgmt0
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: simple-cloud-init
+ description: Simple network scenario consisting of a single VNF connected to an external network
+ vnfs:
+ linux1: # vnf/net name in the scenario
+ vnf_name: linux-cloud-init # VNF name as introduced in OPENMANO DB
+ networks:
+ mgmt: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - linux1: eth0 # Node and its interface
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: simple
+ description: Simple network scenario consisting of a single VNF connected to an external network
+ vnfs:
+ linux1: # vnf/net name in the scenario
+ vnf_name: linux # VNF name as introduced in OPENMANO DB
+ networks:
+ default: # provide a name for this net or connection
+ external: true
+ interfaces:
+ - linux1: eth0 # Node and its interface
+
--- /dev/null
+nsd:nsd-catalog:
+ nsd:
+ - id: 2linux-sriov
+ name: 2linux_sriov
+ short-name: 2linux_sriov
+ description: Generated by OSM package generator
+ vendor: OSM
+ version: '1.0'
+
+ # Place the logo as png in icons directory and provide the name here
+ logo: osm_2x.png
+
+ # Specify the VNFDs that are part of this NSD
+ constituent-vnfd:
+ # The member-vnf-index needs to be unique, starting from 1
+ # vnfd-id-ref is the id of the VNFD
+ # Multiple constituent VNFDs can be specified
+ - member-vnf-index: 1
+ vnfd-id-ref: linux-sriov
+ - member-vnf-index: 2
+ vnfd-id-ref: linux-sriov
+ scaling-group-descriptor:
+ - name: "scaling_cirros"
+ vnfd-member:
+ - count: 1
+ member-vnf-index-ref: 1
+ min-instance-count: 0
+ max-instance-count: 10
+ scaling-policy:
+ - scaling-type: "manual"
+ cooldown-time: 10
+ threshold-time: 10
+ name: manual_scale
+ vld:
+ # Networks for the VNFs
+ - id: mgmt
+ mgmt-network: 'true'
+ name: mgmt
+ type: ELAN
+ # vim-network-name: <update>
+ # provider-network:
+ # overlay-type: VLAN
+ # segmentation_id: <update>
+ vnfd-connection-point-ref:
+ # Specify the constituent VNFs
+ # member-vnf-index-ref - entry from constituent vnf
+ # vnfd-id-ref - VNFD id
+ # vnfd-connection-point-ref - connection point name in the VNFD
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: linux-sriov
+ vnfd-connection-point-ref: eth0
+ - member-vnf-index-ref: 2
+ vnfd-id-ref: linux-sriov
+ vnfd-connection-point-ref: eth0
+ - id: sriov-vld
+ name: sriov_vld
+ type: ELAN
+ # vim-network-name: <update>
+ # provider-network:
+ # overlay-type: VLAN
+ # segmentation_id: <update>
+ vnfd-connection-point-ref:
+ # Specify the constituent VNFs
+ # member-vnf-index-ref - entry from constituent vnf
+ # vnfd-id-ref - VNFD id
+ # vnfd-connection-point-ref - connection point name in the VNFD
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: linux-sriov
+ vnfd-connection-point-ref: sriov0
+ - member-vnf-index-ref: 2
+ vnfd-id-ref: linux-sriov
+ vnfd-connection-point-ref: sriov0
+
--- /dev/null
+nsd:nsd-catalog:
+ nsd:
+ - id: 3vdu_2vnf_nsd
+ name: 3vdu_2vnf_ns-name
+ short-name: 3vdu_2vnf-sname
+ description: 2 vnfs, each one with 3 cirros vdu
+ vendor: OSM
+ version: '1.0'
+
+ # Place the logo as png in icons directory and provide the name here
+ logo: osm_2x.png
+
+ # Specify the VNFDs that are part of this NSD
+ constituent-vnfd:
+ # The member-vnf-index needs to be unique, starting from 1
+ # vnfd-id-ref is the id of the VNFD
+ # Multiple constituent VNFDs can be specified
+ - member-vnf-index: 1
+ vnfd-id-ref: 3vdu_vnfd
+ - member-vnf-index: 2
+ vnfd-id-ref: 3vdu_vnfd
+
+ ip-profiles:
+ - description: Inter VNF Link
+ ip-profile-params:
+ gateway-address: 31.31.31.210
+ ip-version: ipv4
+ subnet-address: 31.31.31.0/24
+ dns-server:
+ - address: 8.8.8.8
+ - address: 8.8.8.9
+ dhcp-params:
+ count: 200
+ start-address: 31.31.31.2
+ name: ipprofileA
+
+
+ vld:
+ # Networks for the VNFs
+ - id: vld1
+ mgmt-network: 'true'
+ name: vld1-name
+ short-name: vld1-sname
+ type: ELAN
+ # vim-network-name: <update>
+ # provider-network:
+ # overlay-type: VLAN
+ # segmentation_id: <update>
+ ip-profile-ref: ipprofileA
+ vnfd-connection-point-ref:
+ # Specify the constituent VNFs
+ # member-vnf-index-ref - entry from constituent vnf
+ # vnfd-id-ref - VNFD id
+ # vnfd-connection-point-ref - connection point name in the VNFD
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: 3vdu_vnfd
+ vnfd-connection-point-ref: eth0
+ - member-vnf-index-ref: 2
+ vnfd-id-ref: 3vdu_vnfd
+ vnfd-connection-point-ref: eth0
--- /dev/null
+nsd:nsd-catalog:
+ nsd:
+ - id: 3vdu_2vnf_1vnffg_nsd
+ name: 3vdu_2vnf_1vnffg_ns-name
+ short-name: 3vdu_2vnf_1vnffg-sname
+ description: 3 vnfs, each one with 2 cirros vdu, with 1 vnffg connecting the vnfs
+ vendor: OSM
+ version: '1.0'
+
+ logo: osm_2x.png
+
+ constituent-vnfd:
+ # The member-vnf-index needs to be unique, starting from 1
+ # vnfd-id-ref is the id of the VNFD
+ # Multiple constituent VNFDs can be specified
+ - member-vnf-index: 1
+ vnfd-id-ref: 2vdu_vnfd
+ - member-vnf-index: 2
+ vnfd-id-ref: 2vdu_vnfd
+ - member-vnf-index: 3
+ vnfd-id-ref: 2vdu_vnfd
+
+ ip-profiles:
+ - description: Inter VNF Link
+ ip-profile-params:
+ gateway-address: 31.31.31.210
+ ip-version: ipv4
+ subnet-address: 31.31.31.0/24
+ dns-server:
+ - address: 8.8.8.8
+ - address: 8.8.8.9
+ dhcp-params:
+ count: 200
+ start-address: 31.31.31.2
+ name: ipprofileA
+
+
+ vld:
+ # Networks for the VNFs
+ - id: vld1
+ name: vld1-name
+ short-name: vld1-sname
+ type: ELAN
+ # vim-network-name: <update>
+ # provider-network:
+ # overlay-type: VLAN
+ # segmentation_id: <update>
+ ip-profile-ref: ipprofileA
+ vnfd-connection-point-ref:
+ # Specify the constituent VNFs
+ # member-vnf-index-ref - entry from constituent vnf
+ # vnfd-id-ref - VNFD id
+ # vnfd-connection-point-ref - connection point name in the VNFD
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-connection-point-ref: eth0
+ - member-vnf-index-ref: 2
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-connection-point-ref: eth0
+ - member-vnf-index-ref: 3
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-connection-point-ref: eth0
+
+
+ vnffgd:
+ # VNF Forwarding Graph Descriptors
+ - id: vnffg1
+ name: vnffg1-name
+ short-name: vnffg1-sname
+ description: vnffg1-description
+ vendor: vnffg1-vendor
+ version: '1.0'
+ rsp:
+ - id: rsp1
+ name: rsp1-name
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: 2
+ order: 0
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-ingress-connection-point-ref: eth0
+ vnfd-egress-connection-point-ref: eth0
+ - member-vnf-index-ref: 3
+ order: 1
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-ingress-connection-point-ref: eth0
+ vnfd-egress-connection-point-ref: eth0
+ classifier:
+ - id: class1
+ name: class1-name
+ rsp-id-ref: rsp1
+ member-vnf-index-ref: 1
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-connection-point-ref: eth0
+ match-attributes:
+ - id: match1
+ ip-proto: 6 # TCP
+ source-ip-address: 10.0.0.1
+ destination-ip-address: 10.0.0.2
+ source-port: 0
+ destination-port: 80
+ - id: match2
+ ip-proto: 6 # TCP
+ source-ip-address: 10.0.0.1
+ destination-ip-address: 10.0.0.3
+ source-port: 0
+ destination-port: 80
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+name: insert a name for the scenario
+description: insert a description for the scenario
+topology:
+ nodes:
+ vnf1: # vnf/net name in the scenario
+ type: VNF # VNF, network, external_network (if it is a datacenter network)
+ VNF model: vnf_model1 # VNF name as introduced in OPENMANO DB
+ # vnf_id: 519f03ee-8ab6-11e4-ab4c-52540056c317 # Optionally, instead of the VNF name, the VNF id in Openmano DB can be used
+ vnf2:
+ type: VNF
+ VNF model: vnf_model2
+ # Optional information for display in the openmano-gui: graphical position of the node and its interfaces
+ graph: {"y":399,"x":632,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ bridge1: # Bridge networks must be declared in this section if we want to interconnect VNFs using a Linux bridge
+ type: network
+ model: bridge_net # 'bridge_net' or 'dataplane_net' for 'network' type
+ default: # External networks (datacenter nets) must be declared in this section if we want to interconnect VNFs to them
+ type: external_network
+ model: default # datacenter net name, as introduced in OPENMANO DB
+ connections: # In this section, connections between VNFs and networks are explicited
+ datanet: # name
+ # Data plane connections do not need to include a bridge since they are built through the Openflow Controller
+ nodes: # nodes that will be connected: one or several vnfs, and optionally one additional network declared in nodes section
+ - vnf1: xe0 # First node and its interface to be connected (interfaces must match to one in the VNF descriptor)
+ - vnf2: xe0 # Second node and its interface
+ control net:
+ # Control plane connections must include a bridge network in the list of nodes
+ nodes:
+ - bridge1: null # Bridge networks must be included if we want to interconnect the nodes to that network
+ - vnf1: eth1
+ - vnf2: eth1
+ external net:
+ # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+ nodes:
+ - default: null # Datacenter networks (external networks) must be included if we want to interconnect the nodes to that network
+ - vnf1: eth0
+ - vnf2: eth0
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: 2
+scenario:
+ name: insert a name for the scenario
+ description: insert a description for the scenario
+ public: false # if available for other tenants
+ vnfs:
+ vnf1: # vnf name in the scenario
+ #identify an already openmano uploaded VNF either by vnf_id (uuid, prefered) or vnf_name
+ vnf_id: fb356022-f664-11e5-a1e7-0800273e724c #prefered id method
+ #vnf_name: openmano_vnf_name #can fail if several vnfs matches this name
+ #graph: {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ vnf2:
+ vnf_name: vnf_name_2 # can fail if several vnfs matches this name
+ graph: {"y":399,"x":632,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+ networks:
+ net1: # network name in the scenario
+ #optional type, deduced from interfaces
+ type: dataplane # "dataplane", "bridge"
+ #graph: {}
+ interfaces: # nodes that will be connected: one or several vnfs
+ - vnf1: xe0 # First node and its interface to be connected (interfaces must match to one in the VNF descriptor)
+ - vnf2: xe0 # Second node and its interface
+ control net:
+ # Control plane connections must include a bridge network in the list of nodes
+ interfaces:
+ - vnf1: eth1
+ - vnf2: eth1
+ out:
+ # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+ type: dataplane
+ external: true #this will be connected outside
+ interfaces:
+ - vnf1: xe1
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+#The mapping is composed of a list of compute nodes. Each compute nodes has two elements:
+#"compute_node": name to identify the compute node within the datacenter
+#"ports": list of ports mapped to a switch for that compute node.
+#The information to identify the SDN controller and the dataplane switch is obtained from the datacenter information
+- compute_node: "compute node 1"
+ ports:
+ #Each mapped port contains the following information:
+ #"pci": pci address of the port in the compute node. This is a mandatory parameter
+ #"switch_mac": MAC address of the corresponding port in the dataplane switch.
+ #"switch_port": Openflow name of the port in the dataplane switch.
+ #"switch_mac" or "switch_port" must be specified. Both of them could be specified
+ - pci: "0000:81:00.0"
+ switch_port: "port-2/1"
+ - pci: "0000:81:00.1"
+ switch_mac: "52:54:00:94:21:22"
+- compute_node: "compute node 2"
+ ports:
+ - pci: "0000:81:00.0"
+ switch_port: "port-2/3"
+ switch_mac: "52:54:00:94:22:21"
+ - pci: "0000:81:00.1"
+ switch_port: "port-2/4"
+ switch_mac: "52:54:00:94:22:22"
--- /dev/null
+##
+# Copyright 2019 ETSI
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+########################################################################
+
# Base image for building the OSM RO container from debian packages.
FROM ubuntu:18.04

# MAINTAINER is deprecated; LABEL is the supported replacement.
# NOTE(review): original had "telefoncia.com" — assumed typo for "telefonica.com"; confirm.
LABEL maintainer="Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"

RUN apt-get update && apt-get -y install curl software-properties-common git tox python3-pip \
    && python3 -m pip install --upgrade pip && python3 -m pip install pyangbind networking-l2gw

# OSM debian repository coordinates; override at build time with --build-arg.
ARG REPOSITORY_BASE=http://osm-download.etsi.org/repository/osm/debian
ARG RELEASE=ReleaseEIGHT-daily
ARG REPOSITORY_KEY=OSM%20ETSI%20Release%20Key.gpg
ARG REPOSITORY=testing

RUN curl ${REPOSITORY_BASE}/${RELEASE}/${REPOSITORY_KEY} | apt-key add -
RUN add-apt-repository -y "deb ${REPOSITORY_BASE}/${RELEASE} ${REPOSITORY} IM common openvim" && apt-get update

ARG RO_VERSION
ARG IM_VERSION

# 'temp' holds the locally generated RO .deb packages (see the build script).
COPY temp /app
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-osm-im${IM_VERSION} \
    && DEBIAN_FRONTEND=noninteractive dpkg -i --force-depends /app/*.deb \
    && DEBIAN_FRONTEND=noninteractive apt-get -y -f install

EXPOSE 9090

# Two mysql databases are needed (DB and DB_OVIM). Can be hosted on same or separated containers
# These ENV must be provided
ENV RO_DB_HOST=""
ENV RO_DB_OVIM_HOST=""
    # if empty RO_DB_HOST is assumed

# These ENV should be provided first time for creating database. It will create and init only if empty!
ENV RO_DB_ROOT_PASSWORD=""
ENV RO_DB_OVIM_ROOT_PASSWORD=""
    # if empty RO_DB_ROOT_PASSWORD is assumed

# These ENV can be provided, but default values are ok
ENV RO_DB_USER=mano
ENV RO_DB_OVIM_USER=mano
ENV RO_DB_PASSWORD=manopw
ENV RO_DB_OVIM_PASSWORD=manopw
ENV RO_DB_PORT=3306
ENV RO_DB_OVIM_PORT=3306
ENV RO_DB_NAME=mano_db
ENV RO_DB_OVIM_NAME=mano_vim_db

# Shell form kept on purpose: RO-start.sh is expected on PATH from the installed packages.
CMD RO-start.sh
--- /dev/null
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+[[ "$*" == *--help* ]] && echo \
+"This script tests docker build based on debian packages. It generates a docker image bases on Dockerfile-devops, " \
+"prints package information and if desired updates OSM RO docker with the generated image.
+Generated packages are stored at './temp' folder.
+Options:
+ --help show this help
+ --no-cache Use if found problems looking for packages
+ --update Use to update OSM, RO docker with this image" && exit 0
+
+[[ "$*" == *--no-cache* ]] && no_cache="--no-cache" || no_cache=""
+[[ "$*" == *--update* ]] && update_osm="k8s" || update_osm=""
+
+HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+export RO_BASE=$(dirname $HERE)
+
+# clean
+docker rm -f ro_pkg 2>/dev/null && echo docker ro_pkg removed
+rm -rf $HERE/temp/*
+find $RO_BASE -name "*.pyc" -exec rm {} ";"
+mkdir -p $HERE/temp
+
+echo -e "\n\n[STAGE 1] Building dockerfile used for the package generation"
+docker build $RO_BASE -f $RO_BASE/Dockerfile -t opensourcemano/ro_pkg
+sleep 2
+
+echo "[STAGE 1.1] Generating packages inside docker ro_pkg"
+docker run -d --name ro_pkg opensourcemano/ro_pkg bash -c 'sleep 3600'
+docker cp $RO_BASE ro_pkg:/RO
+docker exec ro_pkg bash -c 'cd /RO; ./devops-stages/stage-build.sh'
+deb_files=`docker exec ro_pkg bash -c 'ls /RO/deb_dist/'`
+[ -z "$deb_files" ] && echo "No packages generated" >&2 && exit 1
+echo $deb_files
+
+echo -e "\n\n[STAGE 1.2] Print package information and copy to '$HERE/temp/'"
+# print package information and copy to "$HERE/temp/"
+for deb_file in $deb_files ; do
+ echo; echo; echo
+ echo $deb_file info:
+ echo "==========================="
+ docker cp ro_pkg:/RO/deb_dist/$deb_file $HERE/temp/
+ dpkg -I $HERE/temp/$(basename $deb_file)
+done
+
+# docker rm -f ro_pkg
+echo -e "\n\n[STAGE 2] Building docker image opensourcemano/ro:py3_devops based on debian packages"
+docker build $HERE -f $HERE/Dockerfile-devops -t opensourcemano/ro:py3_devops $no_cache ||
+ ! echo "error generating devops dockerfile" >&2 || exit 1
+
+[[ -z "$update_osm" ]] && exit 0
+sleep 2
+
+echo -e "\n\n[STAGE 3] Update service osm_ro with generated docker image"
+# try docker swarm. If fails try kebernetes
+if docker service update osm_ro --force --image opensourcemano/ro:py3_devops 2>/dev/null
+then
+ sleep 2
+ docker container prune -f
+elif kubectl -n osm patch deployment ro --patch \
+ '{"spec": {"template": {"spec": {"containers": [{"name": "ro", "image": "opensourcemano/ro:py3_devops"}]}}}}'
+then
+ kubectl -n osm scale deployment ro --replicas=0
+ kubectl -n osm scale deployment ro --replicas=1
+else
+ echo "Cannot update OSM" && exit 1
+fi
+docker service logs osm_ro
--- /dev/null
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
# Generates a docker image based on Dockerfile-local and updates a running docker stack with the generated image

HERE=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
export RO_BASE=$(dirname "$HERE")

echo -e "\n\n[STAGE 1] Building docker image opensourcemano/ro:py3_local based on debian packages"
docker build "$RO_BASE" -f "$RO_BASE/Dockerfile-local" -t opensourcemano/ro:py3_local ||
    ! echo "error generating local dockerfile" >&2 || exit 1
sleep 2
# refresh the running swarm service with the new image and drop stopped containers
docker service update osm_ro --force --image opensourcemano/ro:py3_local
sleep 2
docker container prune -f
docker service logs osm_ro
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: dataplaneVNF1
+ description: "Example of a dataplane VNF consisting of a single VM for data plane workloads with high I/O performance requirements: 14 HW threads, 16 GB hugepages and 4 10G interfaces"
+ external-connections:
+ - name: mgmt
+ type: mgmt # "mgmt"(autoconnect to management net)
+ VNFC: dataplaneVNF1-VM
+ local_iface_name: eth0
+ description: Management interface for general use
+ - name: xe0
+ type: data
+ VNFC: dataplaneVNF1-VM
+ local_iface_name: xe0
+ description: Dataplane interface 1
+ - name: xe1
+ type: data
+ VNFC: dataplaneVNF1-VM
+ local_iface_name: xe1
+ description: Dataplane interface 2
+ - name: xe2
+ type: data
+ VNFC: dataplaneVNF1-VM
+ local_iface_name: xe2
+ description: Dataplane interface 3
+ - name: xe3
+ type: data
+ VNFC: dataplaneVNF1-VM
+ local_iface_name: xe3
+ description: Dataplane interface 4
+ VNFC:
+ - name: dataplaneVNF1-VM
+ description: "Dataplane VM with high I/O performance requirements: 14 HW threads, 16 GB hugepages and 4 10G interfaces"
+ #Copy the image to a compute path and edit this path
+ VNFC image: /path/to/imagefolder/dataplaneVNF1.qcow2
+ numas:
+ - paired-threads: 7 # "cores", "paired-threads", "threads"
+ paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9], [10,11], [12, 13] ]
+ memory: 16 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 10 Gbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "yes"
+ bandwidth: 10 Gbps
+ - name: xe2
+ vpci: "0000:00:13.0"
+ dedicated: "yes"
+ bandwidth: 10 Gbps
+ - name: xe3
+ vpci: "0000:00:14.0"
+ dedicated: "yes"
+ bandwidth: 10 Gbps
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: dataplaneVNF2
+ description: "Example of a dataplane VNF consisting of a single VM for data plane workloads with high I/O performance requirements: 3 cores, 8 GB hugepages, 2 10G interfaces and 1 SR-IOV interface"
+ external-connections:
+ - name: mgmt
+ type: mgmt # "mgmt"(autoconnect to management net)
+ VNFC: dataplaneVNF2-VM
+ local_iface_name: eth0
+ description: Management interface for general use
+ - name: control
+ type: bridge
+ VNFC: dataplaneVNF2-VM
+ local_iface_name: eth1
+ description: Bridge interface
+ - name: xe0
+ type: data
+ VNFC: dataplaneVNF2-VM
+ local_iface_name: xe0
+ description: Dataplane interface 1
+ - name: xe1
+ type: data
+ VNFC: dataplaneVNF2-VM
+ local_iface_name: xe1
+ description: Dataplane interface 2
+ - name: xe2
+ type: data
+ VNFC: dataplaneVNF2-VM
+ local_iface_name: xe2
+ description: Dataplane interface 3 (SR-IOV)
+ VNFC:
+ - name: dataplaneVNF2-VM
+ description: "Dataplane VM with high I/O performance requirements: 3 cores (no hyperthreading), 8 GB hugepages, 2 10G interfaces and 1 SR-IOV interface"
+ #Copy the image to a compute path and edit this path
+ VNFC image: /path/to/imagefolder/dataplaneVNF2.qcow2
+ numas:
+ - cores: 3 # "cores", "paired-threads", "threads"
+ memory: 8 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 10 Gbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "yes"
+ bandwidth: 10 Gbps
+ - name: xe2
+ vpci: "0000:00:13.0"
+ dedicated: "no"
+ bandwidth: 1 Gbps
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+ - name: eth1
+ vpci: "0000:00:10.0"
+ bandwidth: 1 Mbps # Optional, informative only
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: dataplaneVNF3
+ description: "Example of a dataplane VNF consisting of one VM with two SR-IOV"
+ # class: parent # Optional. Used to organize VNFs
+ external-connections:
+ - name: mgmt
+ type: mgmt
+ VNFC: dataplaneVNF3-VM
+ local_iface_name: eth0
+ description: control interface VM1
+ - name: data0
+ type: data
+ VNFC: dataplaneVNF3-VM
+ local_iface_name: xe0
+ description: Dataplane interface
+ - name: data1
+ type: data
+ VNFC: dataplaneVNF3-VM
+ local_iface_name: xe1
+ description: Dataplane interface
+ VNFC:
+ - name: dataplaneVNF3-VM
+ description: "Dataplane VM with 2 threads, 2 GB hugepages, 2 SR-IOV interface"
+ #Copy the image to a compute path and edit this path
+ VNFC image: /path/to/imagefolder/dataplaneVNF3.qcow2
+ disk: 10
+ numas:
+ - threads: 2 # "cores", "paired-threads", "threads"
+ memory: 2 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "no"
+ bandwidth: 1 Gbps
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: "0.2"
+vnf:
+ name: dataplaneVNF4
+ description: "Example of a dataplane VNF consisting of two VMs for data plane workloads with one internal network. VMs use image name instead of the path"
+ # class: parent # Optional. Used to organize VNFs
+ internal-connections:
+ - name: datanet
+ description: datanet
+ type: e-lan
+ implementation: underlay
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 192.168.1.0/24
+ gateway-address: 192.168.1.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 192.168.1.100
+ count: 100
+ elements:
+ - VNFC: VNF_2VMs-VM1
+ local_iface_name: xe0
+ ip_address: 192.168.1.2
+ - VNFC: VNF_2VMs-VM2
+ local_iface_name: xe0
+ ip_address: 192.168.1.3
+ external-connections:
+ - name: control0
+ type: mgmt
+ VNFC: VNF_2VMs-VM1
+ local_iface_name: eth0
+ description: control interface VM1
+ - name: control1
+ type: mgmt
+ VNFC: VNF_2VMs-VM2
+ local_iface_name: eth0
+ description: control interface VM2
+ - name: in
+ type: data
+ VNFC: VNF_2VMs-VM1
+ local_iface_name: xe1
+ description: Dataplane interface input
+ - name: out
+ type: data
+ VNFC: VNF_2VMs-VM2
+ local_iface_name: xe1
+ description: Dataplane interface output
+ VNFC:
+ - name: VNF_2VMs-VM1
+ description: "Dataplane VM1 with 4 threads, 2 GB hugepages, 2 SR-IOV interface"
+ image name: linux-linux-VM-img
+ disk: 10
+ numas:
+ - paired-threads: 2 # "cores", "paired-threads", "threads"
+ memory: 2 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "no"
+ bandwidth: 1 Gbps
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+
+ - name: VNF_2VMs-VM2
+ description: "Dataplane VM2 with 2 threads, 2 GB hugepages, 2 SR-IOV interface"
+ image name: linux-linux-VM-img
+ disk: 10
+ numas:
+ - paired-threads: 1 # "cores", "paired-threads", "threads"
+ memory: 2 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "no"
+ bandwidth: 1 Gbps
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: dataplaneVNF_2VMs
+ description: "Example of a dataplane VNF consisting of two VMs for data plane workloads with one internal network"
+ # class: parent # Optional. Used to organize VNFs
+ internal-connections:
+ - name: datanet
+ description: datanet
+ type: data
+ elements:
+ - VNFC: VNF_2VMs-VM1
+ local_iface_name: xe0
+ - VNFC: VNF_2VMs-VM2
+ local_iface_name: xe0
+ external-connections:
+ - name: control0
+ type: mgmt
+ VNFC: VNF_2VMs-VM1
+ local_iface_name: eth0
+ description: control interface VM1
+ - name: control1
+ type: mgmt
+ VNFC: VNF_2VMs-VM2
+ local_iface_name: eth0
+ description: control interface VM2
+ - name: in
+ type: data
+ VNFC: VNF_2VMs-VM1
+ local_iface_name: xe1
+ description: Dataplane interface input
+ - name: out
+ type: data
+ VNFC: VNF_2VMs-VM2
+ local_iface_name: xe1
+ description: Dataplane interface output
+ VNFC:
+ - name: VNF_2VMs-VM1
+ description: "Dataplane VM1 with 4 threads, 2 GB hugepages, 2 SR-IOV interface"
+ #Copy the image to a compute path and edit this path
+ VNFC image: /path/to/imagefolder/dataplaneVNF_2VMs.qcow2
+ disk: 10
+ numas:
+ - paired-threads: 2 # "cores", "paired-threads", "threads"
+ memory: 2 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "no"
+ bandwidth: 1 Gbps
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+
+ - name: VNF_2VMs-VM2
+ description: "Dataplane VM2 with 2 threads, 2 GB hugepages, 2 SR-IOV interface"
+ #Copy the image to a compute path and edit this path
+ VNFC image: /path/to/imagefolder/dataplaneVNF_2VMs.qcow2
+ disk: 10
+ numas:
+ - paired-threads: 1 # "cores", "paired-threads", "threads"
+ memory: 2 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "no"
+ bandwidth: 1 Gbps
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: "0.2"
+vnf:
+ name: dataplaneVNF_2VMs_v02
+ description: "Example of a dataplane VNF consisting of two VMs for data plane workloads with one internal network"
+ # class: parent # Optional. Used to organize VNFs
+ internal-connections:
+ - name: datanet
+ description: datanet
+ type: e-lan
+ implementation: underlay
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 192.168.1.0/24
+ gateway-address: 192.168.1.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 192.168.1.100
+ count: 100
+ elements:
+ - VNFC: VNF_2VMs-VM1
+ local_iface_name: xe0
+ ip_address: 192.168.1.2
+ - VNFC: VNF_2VMs-VM2
+ local_iface_name: xe0
+ ip_address: 192.168.1.3
+ external-connections:
+ - name: control0
+ type: mgmt
+ VNFC: VNF_2VMs-VM1
+ local_iface_name: eth0
+ description: control interface VM1
+ - name: control1
+ type: mgmt
+ VNFC: VNF_2VMs-VM2
+ local_iface_name: eth0
+ description: control interface VM2
+ - name: in
+ type: data
+ VNFC: VNF_2VMs-VM1
+ local_iface_name: xe1
+ description: Dataplane interface input
+ - name: out
+ type: data
+ VNFC: VNF_2VMs-VM2
+ local_iface_name: xe1
+ description: Dataplane interface output
+ VNFC:
+ - name: VNF_2VMs-VM1
+ description: "Dataplane VM1 with 4 threads, 2 GB hugepages, 2 SR-IOV interface"
+ #Copy the image to a compute path and edit this path
+ VNFC image: /path/to/imagefolder/dataplaneVNF_2VMs.qcow2
+ disk: 10
+ numas:
+ - paired-threads: 2 # "cores", "paired-threads", "threads"
+ memory: 2 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "no"
+ bandwidth: 1 Gbps
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+
+ - name: VNF_2VMs-VM2
+ description: "Dataplane VM2 with 2 threads, 2 GB hugepages, 2 SR-IOV interface"
+ #Copy the image to a compute path and edit this path
+ VNFC image: /path/to/imagefolder/dataplaneVNF_2VMs.qcow2
+ disk: 10
+ numas:
+ - paired-threads: 1 # "cores", "paired-threads", "threads"
+ memory: 2 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "no" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 1 Gbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "no"
+ bandwidth: 1 Gbps
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+---
+schema_version: "0.2"
+vnf:
+ name: linux-cloud-init
+ description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
+ external-connections:
+ - name: eth0
+ type: mgmt
+ description: General purpose interface
+ VNFC: linux-VM
+ local_iface_name: eth0
+ VNFC:
+ - name: linux-VM
+ description: Generic Linux Virtual Machine
+ #Copy the image to a compute path and edit this path
+ image name: ubuntu16.04
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 2048 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 20
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:11.0"
+ numas: []
+ boot-data:
+ key-pairs:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com
+ users:
+ - name: atierno
+ key-pairs:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com
+ boot-data-drive: true
+ config-files:
+ - content: |
+ auto enp0s3
+ iface enp0s3 inet dhcp
+ dest: /etc/network/interfaces.d/enp0s3.cfg
+ permissions: '0644'
+ owner: root:root
+ - content: |
+ #! /bin/bash
+ ls -al >> /var/log/osm.log
+ dest: /etc/rc.local
+ permissions: '0755'
+ - content: "file content"
+ dest: /etc/test_delete
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: linux
+ description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
+ external-connections:
+ - name: eth0
+ type: bridge
+ VNFC: linux-VM
+ local_iface_name: eth0
+ description: General purpose interface
+ VNFC:
+ - name: linux-VM
+ description: Generic Linux Virtual Machine
+ #Copy the image to a compute path and edit this path
+ VNFC image: /path/to/imagefolder/linux.qcow2
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1024 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 10
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:11.0"
+ numas: []
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: "0.2"
+vnf:
+ name: linux_2VMs_v02
+ description: "Example of a linux VNF consisting of two VMs with one internal network"
+ # class: parent # Optional. Used to organize VNFs
+ internal-connections:
+ - name: internalnet
+ description: internalnet
+ type: e-lan
+ implementation: overlay
+ ip-profile:
+ ip-version: IPv4
+ subnet-address: 192.168.1.0/24
+ gateway-address: 192.168.1.1
+ dns-address: 8.8.8.8
+ dhcp:
+ enabled: true
+ start-address: 192.168.1.100
+ count: 100
+ elements:
+ - VNFC: linux_2VMs-VM1
+ local_iface_name: xe0
+ ip_address: 192.168.1.2
+ - VNFC: linux_2VMs-VM2
+ local_iface_name: xe0
+ ip_address: 192.168.1.3
+ external-connections:
+ - name: control0
+ type: mgmt
+ VNFC: linux_2VMs-VM1
+ local_iface_name: eth0
+ description: control interface VM1
+ - name: control1
+ type: mgmt
+ VNFC: linux_2VMs-VM2
+ local_iface_name: eth0
+ description: control interface VM2
+ - name: in
+ type: bridge
+ VNFC: linux_2VMs-VM1
+ local_iface_name: xe1
+ description: data interface input
+ - name: out
+ type: bridge
+ VNFC: linux_2VMs-VM2
+ local_iface_name: xe1
+ description: data interface output
+ VNFC:
+ - name: linux_2VMs-VM1
+ description: "Linux VM1 with 4 CPUs, 2 GB RAM and 3 bridge interfaces"
+ #Copy the image to a compute path and edit this path
+ VNFC image: /path/to/imagefolder/linux_VNF_2VMs.qcow2
+ disk: 10
+ vcpus: 4
+ ram: 2048
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+ - name: xe0
+ vpci: "0000:00:11.0"
+ bandwidth: 1 Mbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ bandwidth: 1 Mbps
+ - name: linux_2VMs-VM2
+ description: "Linux VM2 with 2 CPUs, 2 GB RAM and 3 bridge interfaces"
+ #Copy the image to a compute path and edit this path
+ VNFC image: /path/to/imagefolder/linux_VNF_2VMs.qcow2
+ disk: 10
+ vcpus: 2
+ ram: 2048
+ bridge-ifaces:
+ - name: eth0
+ vpci: "0000:00:09.0"
+ bandwidth: 1 Mbps # Optional, informative only
+ - name: xe0
+ vpci: "0000:00:11.0"
+ bandwidth: 1 Mbps
+ - name: xe1
+ vpci: "0000:00:12.0"
+ bandwidth: 1 Mbps
+
--- /dev/null
+vnfd:vnfd-catalog:
+ vnfd:
+ - id: 2vdu_vnfd
+ name: 2vdu_vnfd-name
+ short-name: 2vdu-sname
+ description: Simple VNF example with a cirros and 2 vdu count
+ vendor: OSM
+ version: '1.0'
+
+ # Place the logo as png in icons directory and provide the name here
+ logo: cirros-64.png
+
+ # Management interface
+ mgmt-interface:
+ vdu-id: 2vduVM
+
+ # At least one VDU needs to be specified
+ vdu:
+ - id: 2vduVM
+ name: 2vduVM-name
+ description: 2vduVM-description
+ count: 2
+
+ # Flavour of the VM to be instantiated for the VDU
+ # flavor below can fit into m1.micro
+ vm-flavor:
+ vcpu-count: 1
+ memory-mb: 96
+ storage-gb: 0
+
+ # Image/checksum or image including the full path
+ image: 'cirros-0.3.5-x86_64-disk'
+ #checksum:
+
+ interface:
+ # Specify the external interfaces
+ # There can be multiple interfaces defined
+ - name: eth0
+ type: EXTERNAL
+ position: 0
+ virtual-interface:
+ type: OM-MGMT
+ bandwidth: '0'
+ # vnfd-connection-point-ref: eth0
+ external-connection-point-ref: eth0
+
+ # Replace the ssh-rsa public key to use your own public key
+ cloud-init: |
+ #cloud-config
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDajuABKm3lzcA0hk1IQGAfSWxxE0viRedl1EnZ2s0qQL54zTGVqYzC73CndSu0az57ysAKDapKOnTWl6zfj+bU4j3c4jORDWrIelBVaeQaoWGfKtBmS7jE54I94cRgkAIk+4zM1ViRyPQ+0FoOOq7I/6rQZITZ4VqfyhygW7j2ke2vl3oJ/TKocOpdk4WlMmPC6dFYppmwlpTpPYKJVdh58aeq9G/wTRP1qvCAgZAm/1GYoj7JgQjw11j6ZZE0ci03F9aOqqMlICDJF87Zk3fUhnt+g6EYNMiEafd7kuNwXBAJ5D1n4vZnj/EpdQY+dlXhhGS2Bncr1db1YBJCoRWN Generated-by-Nova
+ users:
+ - name: osm
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDajuABKm3lzcA0hk1IQGAfSWxxE0viRedl1EnZ2s0qQL54zTGVqYzC73CndSu0az57ysAKDapKOnTWl6zfj+bU4j3c4jORDWrIelBVaeQaoWGfKtBmS7jE54I94cRgkAIk+4zM1ViRyPQ+0FoOOq7I/6rQZITZ4VqfyhygW7j2ke2vl3oJ/TKocOpdk4WlMmPC6dFYppmwlpTpPYKJVdh58aeq9G/wTRP1qvCAgZAm/1GYoj7JgQjw11j6ZZE0ci03F9aOqqMlICDJF87Zk3fUhnt+g6EYNMiEafd7kuNwXBAJ5D1n4vZnj/EpdQY+dlXhhGS2Bncr1db1YBJCoRWN Generated-by-Nova
+
+ connection-point:
+ - name: eth0
+ type: VPORT
--- /dev/null
+vnfd:vnfd-catalog:
+ vnfd:
+ - id: 3vdu_vnfd
+ name: 3vdu_vnfd-name
+ short-name: 3vdu-sname
+ description: Simple VNF example with a cirros and 3 vdu count
+ vendor: OSM
+ version: '1.0'
+
+ # Place the logo as png in icons directory and provide the name here
+ logo: cirros-64.png
+
+ # Management interface
+ mgmt-interface:
+ vdu-id: 3vduVM
+
+ # At least one VDU needs to be specified
+ vdu:
+ - id: 3vduVM
+ name: 3vduVM-name
+ description: 3vduVM-description
+ count: 3
+
+ # Flavour of the VM to be instantiated for the VDU
+ # flavor below can fit into m1.micro
+ vm-flavor:
+ vcpu-count: 1
+ memory-mb: 2048
+ storage-gb: 20
+
+ # Image/checksum or image including the full path
+ image: 'ubuntu16.04'
+ #checksum:
+
+ interface:
+ # Specify the external interfaces
+ # There can be multiple interfaces defined
+ - name: eth0
+ type: EXTERNAL
+ position: 0
+ virtual-interface:
+ type: VIRTIO
+ bandwidth: '0'
+ # vnfd-connection-point-ref: eth0
+ external-connection-point-ref: eth0
+
+ # Replace the ssh-rsa public key to use your own public key
+ cloud-init: |
+ #cloud-config
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDajuABKm3lzcA0hk1IQGAfSWxxE0viRedl1EnZ2s0qQL54zTGVqYzC73CndSu0az57ysAKDapKOnTWl6zfj+bU4j3c4jORDWrIelBVaeQaoWGfKtBmS7jE54I94cRgkAIk+4zM1ViRyPQ+0FoOOq7I/6rQZITZ4VqfyhygW7j2ke2vl3oJ/TKocOpdk4WlMmPC6dFYppmwlpTpPYKJVdh58aeq9G/wTRP1qvCAgZAm/1GYoj7JgQjw11j6ZZE0ci03F9aOqqMlICDJF87Zk3fUhnt+g6EYNMiEafd7kuNwXBAJ5D1n4vZnj/EpdQY+dlXhhGS2Bncr1db1YBJCoRWN Generated-by-Nova
+ users:
+ - name: osm
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDajuABKm3lzcA0hk1IQGAfSWxxE0viRedl1EnZ2s0qQL54zTGVqYzC73CndSu0az57ysAKDapKOnTWl6zfj+bU4j3c4jORDWrIelBVaeQaoWGfKtBmS7jE54I94cRgkAIk+4zM1ViRyPQ+0FoOOq7I/6rQZITZ4VqfyhygW7j2ke2vl3oJ/TKocOpdk4WlMmPC6dFYppmwlpTpPYKJVdh58aeq9G/wTRP1qvCAgZAm/1GYoj7JgQjw11j6ZZE0ci03F9aOqqMlICDJF87Zk3fUhnt+g6EYNMiEafd7kuNwXBAJ5D1n4vZnj/EpdQY+dlXhhGS2Bncr1db1YBJCoRWN Generated-by-Nova
+
+ connection-point:
+ - name: eth0
+ type: VPORT
--- /dev/null
+vnfd:vnfd-catalog:
+ vnfd:
+ - id: linux-sriov
+ name: linux_sriov
+ short-name: linux_sriov
+ description: Simple VNF example with a ubuntu using SR-IOV
+ vendor: OSM
+ version: '1.0'
+
+ # Place the logo as png in icons directory and provide the name here
+ logo: cirros-64.png
+
+ # Management interface
+ mgmt-interface:
+ cp: eth0
+
+ # At least one VDU needs to be specified
+ vdu:
+ - id: linux-sriov-VM
+ name: linux_sriov_VM
+ description: linux_sriov_VM
+ count: 1
+
+ # Flavour of the VM to be instantiated for the VDU
+ vm-flavor:
+ vcpu-count: 1
+ memory-mb: 2048
+ storage-gb: 20
+
+ # Image/checksum or image including the full path
+ image: ubuntu16.04
+ #checksum:
+
+ interface:
+ # Specify the external interfaces
+ - name: eth0
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: eth0
+ - name: sriov0
+ type: EXTERNAL
+ virtual-interface:
+ type: SR-IOV
+ bandwidth: '0'
+ vpci: 0000:00:0b.0
+ external-connection-point-ref: sriov0
+ connection-point:
+ - name: eth0
+ type: VPORT
+ - name: sriov0
+ type: VPORT
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: vnf_additional_disk_based_image
+ description: VNF with additional volume based on image
+ # class: parent # Optional. Used to organize VNFs
+ external-connections:
+ - name: mgmt0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: TEMPLATE-VM # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface
+ VNFC: # Virtual machine array
+ - name: TEMPLATE-VM # name of Virtual Machine
+ description: TEMPLATE description
+# VNFC image: /path/to/imagefolder/TEMPLATE-VM.qcow2
+ image name: ubuntu16.04
+ image checksum: 7373edba82a31eedd182d29237b746cf
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1000 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 5 # disk size in GiB, by default 1
+ #numas:
+ #- paired-threads: 5 # "cores", "paired-threads", "threads"
+ # paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ # memory: 14 # GBytes
+ # interfaces: []
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:0a.0" # Optional. Virtual PCI address
+ bandwidth: 1 Mbps # Optional. Informative only
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ devices: # Optional, order determines device letter assignment (hda, hdb, ...)
+ - type: disk # "disk","cdrom","xml"
+ image name: TestVM
+ image checksum: 88d6c77b58fd40a7cb7f44b62bd5ad98
+ size: 1
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+ # vpci: "0000:00:03.0" # Optional, not for disk or cdrom
+ # Additional Virtual Machines would be included here
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: vnf_additional_disk_empty_volume
+ description: VNF with an additional empty volume
+ # class: parent # Optional. Used to organize VNFs
+ external-connections:
+ - name: mgmt0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: TEMPLATE-VM # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface
+ VNFC: # Virtual machine array
+ - name: TEMPLATE-VM # name of Virtual Machine
+ description: TEMPLATE description
+# VNFC image: /path/to/imagefolder/TEMPLATE-VM.qcow2
+ image name: ubuntu16.04
+ image checksum: 7373edba82a31eedd182d29237b746cf
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1000 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 5 # disk size in GiB, by default 1
+ #numas:
+ #- paired-threads: 5 # "cores", "paired-threads", "threads"
+ # paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ # memory: 14 # GBytes
+ # interfaces: []
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:0a.0" # Optional. Virtual PCI address
+ bandwidth: 1 Mbps # Optional. Informative only
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ devices: # Optional, order determines device letter assignment (hda, hdb, ...)
+ - type: disk # "disk","cdrom","xml"
+ size: 1
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+ # vpci: "0000:00:03.0" # Optional, not for disk or cdrom
+ # Additional Virtual Machines would be included here
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: vnf_floating_ip
+ description: VNF disabling port_security option in mgmt interface
+ # class: parent # Optional. Used to organize VNFs
+ external-connections:
+ - name: mgmt0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: vnf_floating_ip # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface
+ VNFC: # Virtual machine array
+ - name: vnf_floating_ip # name of Virtual Machine
+ description: vnf_floating_ip
+# VNFC image: /path/to/imagefolder/TEMPLATE-VM.qcow2
+ image name: ubuntu16.04
+ image checksum: 7373edba82a31eedd182d29237b746cf
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1000 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 5 # disk size in GiB, by default 1
+ #numas:
+ #- paired-threads: 5 # "cores", "paired-threads", "threads"
+ # paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ # memory: 14 # GBytes
+ # interfaces: []
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:0a.0" # Optional. Virtual PCI address
+ bandwidth: 1 Mbps # Optional. Informative only
+ floating-ip: True
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ # Additional Virtual Machines would be included here
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: vnf_no_additional_devices
+ description: VNF without additional devices
+ # class: parent # Optional. Used to organize VNFs
+ external-connections:
+ - name: mgmt0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: TEMPLATE-VM # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface
+ VNFC: # Virtual machine array
+ - name: TEMPLATE-VM # name of Virtual Machine
+ description: TEMPLATE description
+# VNFC image: /path/to/imagefolder/TEMPLATE-VM.qcow2
+ image name: ubuntu16.04
+ image checksum: 7373edba82a31eedd182d29237b746cf
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1000 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 5 # disk size in GiB, by default 1
+ #numas:
+ #- paired-threads: 5 # "cores", "paired-threads", "threads"
+ # paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ # memory: 14 # GBytes
+ # interfaces: []
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:0a.0" # Optional. Virtual PCI address
+ bandwidth: 1 Mbps # Optional. Informative only
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ # Additional Virtual Machines would be included here
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: vnf_no_port_security
+ description: VNF disabling port_security option in mgmt interface
+ # class: parent # Optional. Used to organize VNFs
+ external-connections:
+ - name: mgmt0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: vnf_no_port_security # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface
+ VNFC: # Virtual machine array
+ - name: vnf_no_port_security # name of Virtual Machine
+ description: vnf_no_port_security
+# VNFC image: /path/to/imagefolder/TEMPLATE-VM.qcow2
+ image name: ubuntu16.04
+ image checksum: 7373edba82a31eedd182d29237b746cf
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ ram: 1000 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ disk: 5 # disk size in GiB, by default 1
+ #numas:
+ #- paired-threads: 5 # "cores", "paired-threads", "threads"
+ # paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ # memory: 14 # GBytes
+ # interfaces: []
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:0a.0" # Optional. Virtual PCI address
+ bandwidth: 1 Mbps # Optional. Informative only
+ port-security: False
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ # Additional Virtual Machines would be included here
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: TEMPLATE-2VM
+ description: This is a template to help in the creation of multi-VM VNFs
+ # class: parent # Optional. Used to organize VNFs
+ internal-connections:
+ - name: datanet
+ description: datanet
+ type: data
+ elements:
+ - VNFC: VirtualMachine-1
+ local_iface_name: xe0
+ - VNFC: VirtualMachine-2
+ local_iface_name: xe0
+ - name: controlnet
+ description: controlnet
+ type: bridge
+ elements:
+ - VNFC: VirtualMachine-1
+ local_iface_name: ge0
+ - VNFC: VirtualMachine-2
+ local_iface_name: ge0
+ external-connections:
+ - name: mgmt0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: VirtualMachine-1 # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface 1
+ - name: mgmt1
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: VirtualMachine-2 # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface 2
+ - name: data0
+ type: data # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: VirtualMachine-1 # Virtual Machine this interface belongs to
+ local_iface_name: xe1 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Data interface 1
+ - name: data1
+ type: data # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: VirtualMachine-2 # Virtual Machine this interface belongs to
+ local_iface_name: xe1 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Data interface 2
+ VNFC: # Virtual machine array
+ # First Virtual Machine
+ - name: VirtualMachine-1 # name of Virtual Machine
+ description: VM 1 in the MultiVM template
+ VNFC image: /path/to/imagefolder/TEMPLATE-2VM-VM1.qcow2
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ # vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ # ram: 1024 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ # disk: 10 # disk size in GiB, by default 1
+ numas:
+ - paired-threads: 5 # "cores", "paired-threads", "threads"
+ paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ memory: 14 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 10 Gbps
+ #mac_address: '20:33:45:56:77:44' #avoid this option if possible
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "yes"
+ bandwidth: 10 Gbps
+ #mac_address: '20:33:45:56:77:45' #avoid this option if possible
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:09.0" # Optional. Virtual PCI address
+ bandwidth: 1 Mbps # Optional. Informative only
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ - name: ge0
+ vpci: "0000:00:10.0"
+ bandwidth: 1 Mbps
+ # mac_address: '20:33:45:56:77:47' # avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ devices: # Optional, order determines device letter assignment (hda, hdb, ...)
+ - type: disk # "disk","cdrom","xml"
+ image: /path/to/imagefolder/SECOND-DISK.qcow2
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+ # vpci: "0000:00:03.0" # Optional, not for disk or cdrom
+ - type: cdrom
+ image: /path/to/imagefolder/CDROM-IMAGE.qcow2
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+ - type: xml
+ image: /path/to/imagefolder/ADDITIONAL-DISK.qcow2 # Optional, depending on the device type
+ image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } # Optional, depending on the device type
+ vpci: "0000:00:03.0" # Optional, depending on the device type (not needed for disk or cdrom)
+ xml: ' xml text for XML described devices. Do not use single quotes inside
+ The following words, if found, will be replaced:
+ __file__ by image path, (image must be provided)
+ __format__ by qcow2 or raw (image must be provided)
+ __dev__ by device letter (b, c, d ...)
+ __vpci__ by vpci (vpci must be provided)
+ '
+ # Second Virtual Machine
+ - name: VirtualMachine-2 # name of Virtual Machine
+ description: VM 2 in the MultiVM template
+ VNFC image: /path/to/imagefolder/TEMPLATE-2VM-VM1.qcow2 # In this case, it is the same as VirtualMachine-1, but it could have been different
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ # vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ # ram: 1024 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ # disk: 10 # disk size in GiB, by default 1
+ numas:
+ - paired-threads: 5 # "cores", "paired-threads", "threads"
+ paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ memory: 14 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 10 Gbps
+ #mac_address: '20:33:45:56:77:44' #avoid this option if possible
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "yes"
+ bandwidth: 10 Gbps
+ #mac_address: '20:33:45:56:77:45' #avoid this option if possible
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:09.0" # Optional
+ bandwidth: 1 Mbps # Optional, informative only
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ - name: ge0
+ vpci: "0000:00:10.0"
+ bandwidth: 1 Mbps
+ # mac_address: '20:33:45:56:77:47' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ devices: # Optional, order determines device letter assignment (hda, hdb, ...)
+ - type: disk # "disk","cdrom","xml"
+ image: /path/to/imagefolder/SECOND-DISK.qcow2
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+ # vpci: "0000:00:03.0" # Optional, not for disk or cdrom
+ - type: cdrom
+ image: /path/to/imagefolder/CDROM-IMAGE.qcow2
+ #image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+ - type: xml
+ image: /path/to/imagefolder/ADDITIONAL-DISK.qcow2 # Optional, depending on the device type
+ image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } # Optional, depending on the device type
+ vpci: "0000:00:03.0" # Optional, depending on the device type (not needed for disk or cdrom)
+ xml: ' xml text for XML described devices. Do not use single quotes inside
+ The following words, if found, will be replaced:
+ __file__ by image path, (image must be provided)
+ __format__ by qcow2 or raw (image must be provided)
+ __dev__ by device letter (b, c, d ...)
+ __vpci__ by vpci (vpci must be provided)
+ '
+ # Additional Virtual Machines can be included here
+
--- /dev/null
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+ name: TEMPLATE
+ description: This is a template to help in the creation of your own VNFs
+ # class: parent # Optional. Used to organize VNFs
+ external-connections:
+ - name: mgmt0
+ type: mgmt # "mgmt" (autoconnect to management net), "bridge", "data"
+ VNFC: TEMPLATE-VM # Virtual Machine this interface belongs to
+ local_iface_name: mgmt0 # interface name inside this Virtual Machine (must be defined in the VNFC section)
+ description: Management interface
+ - name: xe0
+ type: data
+ VNFC: TEMPLATE-VM
+ local_iface_name: xe0
+ description: Data interface 1
+ - name: xe1
+ type: data
+ VNFC: TEMPLATE-VM
+ local_iface_name: xe1
+ description: Data interface 2
+ - name: ge0
+ type: bridge
+ VNFC: TEMPLATE-VM
+ local_iface_name: ge0
+ description: Bridge interface
+ VNFC: # Virtual machine array
+ - name: TEMPLATE-VM # name of Virtual Machine
+ # count: 1 #by default 1
+ description: TEMPLATE description
+ VNFC image: /path/to/imagefolder/TEMPLATE-VM.qcow2
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+ # processor: #Optional
+ # model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+ # features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+ # hypervisor: #Optional
+ # type: QEMU-kvm
+ # version: "10002|12001|2.6.32-358.el6.x86_64"
+ # vcpus: 1 # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+ # ram: 1024 # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+ # disk: 10 # disk size in GiB, by default 1
+ numas:
+ - paired-threads: 5 # "cores", "paired-threads", "threads"
+ paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+ memory: 14 # GBytes
+ interfaces:
+ - name: xe0
+ vpci: "0000:00:11.0"
+ dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+ bandwidth: 10 Gbps
+ # mac_address: '20:33:45:56:77:44' #avoid this option if possible
+ - name: xe1
+ vpci: "0000:00:12.0"
+ dedicated: "yes"
+ bandwidth: 10 Gbps
+ # mac_address: '20:33:45:56:77:45' #avoid this option if possible
+ bridge-ifaces:
+ - name: mgmt0
+ vpci: "0000:00:09.0" # Optional. Virtual PCI address
+ bandwidth: 1 Mbps # Optional. Informative only
+ # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ - name: ge0
+ vpci: "0000:00:10.0"
+ bandwidth: 1 Mbps
+ # mac_address: '20:33:45:56:77:47' #avoid this option if possible
+ # model: 'virtio' # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+ devices: # Optional, order determines device letter assignment (hda, hdb, ...)
+ - type: disk # "disk","cdrom","xml"
+ image: /path/to/imagefolder/SECOND-DISK.qcow2
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+ # vpci: "0000:00:03.0" # Optional, not for disk or cdrom
+ - type: cdrom
+ image: /path/to/imagefolder/CDROM-IMAGE.qcow2
+ # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+ - type: xml
+ image: /path/to/imagefolder/ADDITIONAL-DISK.qcow2 # Optional, depending on the device type
+ image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } # Optional, depending on the device type
+ vpci: "0000:00:03.0" # Optional, depending on the device type (not needed for disk or cdrom)
+ xml: ' xml text for XML described devices. Do not use single quotes inside
+ The following words, if found, will be replaced:
+ __file__ by image path, (image must be provided)
+ __format__ by qcow2 or raw (image must be provided)
+ __dev__ by device letter (b, c, d ...)
+ __vpci__ by vpci (vpci must be provided)
+ '
+ # Additional Virtual Machines would be included here
+