##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
-Apache License
+ Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
LICENSE_HEAD='/**
-* Copyright 2017 Telefónica Investigación y Desarrollo, S.A.U.
+* Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
* This file is part of openmano
* All Rights Reserved.
*
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
/**
-* Copyright 2017 Telefónica Investigación y Desarrollo, S.A.U.
+* Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
* This file is part of openmano
* All Rights Reserved.
*
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#[ $OPENMANO_VER_NUM -ge 5060 ] && DB_VERSION=30 #0.5.60 => 30
#[ $OPENMANO_VER_NUM -ge 5061 ] && DB_VERSION=31 #0.5.61 => 31
#[ $OPENMANO_VER_NUM -ge 5070 ] && DB_VERSION=32 #0.5.70 => 32
+#[ $OPENMANO_VER_NUM -ge 5082 ] && DB_VERSION=33 #0.5.82 => 33
#[ $OPENMANO_VER_NUM -ge 6000 ] && DB_VERSION=34 #0.6.00 => 34
#TODO ... put next versions here
sql "DELETE FROM schema_version WHERE version_int='32';"
}
+function upgrade_to_33(){
+    # Schema v33: add PDU type to 'vms' and VIM-side network name to 'instance_nets'.
+    # Fix: closing quote was missing in the progress message ('vms -> 'vms').
+    echo "      Add PDU information to 'vms'"
+    sql "ALTER TABLE vms ADD COLUMN pdu_type VARCHAR(255) NULL DEFAULT NULL AFTER osm_id;"
+    sql "ALTER TABLE instance_nets ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_net_id;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+        "VALUES (33, '0.33', '0.5.82', 'Add pdu information to vms', '2018-11-13');"
+}
+function downgrade_from_33(){
+    # Revert schema v33: drop the columns added by upgrade_to_33 and remove its version row.
+    # Fix: quote was misplaced in the progress message (from' vms' -> from 'vms').
+    echo "      Remove back PDU information from 'vms'"
+    sql "ALTER TABLE vms DROP COLUMN pdu_type;"
+    sql "ALTER TABLE instance_nets DROP COLUMN vim_name;"
+    sql "DELETE FROM schema_version WHERE version_int='33';"
+}
+
+
function upgrade_to_X(){
echo " change 'datacenter_nets'"
sql "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);"
LABEL authors="Gennadiy Dubina, Alfonso Tierno, Gerardo Garcia"
-COPY . /root/RO
-
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \
DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:queens && \
apt-get update && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install git make python python-pip debhelper python3 python3-all python3-pip python3-setuptools && \
+ DEBIAN_FRONTEND=noninteractive apt-get -y install git python python-pip && \
DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox && \
- DEBIAN_FRONTEND=noninteractive pip install pip==9.0.3 && \
- DEBIAN_FRONTEND=noninteractive pip3 install pip==9.0.3 && \
- DEBIAN_FRONTEND=noninteractive pip install -U setuptools setuptools-version-command stdeb && \
- DEBIAN_FRONTEND=noninteractive pip install -U pyang pyangbind && \
- DEBIAN_FRONTEND=noninteractive pip3 install -U pyang pyangbind && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install python-yaml python-netaddr python-boto python-networkx && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \
+ DEBIAN_FRONTEND=noninteractive pip2 install pip==9.0.3 && \
+ DEBIAN_FRONTEND=noninteractive pip2 install -U progressbar pyvmomi pyvcloud==19.1.1 && \
DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient && \
- DEBIAN_FRONTEND=noninteractive pip install -U progressbar pyvmomi pyvcloud==19.1.1 && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install python-argcomplete python-bottle python-cffi python-packaging python-paramiko python-pkgconfig libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install python-logutils python-openstackclient python-openstacksdk && \
- DEBIAN_FRONTEND=noninteractive pip install untangle && \
- DEBIAN_FRONTEND=noninteractive pip install -e git+https://github.com/python-oca/python-oca#egg=oca && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install python-bitarray && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install mysql-client && \
- mkdir -p /root/RO && \
- make -C /root/RO clean all BRANCH=master && \
- dpkg -i /root/RO/IM/deb_dist/python-pyang_*.deb && \
- dpkg -i /root/RO/IM/deb_dist/python-pyangbind_*.deb && \
- dpkg -i /root/RO/IM/deb_dist/python-osm-im*.deb && \
- dpkg -i /root/RO/openvim/.build/python-lib-osm-openvim*.deb && \
- dpkg -i /root/RO/.build/python-osm-ro*.deb && \
+ DEBIAN_FRONTEND=noninteractive apt-get -y install python-cffi libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
+ DEBIAN_FRONTEND=noninteractive apt-get -y install python-openstacksdk python-openstackclient && \
+ DEBIAN_FRONTEND=noninteractive apt-get -y install python-networkx && \
+ DEBIAN_FRONTEND=noninteractive pip2 install untangle && \
+ DEBIAN_FRONTEND=noninteractive pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca && \
+ DEBIAN_FRONTEND=noninteractive apt-get -y install mysql-client
+
+COPY . /root/RO
+
+RUN /root/RO/scripts/install-osm-im.sh --develop && \
+ /root/RO/scripts/install-lib-osm-openvim.sh --develop && \
+ make -C /root/RO prepare && \
+ mkdir -p /var/log/osm && \
+ pip2 install -e /root/RO/build && \
rm -rf /root/.cache && \
apt-get clean && \
- rm -rf /var/lib/apt/lists/* && \
- rm -rf /root/RO
+ rm -rf /var/lib/apt/lists/*
VOLUME /var/log/osm
# RO_DB_OVIM_PORT: default value '3306'
# RO_DB_NAME: default value 'mano_db'
# RO_DB_OVIM_NAME: default value 'mano_vim_db'
+# RO_LOG_FILE: default log to stderr if not defined
ENV RO_DB_HOST="" \
RO_DB_OVIM_HOST="" \
RO_DB_OVIM_PORT=3306 \
RO_DB_NAME=mano_db \
RO_DB_OVIM_NAME=mano_vim_db \
- OPENMANO_TENANT=osm
+ OPENMANO_TENANT=osm \
+ RO_LOG_LEVEL=DEBUG
CMD RO-start.sh
-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# PYTHON_ARGCOMPLETE_OK
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
"""
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
__date__ = "$09-oct-2014 09:09:48$"
-__version__ = "0.4.23-r533"
-version_date = "May 2018"
+__version__ = "0.4.24-r534"
+version_date = "Nov 2018"
from argcomplete.completers import FilesCompleter
import os
if not vnfds:
vnfds = vnfd_catalog.get("vnfd")
vnfd = vnfds[0]
- vdu_list = vnfd["vdu"]
+ vdu_list = vnfd.get("vdu")
else: # old API
api_version = ""
token = "vnfs"
vnfd = myvnf['vnf']
- vdu_list = vnfd["VNFC"]
+ vdu_list = vnfd.get("VNFC")
if args.name or args.description or args.image_path or args.image_name or args.image_checksum:
# TODO, change this for API v3
vnfd['name'] = args.name
if args.description:
vnfd['description'] = args.description
- if args.image_path:
- index = 0
- for image_path_ in args.image_path.split(","):
- # print "image-path", image_path_
- if api_version == "/v3":
- if vdu_list[index].get("image"):
- vdu_list[index]['image'] = image_path_
- if "image-checksum" in vdu_list[index]:
- del vdu_list[index]["image-checksum"]
- else: # image name in volumes
- vdu_list[index]["volumes"][0]["image"] = image_path_
- if "image-checksum" in vdu_list[index]["volumes"][0]:
- del vdu_list[index]["volumes"][0]["image-checksum"]
- else:
- vdu_list[index]['VNFC image'] = image_path_
- if "image name" in vdu_list[index]:
- del vdu_list[index]["image name"]
- if "image checksum" in vdu_list[index]:
- del vdu_list[index]["image checksum"]
- index += 1
- if args.image_name: # image name precedes if both are supplied
- index = 0
- for image_name_ in args.image_name.split(","):
- if api_version == "/v3":
- if vdu_list[index].get("image"):
- vdu_list[index]['image'] = image_name_
- if "image-checksum" in vdu_list[index]:
- del vdu_list[index]["image-checksum"]
- if vdu_list[index].get("alternative-images"):
- for a_image in vdu_list[index]["alternative-images"]:
- a_image['image'] = image_name_
- if "image-checksum" in a_image:
- del a_image["image-checksum"]
- else: # image name in volumes
- vdu_list[index]["volumes"][0]["image"] = image_name_
- if "image-checksum" in vdu_list[index]["volumes"][0]:
- del vdu_list[index]["volumes"][0]["image-checksum"]
- else:
- vdu_list[index]['image name'] = image_name_
- if "VNFC image" in vdu_list[index]:
- del vdu_list[index]["VNFC image"]
- index += 1
- if args.image_checksum:
- index = 0
- for image_checksum_ in args.image_checksum.split(","):
- if api_version == "/v3":
- if vdu_list[index].get("image"):
- vdu_list[index]['image-checksum'] = image_checksum_
- if vdu_list[index].get("alternative-images"):
- for a_image in vdu_list[index]["alternative-images"]:
- a_image['image-checksum'] = image_checksum_
- else: # image name in volumes
- vdu_list[index]["volumes"][0]["image-checksum"] = image_checksum_
- else:
- vdu_list[index]['image checksum'] = image_checksum_
- index += 1
+ if vdu_list:
+ if args.image_path:
+ index = 0
+ for image_path_ in args.image_path.split(","):
+ # print "image-path", image_path_
+ if api_version == "/v3":
+ if vdu_list[index].get("image"):
+ vdu_list[index]['image'] = image_path_
+ if "image-checksum" in vdu_list[index]:
+ del vdu_list[index]["image-checksum"]
+ else: # image name in volumes
+ vdu_list[index]["volumes"][0]["image"] = image_path_
+ if "image-checksum" in vdu_list[index]["volumes"][0]:
+ del vdu_list[index]["volumes"][0]["image-checksum"]
+ else:
+ vdu_list[index]['VNFC image'] = image_path_
+ if "image name" in vdu_list[index]:
+ del vdu_list[index]["image name"]
+ if "image checksum" in vdu_list[index]:
+ del vdu_list[index]["image checksum"]
+ index += 1
+ if args.image_name: # image name precedes if both are supplied
+ index = 0
+ for image_name_ in args.image_name.split(","):
+ if api_version == "/v3":
+ if vdu_list[index].get("image"):
+ vdu_list[index]['image'] = image_name_
+ if "image-checksum" in vdu_list[index]:
+ del vdu_list[index]["image-checksum"]
+ if vdu_list[index].get("alternative-images"):
+ for a_image in vdu_list[index]["alternative-images"]:
+ a_image['image'] = image_name_
+ if "image-checksum" in a_image:
+ del a_image["image-checksum"]
+ else: # image name in volumes
+ vdu_list[index]["volumes"][0]["image"] = image_name_
+ if "image-checksum" in vdu_list[index]["volumes"][0]:
+ del vdu_list[index]["volumes"][0]["image-checksum"]
+ else:
+ vdu_list[index]['image name'] = image_name_
+ if "VNFC image" in vdu_list[index]:
+ del vdu_list[index]["VNFC image"]
+ index += 1
+ if args.image_checksum:
+ index = 0
+ for image_checksum_ in args.image_checksum.split(","):
+ if api_version == "/v3":
+ if vdu_list[index].get("image"):
+ vdu_list[index]['image-checksum'] = image_checksum_
+ if vdu_list[index].get("alternative-images"):
+ for a_image in vdu_list[index]["alternative-images"]:
+ a_image['image-checksum'] = image_checksum_
+ else: # image name in volumes
+ vdu_list[index]["volumes"][0]["image-checksum"] = image_checksum_
+ else:
+ vdu_list[index]['image checksum'] = image_checksum_
+ index += 1
except (KeyError, TypeError), e:
if str(e) == 'vnf': error_pos= "missing field 'vnf'"
elif str(e) == 'name': error_pos= "missing field 'vnf':'name'"
if not scenario:
print "you must provide a scenario in the file descriptor or with --scenario"
return -1
- myInstance["instance"]["scenario"] = _get_item_uuid("scenarios", scenario, tenant)
+ if isinstance(scenario, str):
+ myInstance["instance"]["scenario"] = _get_item_uuid("scenarios", scenario, tenant)
if args.netmap_use:
if "networks" not in myInstance["instance"]:
myInstance["instance"]["networks"] = {}
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
import sys
import getopt
import yaml
-from os import getenv as os_getenv, path as os_path
+from os import environ, path as os_path
from jsonschema import validate as js_v, exceptions as js_e
import logging
import logging.handlers as log_handlers
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
__date__ = "$26-aug-2014 11:09:29$"
__version__ = "0.6.00"
-version_date = "Sep 2018"
+version_date = "Nov 2018"
database_version = 34 # expected database schema version
-
global global_config
global logger
if __name__=="__main__":
- env_config = {
- 'db_host': 'RO_DB_HOST',
- 'db_name': 'RO_DB_NAME',
- 'db_user': 'RO_DB_USER',
- 'db_passwd': 'RO_DB_PASSWORD',
- 'db_ovim_host': 'RO_DB_OVIM_HOST',
- 'db_ovim_name': 'RO_DB_OVIM_NAME',
- 'db_ovim_user': 'RO_DB_OVIM_USER',
- 'db_ovim_passwd': 'RO_DB_OVIM_PASSWORD',
- 'db_port': 'RO_DB_PORT',
- 'db_port': 'RO_DB_PORT',
+    # env2config maps environ variable names to their corresponding configuration file openmanod.cfg keys.
+    # If an environ variable is defined, its value is taken instead of the one at the configuration file
+ env2config = {
+ 'RO_DB_HOST': 'db_host',
+ 'RO_DB_NAME': 'db_name',
+ 'RO_DB_USER': 'db_user',
+ 'RO_DB_PASSWORD': 'db_passwd',
+ # 'RO_DB_PORT': 'db_port',
+ 'RO_DB_OVIM_HOST': 'db_ovim_host',
+ 'RO_DB_OVIM_NAME': 'db_ovim_name',
+ 'RO_DB_OVIM_USER': 'db_ovim_user',
+ 'RO_DB_OVIM_PASSWORD': 'db_ovim_passwd',
+ # 'RO_DB_OVIM_PORT': 'db_ovim_port',
+ 'RO_LOG_LEVEL': 'log_level',
+ 'RO_LOG_FILE': 'log_file',
}
# Configure logging step 1
hostname = socket.gethostname()
'severity:%(levelname)s logger:%(name)s log:%(message)s'.format(
host=hostname),
datefmt='%Y-%m-%dT%H:%M:%S')
- log_format_simple = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s"
+ log_format_simple = "%(asctime)s %(levelname)s %(name)s %(thread)d %(filename)s:%(lineno)s %(message)s"
log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S')
logging.basicConfig(format=log_format_simple, level= logging.DEBUG)
logger = logging.getLogger('openmano')
global_config['log_socket_port'] = log_socket_port
# override with ENV
- for config_key, env_var in env_config.items():
- if os_getenv(env_var):
- global_config[config_key] = os_getenv(env_var)
-
+ for env_k, env_v in environ.items():
+ try:
+ if not env_k.startswith("RO_") or env_k not in env2config or not env_v:
+ continue
+ global_config[env2config[env_k]] = env_v
+ if env_k.endswith("PORT"): # convert to int, skip if not possible
+ global_config[env2config[env_k]] = int(env_v)
+ except Exception as e:
+ logger.warn("skipping environ '{}={}' because exception '{}'".format(env_k, env_v, e))
# if vnf_repository is not None:
# global_config['vnf_repository'] = vnf_repository
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
cp_name2iface_uuid = {}
cp_name2vm_uuid = {}
cp_name2db_interface = {}
+    vdu_id2cp_name = {}  # stored only when exactly one external connection point is present at this VDU
# table vms (vdus)
vdu_id2uuid = {}
"osm_id": vdu_id,
"name": get_str(vdu, "name", 255),
"description": get_str(vdu, "description", 255),
+ "pdu_type": get_str(vdu, "pdu-type", 255),
"vnf_id": vnf_uuid,
}
vdu_id2uuid[db_vm["osm_id"]] = vm_uuid
# table interfaces (internal/external interfaces)
flavor_epa_interfaces = []
- vdu_id2cp_name = {} # stored only when one external connection point is presented at this VDU
# for iface in chain(vdu.get("internal-interface").itervalues(), vdu.get("external-interface").itervalues()):
for iface in vdu.get("interface").itervalues():
flavor_epa_interface = {}
if iface.get("virtual-interface").get("type") == "OM-MGMT":
db_interface["type"] = "mgmt"
- elif iface.get("virtual-interface").get("type") in ("VIRTIO", "E1000"):
+ elif iface.get("virtual-interface").get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
db_interface["type"] = "bridge"
db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12)
elif iface.get("virtual-interface").get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
"'member-vdus':'{vdu}'. Reference to a non-existing vdu".format(
vnf=vnfd_id, pg=pg_name, vdu=vdu_id),
httperrors.Bad_Request)
- db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
+ if vdu_id2db_table_index[vdu_id]:
+ db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
# TODO consider the case of isolation and not colocation
# if pg.get("strategy") == "ISOLATION":
mgmt_access["vm_id"] = vdu_id2uuid[vnfd["mgmt-interface"]["vdu-id"]]
# if only one cp is defined by this VDU, mark this interface as of type "mgmt"
if vdu_id2cp_name.get(mgmt_vdu_id):
- cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]["type"] = "mgmt"
+ if cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]:
+ cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]["type"] = "mgmt"
if vnfd["mgmt-interface"].get("ip-address"):
mgmt_access["ip-address"] = str(vnfd["mgmt-interface"].get("ip-address"))
if vnfd["mgmt-interface"].get("cp"):
if vnfd["mgmt-interface"]["cp"] not in cp_name2iface_uuid:
- raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'cp':'{cp}'. "
+ raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'cp'['{cp}']. "
"Reference to a non-existing connection-point".format(
vnf=vnfd_id, cp=vnfd["mgmt-interface"]["cp"]),
httperrors.Bad_Request)
mgmt_access["vm_id"] = cp_name2vm_uuid[vnfd["mgmt-interface"]["cp"]]
mgmt_access["interface_id"] = cp_name2iface_uuid[vnfd["mgmt-interface"]["cp"]]
# mark this interface as of type mgmt
- cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]["type"] = "mgmt"
+ if cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]:
+ cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]["type"] = "mgmt"
default_user = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}).get("ssh-access", {}),
"default-user", 64)
SELECT=('vms.uuid as uuid', 'vms.osm_id as osm_id', 'vms.name as name', 'vms.description as description',
'boot_data'),
WHERE={'vnfs.uuid': vnf_id} )
- if len(content)==0:
- raise NfvoException("vnf '{}' not found".format(vnf_id), httperrors.Not_Found)
+ if len(content) != 0:
+ #raise NfvoException("vnf '{}' not found".format(vnf_id), httperrors.Not_Found)
# change boot_data into boot-data
- for vm in content:
- if vm.get("boot_data"):
- vm["boot-data"] = yaml.safe_load(vm["boot_data"])
- del vm["boot_data"]
+ for vm in content:
+ if vm.get("boot_data"):
+ vm["boot-data"] = yaml.safe_load(vm["boot_data"])
+ del vm["boot_data"]
- data['vnf']['VNFC'] = content
+ data['vnf']['VNFC'] = content
#TODO: GET all the information from a VNFC and include it in the output.
#GET NET
rollbackList = []
# print "Checking that the scenario exists and getting the scenario dictionary"
- scenarioDict = mydb.get_scenario(scenario, tenant_id, datacenter_vim_id=myvim_threads_id[default_datacenter_id],
- datacenter_id=default_datacenter_id)
+ if isinstance(scenario, str):
+ scenarioDict = mydb.get_scenario(scenario, tenant_id, datacenter_vim_id=myvim_threads_id[default_datacenter_id],
+ datacenter_id=default_datacenter_id)
+ else:
+ scenarioDict = scenario
+ scenarioDict["uuid"] = None
# logger.debug(">>>>>> Dictionaries before merging")
# logger.debug(">>>>>> InstanceDict:\n{}".format(yaml.safe_dump(instance_dict,default_flow_style=False, width=256)))
number_mgmt_networks = 0
db_instance_nets = []
for sce_net in scenarioDict['nets']:
+ sce_net_uuid = sce_net.get('uuid', sce_net["name"])
# get involved datacenters where this network need to be created
involved_datacenters = []
- for sce_vnf in scenarioDict.get("vnfs"):
+ for sce_vnf in scenarioDict.get("vnfs", ()):
vnf_datacenter = sce_vnf.get("datacenter", default_datacenter_id)
if vnf_datacenter in involved_datacenters:
continue
if sce_vnf_ifaces.get("sce_net_id") == sce_net["uuid"]:
involved_datacenters.append(vnf_datacenter)
break
+ if not involved_datacenters:
+ involved_datacenters.append(default_datacenter_id)
descriptor_net = {}
if instance_dict.get("networks") and instance_dict["networks"].get(sce_net["name"]):
descriptor_net = instance_dict["networks"][sce_net["name"]]
net_name = descriptor_net.get("vim-network-name")
- sce_net2instance[sce_net['uuid']] = {}
- net2task_id['scenario'][sce_net['uuid']] = {}
+ # add datacenters from instantiation parameters
+ if descriptor_net.get("sites"):
+ for site in descriptor_net["sites"]:
+ if site.get("datacenter") and site["datacenter"] not in involved_datacenters:
+ involved_datacenters.append(site["datacenter"])
+ sce_net2instance[sce_net_uuid] = {}
+ net2task_id['scenario'][sce_net_uuid] = {}
if sce_net["external"]:
number_mgmt_networks += 1
myvim_thread_id = myvim_threads_id[datacenter_id]
net_type = sce_net['type']
+ net_vim_name = None
lookfor_filter = {'admin_state_up': True, 'status': 'ACTIVE'} # 'shared': True
if not net_name:
# fill database content
net_uuid = str(uuid4())
uuid_list.append(net_uuid)
- sce_net2instance[sce_net['uuid']][datacenter_id] = net_uuid
+ sce_net2instance[sce_net_uuid][datacenter_id] = net_uuid
db_net = {
"uuid": net_uuid,
'vim_net_id': None,
+ "vim_name": net_vim_name,
"instance_scenario_id": instance_uuid,
- "sce_net_id": sce_net["uuid"],
+ "sce_net_id": sce_net.get("uuid"),
"created": create_network,
'datacenter_id': datacenter_id,
'datacenter_tenant_id': myvim_thread_id,
"item_id": net_uuid,
"extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
}
- net2task_id['scenario'][sce_net['uuid']][datacenter_id] = task_index
+ net2task_id['scenario'][sce_net_uuid][datacenter_id] = task_index
task_index += 1
db_vim_actions.append(db_vim_action)
"myvims": myvims,
"cloud_config": cloud_config,
"RO_pub_key": tenant[0].get('RO_pub_key'),
+ "instance_parameters": instance_dict,
}
vnf_params_out = {
"task_index": task_index,
"sce_net2instance": sce_net2instance,
}
# sce_vnf_list = sorted(scenarioDict['vnfs'], key=lambda k: k['name'])
- for sce_vnf in scenarioDict['vnfs']: # sce_vnf_list:
+ for sce_vnf in scenarioDict.get('vnfs', ()): # sce_vnf_list:
instantiate_vnf(mydb, sce_vnf, vnf_params, vnf_params_out, rollbackList)
task_index = vnf_params_out["task_index"]
uuid_list = vnf_params_out["uuid_list"]
# Create VNFFGs
# task_depends_on = []
- for vnffg in scenarioDict['vnffgs']:
+ for vnffg in scenarioDict.get('vnffgs', ()):
for rsp in vnffg['rsps']:
sfs_created = []
for cp in rsp['connection_points']:
db_net = {
"uuid": net_uuid,
'vim_net_id': None,
+ "vim_name": net_name,
"instance_scenario_id": instance_uuid,
"net_id": net["uuid"],
"created": True,
if sce_vnf.get('mgmt_access'):
ssh_access = sce_vnf['mgmt_access'].get('config-access', {}).get('ssh-access')
vnf_availability_zones = []
- for vm in sce_vnf['vms']:
+ for vm in sce_vnf.get('vms'):
vm_av = vm.get('availability_zone')
if vm_av and vm_av not in vnf_availability_zones:
vnf_availability_zones.append(vm_av)
db_instance_vnfs.append(db_instance_vnf)
for vm in sce_vnf['vms']:
+ # skip PDUs
+ if vm.get("pdu_type"):
+ continue
+
myVMDict = {}
sce_vnf_name = sce_vnf['member_vnf_index'] if sce_vnf['member_vnf_index'] else sce_vnf['name']
myVMDict['name'] = "{}-{}-{}".format(instance_name[:64], sce_vnf_name[:64], vm["name"][:64])
myVMDict['networks'] = []
task_depends_on = []
# TODO ALF. connect_mgmt_interfaces. Connect management interfaces if this is true
+ is_management_vm = False
db_vm_ifaces = []
for iface in vm['interfaces']:
netDict = {}
Try to delete and create the scenarios and VNFs again", httperrors.Conflict)
else:
raise NfvoException(e_text, httperrors.Internal_Server_Error)
- if netDict["use"] == "mgmt" or netDict["use"] == "bridge":
+ if netDict["use"] == "mgmt":
+ is_management_vm = True
+ netDict["type"] = "virtual"
+ if netDict["use"] == "bridge":
netDict["type"] = "virtual"
if iface.get("vpci"):
netDict['vpci'] = iface['vpci']
# We add the RO key to cloud_config if vnf will need ssh access
cloud_config_vm = cloud_config
- if ssh_access and ssh_access['required'] and ssh_access['default-user'] and tenant[0].get('RO_pub_key'):
- RO_key = {"key-pairs": [tenant[0]['RO_pub_key']]}
- cloud_config_vm = unify_cloud_config(cloud_config_vm, RO_key)
+ if is_management_vm and params["instance_parameters"].get("mgmt_keys"):
+ cloud_config_vm = unify_cloud_config({"key-pairs": params["instance_parameters"]["mgmt_keys"]},
+ cloud_config_vm)
+
+ if vm.get("instance_parameters") and vm["instance_parameters"].get("mgmt_keys"):
+ cloud_config_vm = unify_cloud_config({"key-pairs": vm["instance_parameters"]["mgmt_keys"]},
+ cloud_config_vm)
+ # if ssh_access and ssh_access['required'] and ssh_access['default-user'] and tenant[0].get('RO_pub_key'):
+ # RO_key = {"key-pairs": [tenant[0]['RO_pub_key']]}
+ # cloud_config_vm = unify_cloud_config(cloud_config_vm, RO_key)
if vm.get("boot_data"):
cloud_config_vm = unify_cloud_config(vm["boot_data"], cloud_config_vm)
# "number_tasks": 0 # filled bellow
}
- # 2.1 deleting VMs
- # vm_fail_list=[]
- for sce_vnf in instanceDict['vnfs']:
- datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
- vimthread_affected[sce_vnf["datacenter_tenant_id"]] = None
- if datacenter_key not in myvims:
- try:
- _,myvim_thread = get_vim_thread(mydb, tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
- except NfvoException as e:
- logger.error(str(e))
- myvim_thread = None
- myvim_threads[datacenter_key] = myvim_thread
- vims = get_vim(mydb, tenant_id, datacenter_id=sce_vnf["datacenter_id"],
- datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
- if len(vims) == 0:
- logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"],
- sce_vnf["datacenter_tenant_id"]))
- myvims[datacenter_key] = None
- else:
- myvims[datacenter_key] = vims.values()[0]
- myvim = myvims[datacenter_key]
- myvim_thread = myvim_threads[datacenter_key]
- for vm in sce_vnf['vms']:
- if not myvim:
- error_msg += "\n VM id={} cannot be deleted because datacenter={} not found".format(vm['vim_vm_id'], sce_vnf["datacenter_id"])
- continue
- db_vim_action = {
- "instance_action_id": instance_action_id,
- "task_index": task_index,
- "datacenter_vim_id": sce_vnf["datacenter_tenant_id"],
- "action": "DELETE",
- "status": "SCHEDULED",
- "item": "instance_vms",
- "item_id": vm["uuid"],
- "extra": yaml.safe_dump({"params": vm["interfaces"]},
- default_flow_style=True, width=256)
- }
- db_vim_actions.append(db_vim_action)
- for interface in vm["interfaces"]:
- if not interface.get("instance_net_id"):
- continue
- if interface["instance_net_id"] not in net2vm_dependencies:
- net2vm_dependencies[interface["instance_net_id"]] = []
- net2vm_dependencies[interface["instance_net_id"]].append(task_index)
- task_index += 1
-
- # 2.2 deleting NETS
- # net_fail_list=[]
- for net in instanceDict['nets']:
- vimthread_affected[net["datacenter_tenant_id"]] = None
- datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
+ # 2.1 deleting VNFFGs
+ for sfp in instanceDict.get('sfps', ()):
+ vimthread_affected[sfp["datacenter_tenant_id"]] = None
+ datacenter_key = (sfp["datacenter_id"], sfp["datacenter_tenant_id"])
if datacenter_key not in myvims:
try:
- _,myvim_thread = get_vim_thread(mydb, tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
+ _, myvim_thread = get_vim_thread(mydb, tenant_id, sfp["datacenter_id"], sfp["datacenter_tenant_id"])
except NfvoException as e:
logger.error(str(e))
myvim_thread = None
myvim_threads[datacenter_key] = myvim_thread
- vims = get_vim(mydb, tenant_id, datacenter_id=net["datacenter_id"],
- datacenter_tenant_id=net["datacenter_tenant_id"])
+ vims = get_vim(mydb, tenant_id, datacenter_id=sfp["datacenter_id"],
+ datacenter_tenant_id=sfp["datacenter_tenant_id"])
if len(vims) == 0:
- logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfp["datacenter_id"], sfp["datacenter_tenant_id"]))
myvims[datacenter_key] = None
else:
myvims[datacenter_key] = vims.values()[0]
myvim_thread = myvim_threads[datacenter_key]
if not myvim:
- error_msg += "\n Net VIM_id={} cannot be deleted because datacenter={} not found".format(net['vim_net_id'], net["datacenter_id"])
+ error_msg += "\n vim_sfp_id={} cannot be deleted because datacenter={} not found".format(sfp['vim_sfp_id'], sfp["datacenter_id"])
continue
- extra = {"params": (net['vim_net_id'], net['sdn_net_id'])}
- if net2vm_dependencies.get(net["uuid"]):
- extra["depends_on"] = net2vm_dependencies[net["uuid"]]
+ extra = {"params": (sfp['vim_sfp_id'])}
db_vim_action = {
"instance_action_id": instance_action_id,
"task_index": task_index,
- "datacenter_vim_id": net["datacenter_tenant_id"],
+ "datacenter_vim_id": sfp["datacenter_tenant_id"],
"action": "DELETE",
"status": "SCHEDULED",
- "item": "instance_nets",
- "item_id": net["uuid"],
+ "item": "instance_sfps",
+ "item_id": sfp["uuid"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
db_vim_actions.append(db_vim_action)
- # 2.3 deleting VNFFGs
-
- for sfp in instanceDict.get('sfps', ()):
- vimthread_affected[sfp["datacenter_tenant_id"]] = None
- datacenter_key = (sfp["datacenter_id"], sfp["datacenter_tenant_id"])
+ for classification in instanceDict['classifications']:
+ vimthread_affected[classification["datacenter_tenant_id"]] = None
+ datacenter_key = (classification["datacenter_id"], classification["datacenter_tenant_id"])
if datacenter_key not in myvims:
try:
- _,myvim_thread = get_vim_thread(mydb, tenant_id, sfp["datacenter_id"], sfp["datacenter_tenant_id"])
+ _, myvim_thread = get_vim_thread(mydb, tenant_id, classification["datacenter_id"], classification["datacenter_tenant_id"])
except NfvoException as e:
logger.error(str(e))
myvim_thread = None
myvim_threads[datacenter_key] = myvim_thread
- vims = get_vim(mydb, tenant_id, datacenter_id=sfp["datacenter_id"],
- datacenter_tenant_id=sfp["datacenter_tenant_id"])
+ vims = get_vim(mydb, tenant_id, datacenter_id=classification["datacenter_id"],
+ datacenter_tenant_id=classification["datacenter_tenant_id"])
if len(vims) == 0:
- logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfp["datacenter_id"], sfp["datacenter_tenant_id"]))
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(classification["datacenter_id"],
+ classification["datacenter_tenant_id"]))
myvims[datacenter_key] = None
else:
myvims[datacenter_key] = vims.values()[0]
myvim_thread = myvim_threads[datacenter_key]
if not myvim:
- error_msg += "\n vim_sfp_id={} cannot be deleted because datacenter={} not found".format(sfp['vim_sfp_id'], sfp["datacenter_id"])
+ error_msg += "\n vim_classification_id={} cannot be deleted because datacenter={} not found".format(classification['vim_classification_id'],
+ classification["datacenter_id"])
continue
- extra = {"params": (sfp['vim_sfp_id'])}
+ depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfps"]
+ extra = {"params": (classification['vim_classification_id']), "depends_on": depends_on}
db_vim_action = {
"instance_action_id": instance_action_id,
"task_index": task_index,
- "datacenter_vim_id": sfp["datacenter_tenant_id"],
+ "datacenter_vim_id": classification["datacenter_tenant_id"],
"action": "DELETE",
"status": "SCHEDULED",
- "item": "instance_sfps",
- "item_id": sfp["uuid"],
+ "item": "instance_classifications",
+ "item_id": classification["uuid"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
datacenter_key = (sf["datacenter_id"], sf["datacenter_tenant_id"])
if datacenter_key not in myvims:
try:
- _,myvim_thread = get_vim_thread(mydb, tenant_id, sf["datacenter_id"], sf["datacenter_tenant_id"])
+ _, myvim_thread = get_vim_thread(mydb, tenant_id, sf["datacenter_id"], sf["datacenter_tenant_id"])
except NfvoException as e:
logger.error(str(e))
myvim_thread = None
if not myvim:
error_msg += "\n vim_sf_id={} cannot be deleted because datacenter={} not found".format(sf['vim_sf_id'], sf["datacenter_id"])
continue
- extra = {"params": (sf['vim_sf_id'])}
+ depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfps"]
+ extra = {"params": (sf['vim_sf_id']), "depends_on": depends_on}
db_vim_action = {
"instance_action_id": instance_action_id,
"task_index": task_index,
datacenter_key = (sfi["datacenter_id"], sfi["datacenter_tenant_id"])
if datacenter_key not in myvims:
try:
- _,myvim_thread = get_vim_thread(mydb, tenant_id, sfi["datacenter_id"], sfi["datacenter_tenant_id"])
+ _, myvim_thread = get_vim_thread(mydb, tenant_id, sfi["datacenter_id"], sfi["datacenter_tenant_id"])
except NfvoException as e:
logger.error(str(e))
myvim_thread = None
if not myvim:
error_msg += "\n vim_sfi_id={} cannot be deleted because datacenter={} not found".format(sfi['vim_sfi_id'], sfi["datacenter_id"])
continue
- extra = {"params": (sfi['vim_sfi_id'])}
+ depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfs"]
+ extra = {"params": (sfi['vim_sfi_id']), "depends_on": depends_on}
db_vim_action = {
"instance_action_id": instance_action_id,
"task_index": task_index,
task_index += 1
db_vim_actions.append(db_vim_action)
- for classification in instanceDict['classifications']:
- vimthread_affected[classification["datacenter_tenant_id"]] = None
- datacenter_key = (classification["datacenter_id"], classification["datacenter_tenant_id"])
+ # 2.2 deleting VMs
+ # vm_fail_list=[]
+ for sce_vnf in instanceDict.get('vnfs', ()):
+ datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
+ vimthread_affected[sce_vnf["datacenter_tenant_id"]] = None
if datacenter_key not in myvims:
try:
- _,myvim_thread = get_vim_thread(mydb, tenant_id, classification["datacenter_id"], classification["datacenter_tenant_id"])
+ _, myvim_thread = get_vim_thread(mydb, tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
except NfvoException as e:
logger.error(str(e))
myvim_thread = None
myvim_threads[datacenter_key] = myvim_thread
- vims = get_vim(mydb, tenant_id, datacenter_id=classification["datacenter_id"],
- datacenter_tenant_id=classification["datacenter_tenant_id"])
+ vims = get_vim(mydb, tenant_id, datacenter_id=sce_vnf["datacenter_id"],
+ datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
if len(vims) == 0:
- logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(classification["datacenter_id"], classification["datacenter_tenant_id"]))
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"],
+ sce_vnf["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+
+ for vm in sce_vnf['vms']:
+ if not myvim:
+ error_msg += "\n VM id={} cannot be deleted because datacenter={} not found".format(vm['vim_vm_id'], sce_vnf["datacenter_id"])
+ continue
+ sfi_dependencies = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfis"]
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": sce_vnf["datacenter_tenant_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_vms",
+ "item_id": vm["uuid"],
+ "extra": yaml.safe_dump({"params": vm["interfaces"], "depends_on": sfi_dependencies},
+ default_flow_style=True, width=256)
+ }
+ db_vim_actions.append(db_vim_action)
+ for interface in vm["interfaces"]:
+ if not interface.get("instance_net_id"):
+ continue
+ if interface["instance_net_id"] not in net2vm_dependencies:
+ net2vm_dependencies[interface["instance_net_id"]] = []
+ net2vm_dependencies[interface["instance_net_id"]].append(task_index)
+ task_index += 1
+
+ # 2.3 deleting NETS
+ # net_fail_list=[]
+ for net in instanceDict['nets']:
+ vimthread_affected[net["datacenter_tenant_id"]] = None
+ datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
+ _,myvim_thread = get_vim_thread(mydb, tenant_id, net["datacenter_id"], net["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=net["datacenter_id"],
+ datacenter_tenant_id=net["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
myvims[datacenter_key] = None
else:
myvims[datacenter_key] = vims.values()[0]
myvim_thread = myvim_threads[datacenter_key]
if not myvim:
- error_msg += "\n vim_classification_id={} cannot be deleted because datacenter={} not found".format(classification['vim_classification_id'], classification["datacenter_id"])
+ error_msg += "\n Net VIM_id={} cannot be deleted because datacenter={} not found".format(net['vim_net_id'], net["datacenter_id"])
continue
- extra = {"params": (classification['vim_classification_id'])}
+ extra = {"params": (net['vim_net_id'], net['sdn_net_id'])}
+ if net2vm_dependencies.get(net["uuid"]):
+ extra["depends_on"] = net2vm_dependencies[net["uuid"]]
+ sfi_dependencies = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfis"]
+ if len(sfi_dependencies) > 0:
+ if "depends_on" in extra:
+ extra["depends_on"] += sfi_dependencies
+ else:
+ extra["depends_on"] = sfi_dependencies
db_vim_action = {
"instance_action_id": instance_action_id,
"task_index": task_index,
- "datacenter_vim_id": classification["datacenter_tenant_id"],
+ "datacenter_vim_id": net["datacenter_tenant_id"],
"action": "DELETE",
"status": "SCHEDULED",
- "item": "instance_classifications",
- "item_id": classification["uuid"],
+ "item": "instance_nets",
+ "item_id": net["uuid"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"description": "SCALE",
}
vm_result["instance_action_id"] = instance_action_id
+ vm_result["created"] = []
+ vm_result["deleted"] = []
task_index = 0
for vdu in action_dict["vdu-scaling"]:
vdu_id = vdu.get("vdu-id")
member_vnf_index = vdu.get("member-vnf-index")
vdu_count = vdu.get("count", 1)
if vdu_id:
- target_vm = mydb.get_rows(
+ target_vms = mydb.get_rows(
FROM="instance_vms as vms join instance_vnfs as vnfs on vms.instance_vnf_id=vnfs.uuid",
WHERE={"vms.uuid": vdu_id},
ORDER_BY="vms.created_at"
)
- if not target_vm:
+ if not target_vms:
raise NfvoException("Cannot find the vdu with id {}".format(vdu_id), httperrors.Not_Found)
else:
if not osm_vdu_id and not member_vnf_index:
- raise NfvoException("Invalid imput vdu parameters. Must supply either 'vdu-id' of 'osm_vdu_id','member-vnf-index'")
- target_vm = mydb.get_rows(
+ raise NfvoException("Invalid input vdu parameters. Must supply either 'vdu-id' of 'osm_vdu_id','member-vnf-index'")
+ target_vms = mydb.get_rows(
# SELECT=("ivms.uuid", "ivnfs.datacenter_id", "ivnfs.datacenter_tenant_id"),
FROM="instance_vms as ivms join instance_vnfs as ivnfs on ivms.instance_vnf_id=ivnfs.uuid"\
" join sce_vnfs as svnfs on ivnfs.sce_vnf_id=svnfs.uuid"\
" join vms on ivms.vm_id=vms.uuid",
- WHERE={"vms.osm_id": osm_vdu_id, "svnfs.member_vnf_index": member_vnf_index},
+ WHERE={"vms.osm_id": osm_vdu_id, "svnfs.member_vnf_index": member_vnf_index,
+ "ivnfs.instance_scenario_id": instance_id},
ORDER_BY="ivms.created_at"
)
- if not target_vm:
+ if not target_vms:
raise NfvoException("Cannot find the vdu with osm_vdu_id {} and member-vnf-index {}".format(osm_vdu_id, member_vnf_index), httperrors.Not_Found)
- vdu_id = target_vm[-1]["uuid"]
- vm_result[vdu_id] = {"created": [], "deleted": [], "description": "scheduled"}
- target_vm = target_vm[-1]
+ vdu_id = target_vms[-1]["uuid"]
+ target_vm = target_vms[-1]
datacenter = target_vm["datacenter_id"]
myvim_threads_id[datacenter], _ = get_vim_thread(mydb, nfvo_tenant, datacenter)
+
if vdu["type"] == "delete":
- # look for nm
- vm_interfaces = None
- for sce_vnf in instanceDict['vnfs']:
- for vm in sce_vnf['vms']:
- if vm["uuid"] == vdu_id:
- vm_interfaces = vm["interfaces"]
- break
+ for index in range(0, vdu_count):
+ target_vm = target_vms[-1-index]
+ vdu_id = target_vm["uuid"]
+ # look for nm
+ vm_interfaces = None
+ for sce_vnf in instanceDict['vnfs']:
+ for vm in sce_vnf['vms']:
+ if vm["uuid"] == vdu_id:
+ vm_interfaces = vm["interfaces"]
+ break
- db_vim_action = {
- "instance_action_id": instance_action_id,
- "task_index": task_index,
- "datacenter_vim_id": target_vm["datacenter_tenant_id"],
- "action": "DELETE",
- "status": "SCHEDULED",
- "item": "instance_vms",
- "item_id": target_vm["uuid"],
- "extra": yaml.safe_dump({"params": vm_interfaces},
- default_flow_style=True, width=256)
- }
- task_index += 1
- db_vim_actions.append(db_vim_action)
- vm_result[vdu_id]["deleted"].append(vdu_id)
- # delete from database
- db_instance_vms.append({"TO-DELETE": vdu_id})
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": target_vm["datacenter_tenant_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_vms",
+ "item_id": vdu_id,
+ "extra": yaml.safe_dump({"params": vm_interfaces},
+ default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ vm_result["deleted"].append(vdu_id)
+ # delete from database
+ db_instance_vms.append({"TO-DELETE": vdu_id})
else: # vdu["type"] == "create":
iface2iface = {}
vm_name = target_vm.get('vim_name')
try:
suffix = vm_name.rfind("-")
- vm_name = vm_name[:suffix+1] + str(1 + int(vm_name[suffix+1:]))
+ vm_name = vm_name[:suffix+1] + str(index + 1 + int(vm_name[suffix+1:]))
except Exception:
pass
db_instance_vm = {
}
task_index += 1
db_vim_actions.append(db_vim_action)
- vm_result[vdu_id]["created"].append(vm_uuid)
+ vm_result["created"].append(vm_uuid)
db_instance_action["number_tasks"] = task_index
db_tables = [
try:
datacenter_sdn_port_mapping_delete(mydb, None, datacenter_id)
except ovimException as e:
- raise NfvoException("Error deleting datacenter-port-mapping " + str(e), HTTP_Conflict)
+ raise NfvoException("Error deleting datacenter-port-mapping " + str(e), httperrors.Conflict)
mydb.update_rows('datacenters', datacenter_descriptor, where)
if new_sdn_port_mapping:
except ovimException as e:
# Rollback
mydb.update_rows('datacenters', datacenter, where)
- raise NfvoException("Error adding datacenter-port-mapping " + str(e), HTTP_Conflict)
+ raise NfvoException("Error adding datacenter-port-mapping " + str(e), httperrors.Conflict)
return datacenter_id
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# vms
cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, image_list, vms.name as name," \
" vms.description as description, vms.boot_data as boot_data, count," \
- " vms.availability_zone as availability_zone, vms.osm_id as osm_id" \
+ " vms.availability_zone as availability_zone, vms.osm_id as osm_id, vms.pdu_type" \
" FROM vnfs join vms on vnfs.uuid=vms.vnf_id" \
" WHERE vnfs.uuid='" + vnf['vnf_id'] + "'" \
" ORDER BY vms.created_at"
self.cur.execute(cmd)
instance_dict['vnfs'] = self.cur.fetchall()
for vnf in instance_dict['vnfs']:
-
+ vnf["ip_address"] = None
vnf_mgmt_access_iface = None
vnf_mgmt_access_vm = None
if vnf["mgmt_access"]:
vm['interfaces'] = self.cur.fetchall()
for iface in vm['interfaces']:
if vnf_mgmt_access_iface and vnf_mgmt_access_iface == iface["uuid"]:
- vnf["ip_address"] = iface["ip_address"]
+ if not vnf["ip_address"]:
+ vnf["ip_address"] = iface["ip_address"]
if iface["type"] == "mgmt" and iface["ip_address"]:
vm_manage_iface_list.append(iface["ip_address"])
if not verbose:
del iface["uuid"]
if vm_manage_iface_list:
vm["ip_address"] = ",".join(vm_manage_iface_list)
- if vnf_mgmt_access_vm == vm["vm_uuid"]:
- vnf["ip_address"] = vm["ip_address"]
- elif not vnf.get("ip_address"):
+ if not vnf["ip_address"] and vnf_mgmt_access_vm == vm["vm_uuid"]:
vnf["ip_address"] = vm["ip_address"]
del vm["vm_uuid"]
#from_text = "instance_nets join instance_scenarios on instance_nets.instance_scenario_id=instance_scenarios.uuid " + \
# "join sce_nets on instance_scenarios.scenario_id=sce_nets.scenario_id"
#where_text = "instance_nets.instance_scenario_id='"+ instance_dict['uuid'] + "'"
- cmd = "SELECT uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, net_id as vnf_net_id, datacenter_id, datacenter_tenant_id, sdn_net_id"\
- " FROM instance_nets" \
- " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+ cmd = "SELECT inets.uuid as uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, " \
+ "net_id as vnf_net_id, datacenter_id, datacenter_tenant_id, sdn_net_id, " \
+ "snets.osm_id as ns_net_osm_id, nets.osm_id as vnf_net_osm_id, inets.vim_name " \
+ "FROM instance_nets as inets left join sce_nets as snets on inets.sce_net_id=snets.uuid " \
+ "left join nets on inets.net_id=nets.uuid " \
+ "WHERE instance_scenario_id='{}' ORDER BY inets.created_at".format(instance_dict['uuid'])
self.logger.debug(cmd)
self.cur.execute(cmd)
instance_dict['nets'] = self.cur.fetchall()
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
id_schema_fake = {"type" : "string", "minLength":2, "maxLength":36 } #"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
pci_schema={"type":"string", "pattern":"^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\.[0-9a-fA-F]$"}
-pci_extended_schema = {"type": "string", "pattern": "^[0-9a-fA-F.:-\[\]]$"}
+# allows [] for wildcards. For that reason huge length limit is set
+pci_extended_schema = {"type": "string", "pattern": "^[0-9a-fA-F.:-\[\]]{12,40}$"}
http_schema={"type":"string", "pattern":"^https?://[^'\"=]+$"}
bandwidth_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]bps)?$"}
"bandwidth":bandwidth_schema,
"vpci":pci_schema,
"mac_address": mac_schema,
- "model": {"type":"string", "enum":["virtio","e1000","ne2k_pci","pcnet","rtl8139"]},
+ "model": {"type":"string", "enum":["virtio","e1000","ne2k_pci","pcnet","rtl8139", "paravirt"]},
"port-security": {"type" : "boolean"},
"floating-ip": {"type" : "boolean"}
},
"additionalProperties": False
}
+instance_scenario_object = {
+ "title": "scenario object used to create an instance not based on any nsd",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ "nets": {
+ "type": "array",
+ "minLength": 1,
+ "items": {
+ "type": "object",
+ "properties": {
+ "name": name_schema,
+ "external": {"type": "boolean"},
+ "type": {"enum": ["bridge", "ptp", "data"]}, # for overlay, underlay E-LINE, underlay E-LAN
+ },
+ "additionalProperties": False,
+ "required": ["name", "external", "type"]
+ }
+ }
+ },
+ "additionalProperties": False,
+ "required": ["nets"]
+}
+
instance_scenario_create_schema_v01 = {
- "title":"instance scenario create information schema v0.1",
+ "title": "instance scenario create information schema v0.1",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
+ "type": "object",
+ "properties": {
"schema_version": {"type": "string", "enum": ["0.1"]},
- "instance":{
- "type":"object",
- "properties":{
- "name":name_schema,
+ "instance": {
+ "type": "object",
+ "properties": {
+ "mgmt_keys": {"type": "array", "items": {"type":"string"}},
+ "vduImage": name_schema,
+ "name": name_schema,
"description":description_schema,
"datacenter": name_schema,
- "scenario" : name_schema, #can be an UUID or name
+ "scenario" : {"oneOff": [name_schema, instance_scenario_object]}, # can be an UUID or name or a dict
"action":{"enum": ["deploy","reserve","verify" ]},
"connect_mgmt_interfaces": {"oneOf": [{"type":"boolean"}, {"type":"object"}]},# can be true or a dict with datacenter: net_name
"cloud-config": cloud_config_schema, #common to all vnfs in the instance scenario
".": {
"type": "object",
"properties": {
- "name": name_schema, # overrides vdu name schema
+ "name": name_schema, # overrides vdu name schema
+ "mgmt_keys": {"type": "array", "items": {"type": "string"}},
+ "vduImage": name_schema,
"devices": {
"type": "object",
"patternProperties": {
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#general logging parameters
#choose among: DEBUG, INFO, WARNING, ERROR, CRITICAL
-log_level: DEBUG #general log levels for internal logging
+log_level: INFO #general log levels for internal logging
#standard output is used unless 'log_file' is specify
#log_file: /var/log/openmano/openmano.log
'123', 'openstackvim', '456', '789', 'http://dummy.url', None,
'user', 'pass')
- def _test_new_sfi(self, create_port_pair, sfc_encap,
+ def _test_new_sfi(self, create_sfc_port_pair, sfc_encap,
ingress_ports=['5311c75d-d718-4369-bbda-cdcc6da60fcc'],
egress_ports=['230cdf1b-de37-4891-bc07-f9010cf1f967']):
# input to VIM connector
# + ingress_ports
# + egress_ports
# TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
- correlation = 'mpls'
+ correlation = 'nsh'
if sfc_encap is not None:
if not sfc_encap:
correlation = None
'egress': egress_ports[0] if len(egress_ports) else None,
'service_function_parameters': {'correlation': correlation}
}}
- create_port_pair.return_value = dict_from_neutron
+ create_sfc_port_pair.return_value = dict_from_neutron
# what the VIM connector is expected to
# send to OpenStack based on the input
sfc_encap)
# assert that the VIM connector made the expected call to OpenStack
- create_port_pair.assert_called_with(dict_to_neutron)
+ create_sfc_port_pair.assert_called_with(dict_to_neutron)
# assert that the VIM connector had the expected result / return value
self.assertEqual(result, dict_from_neutron['port_pair']['id'])
- def _test_new_sf(self, create_port_pair_group):
+ def _test_new_sf(self, create_sfc_port_pair_group):
# input to VIM connector
name = 'osm_sf'
instances = ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd',
"egress_n_tuple": {}
}}
}}
- create_port_pair_group.return_value = dict_from_neutron
+ create_sfc_port_pair_group.return_value = dict_from_neutron
# what the VIM connector is expected to
# send to OpenStack based on the input
result = self.vimconn.new_sf(name, instances)
# assert that the VIM connector made the expected call to OpenStack
- create_port_pair_group.assert_called_with(dict_to_neutron)
+ create_sfc_port_pair_group.assert_called_with(dict_to_neutron)
# assert that the VIM connector had the expected result / return value
self.assertEqual(result, dict_from_neutron['port_pair_group']['id'])
- def _test_new_sfp(self, create_port_chain, sfc_encap, spi):
+ def _test_new_sfp(self, create_sfc_port_chain, sfc_encap, spi):
# input to VIM connector
name = 'osm_sfp'
classifications = ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19',
'd8bfdb5d-195e-4f34-81aa-6135705317df']
# TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
- correlation = 'mpls'
+ correlation = 'nsh'
chain_id = 33
- if sfc_encap is not None:
- if not sfc_encap:
- correlation = None
if spi:
chain_id = spi
'port_pair_groups': sfs,
'chain_parameters': {'correlation': correlation}
}}
- create_port_chain.return_value = dict_from_neutron
+ create_sfc_port_chain.return_value = dict_from_neutron
# what the VIM connector is expected to
# send to OpenStack based on the input
sfc_encap, spi)
# assert that the VIM connector made the expected call to OpenStack
- create_port_chain.assert_called_with(dict_to_neutron)
+ create_sfc_port_chain.assert_called_with(dict_to_neutron)
# assert that the VIM connector had the expected result / return value
self.assertEqual(result, dict_from_neutron['port_chain']['id'])
- def _test_new_classification(self, create_flow_classifier, ctype):
+ def _test_new_classification(self, create_sfc_flow_classifier, ctype):
# input to VIM connector
name = 'osm_classification'
definition = {'ethertype': 'IPv4',
'tenant_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c'
dict_from_neutron['flow_classifier'][
'project_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c'
- create_flow_classifier.return_value = dict_from_neutron
+ create_sfc_flow_classifier.return_value = dict_from_neutron
# what the VIM connector is expected to
# send to OpenStack based on the input
result = self.vimconn.new_classification(name, ctype, definition)
# assert that the VIM connector made the expected call to OpenStack
- create_flow_classifier.assert_called_with(dict_to_neutron)
+ create_sfc_flow_classifier.assert_called_with(dict_to_neutron)
# assert that the VIM connector had the expected result / return value
self.assertEqual(result, dict_from_neutron['flow_classifier']['id'])
- @mock.patch.object(Client, 'create_flow_classifier')
- def test_new_classification(self, create_flow_classifier):
- self._test_new_classification(create_flow_classifier,
+ @mock.patch.object(Client, 'create_sfc_flow_classifier')
+ def test_new_classification(self, create_sfc_flow_classifier):
+ self._test_new_classification(create_sfc_flow_classifier,
'legacy_flow_classifier')
- @mock.patch.object(Client, 'create_flow_classifier')
- def test_new_classification_unsupported_type(self, create_flow_classifier):
+ @mock.patch.object(Client, 'create_sfc_flow_classifier')
+ def test_new_classification_unsupported_type(self, create_sfc_flow_classifier):
self.assertRaises(vimconn.vimconnNotSupportedException,
self._test_new_classification,
- create_flow_classifier, 'h265')
+ create_sfc_flow_classifier, 'h265')
- @mock.patch.object(Client, 'create_port_pair')
- def test_new_sfi_with_sfc_encap(self, create_port_pair):
- self._test_new_sfi(create_port_pair, True)
+ @mock.patch.object(Client, 'create_sfc_port_pair')
+ def test_new_sfi_with_sfc_encap(self, create_sfc_port_pair):
+ self._test_new_sfi(create_sfc_port_pair, True)
- @mock.patch.object(Client, 'create_port_pair')
- def test_new_sfi_without_sfc_encap(self, create_port_pair):
- self._test_new_sfi(create_port_pair, False)
+ @mock.patch.object(Client, 'create_sfc_port_pair')
+ def test_new_sfi_without_sfc_encap(self, create_sfc_port_pair):
+ self._test_new_sfi(create_sfc_port_pair, False)
- @mock.patch.object(Client, 'create_port_pair')
- def test_new_sfi_default_sfc_encap(self, create_port_pair):
- self._test_new_sfi(create_port_pair, None)
+ @mock.patch.object(Client, 'create_sfc_port_pair')
+ def test_new_sfi_default_sfc_encap(self, create_sfc_port_pair):
+ self._test_new_sfi(create_sfc_port_pair, None)
- @mock.patch.object(Client, 'create_port_pair')
- def test_new_sfi_bad_ingress_ports(self, create_port_pair):
+ @mock.patch.object(Client, 'create_sfc_port_pair')
+ def test_new_sfi_bad_ingress_ports(self, create_sfc_port_pair):
ingress_ports = ['5311c75d-d718-4369-bbda-cdcc6da60fcc',
'a0273f64-82c9-11e7-b08f-6328e53f0fa7']
self.assertRaises(vimconn.vimconnNotSupportedException,
self._test_new_sfi,
- create_port_pair, True, ingress_ports=ingress_ports)
+ create_sfc_port_pair, True, ingress_ports=ingress_ports)
ingress_ports = []
self.assertRaises(vimconn.vimconnNotSupportedException,
self._test_new_sfi,
- create_port_pair, True, ingress_ports=ingress_ports)
+ create_sfc_port_pair, True, ingress_ports=ingress_ports)
- @mock.patch.object(Client, 'create_port_pair')
- def test_new_sfi_bad_egress_ports(self, create_port_pair):
+ @mock.patch.object(Client, 'create_sfc_port_pair')
+ def test_new_sfi_bad_egress_ports(self, create_sfc_port_pair):
egress_ports = ['230cdf1b-de37-4891-bc07-f9010cf1f967',
'b41228fe-82c9-11e7-9b44-17504174320b']
self.assertRaises(vimconn.vimconnNotSupportedException,
self._test_new_sfi,
- create_port_pair, True, egress_ports=egress_ports)
+ create_sfc_port_pair, True, egress_ports=egress_ports)
egress_ports = []
self.assertRaises(vimconn.vimconnNotSupportedException,
self._test_new_sfi,
- create_port_pair, True, egress_ports=egress_ports)
+ create_sfc_port_pair, True, egress_ports=egress_ports)
@mock.patch.object(vimconnector, 'get_sfi')
- @mock.patch.object(Client, 'create_port_pair_group')
- def test_new_sf(self, create_port_pair_group, get_sfi):
- get_sfi.return_value = {'sfc_encap': 'mpls'}
- self._test_new_sf(create_port_pair_group)
+ @mock.patch.object(Client, 'create_sfc_port_pair_group')
+ def test_new_sf(self, create_sfc_port_pair_group, get_sfi):
+ get_sfi.return_value = {'sfc_encap': True}
+ self._test_new_sf(create_sfc_port_pair_group)
@mock.patch.object(vimconnector, 'get_sfi')
- @mock.patch.object(Client, 'create_port_pair_group')
- def test_new_sf_inconsistent_sfc_encap(self, create_port_pair_group,
+ @mock.patch.object(Client, 'create_sfc_port_pair_group')
+ def test_new_sf_inconsistent_sfc_encap(self, create_sfc_port_pair_group,
get_sfi):
get_sfi.return_value = {'sfc_encap': 'nsh'}
self.assertRaises(vimconn.vimconnNotSupportedException,
- self._test_new_sf, create_port_pair_group)
+ self._test_new_sf, create_sfc_port_pair_group)
- @mock.patch.object(Client, 'create_port_chain')
- def test_new_sfp_with_sfc_encap(self, create_port_chain):
- self._test_new_sfp(create_port_chain, True, None)
+ @mock.patch.object(Client, 'create_sfc_port_chain')
+ def test_new_sfp_with_sfc_encap(self, create_sfc_port_chain):
+ self._test_new_sfp(create_sfc_port_chain, True, None)
- @mock.patch.object(Client, 'create_port_chain')
- def test_new_sfp_without_sfc_encap(self, create_port_chain):
- self.assertRaises(vimconn.vimconnNotSupportedException,
- self._test_new_sfp,
- create_port_chain, False, None)
- self.assertRaises(vimconn.vimconnNotSupportedException,
- self._test_new_sfp,
- create_port_chain, False, 25)
+ @mock.patch.object(Client, 'create_sfc_port_chain')
+ def test_new_sfp_without_sfc_encap(self, create_sfc_port_chain):
+ self._test_new_sfp(create_sfc_port_chain, False, None)
+ self._test_new_sfp(create_sfc_port_chain, False, 25)
- @mock.patch.object(Client, 'create_port_chain')
- def test_new_sfp_default_sfc_encap(self, create_port_chain):
- self._test_new_sfp(create_port_chain, None, None)
+ @mock.patch.object(Client, 'create_sfc_port_chain')
+ def test_new_sfp_default_sfc_encap(self, create_sfc_port_chain):
+ self._test_new_sfp(create_sfc_port_chain, None, None)
- @mock.patch.object(Client, 'create_port_chain')
- def test_new_sfp_with_sfc_encap_spi(self, create_port_chain):
- self._test_new_sfp(create_port_chain, True, 25)
+ @mock.patch.object(Client, 'create_sfc_port_chain')
+ def test_new_sfp_with_sfc_encap_spi(self, create_sfc_port_chain):
+ self._test_new_sfp(create_sfc_port_chain, True, 25)
- @mock.patch.object(Client, 'create_port_chain')
- def test_new_sfp_default_sfc_encap_spi(self, create_port_chain):
- self._test_new_sfp(create_port_chain, None, 25)
+ @mock.patch.object(Client, 'create_sfc_port_chain')
+ def test_new_sfp_default_sfc_encap_spi(self, create_sfc_port_chain):
+ self._test_new_sfp(create_sfc_port_chain, None, 25)
- @mock.patch.object(Client, 'list_flow_classifier')
- def test_get_classification_list(self, list_flow_classifier):
+ @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+ def test_get_classification_list(self, list_sfc_flow_classifiers):
# what OpenStack is assumed to return to the VIM connector
- list_flow_classifier.return_value = {'flow_classifiers': [
+ list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
{'source_port_range_min': 2000,
'destination_ip_prefix': '192.168.3.0/24',
'protocol': 'udp',
result = self.vimconn.get_classification_list(filter_dict.copy())
# assert that VIM connector called OpenStack with the expected filter
- list_flow_classifier.assert_called_with(**filter_dict)
+ list_sfc_flow_classifiers.assert_called_with(**filter_dict)
# assert that the VIM connector successfully
# translated and returned the OpenStack result
self.assertEqual(result, [
'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
'name': 'osm_sfi'}])
- @mock.patch.object(Client, 'list_port_pair')
- def test_get_sfi_list_with_sfc_encap(self, list_port_pair):
- self._test_get_sfi_list(list_port_pair, 'nsh', True)
+ @mock.patch.object(Client, 'list_sfc_port_pairs')
+ def test_get_sfi_list_with_sfc_encap(self, list_sfc_port_pairs):
+ self._test_get_sfi_list(list_sfc_port_pairs, 'nsh', True)
- @mock.patch.object(Client, 'list_port_pair')
- def test_get_sfi_list_without_sfc_encap(self, list_port_pair):
- self._test_get_sfi_list(list_port_pair, None, False)
+ @mock.patch.object(Client, 'list_sfc_port_pairs')
+ def test_get_sfi_list_without_sfc_encap(self, list_sfc_port_pairs):
+ self._test_get_sfi_list(list_sfc_port_pairs, None, False)
- @mock.patch.object(Client, 'list_port_pair_group')
- def test_get_sf_list(self, list_port_pair_group):
+ @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+ def test_get_sf_list(self, list_sfc_port_pair_groups):
# what OpenStack is assumed to return to the VIM connector
- list_port_pair_group.return_value = {'port_pair_groups': [
+ list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
{'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2',
'0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
'description': '',
result = self.vimconn.get_sf_list(filter_dict.copy())
# assert that VIM connector called OpenStack with the expected filter
- list_port_pair_group.assert_called_with(**filter_dict)
+ list_sfc_port_pair_groups.assert_called_with(**filter_dict)
# assert that the VIM connector successfully
# translated and returned the OpenStack result
self.assertEqual(result, [
- {'instances': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2',
+ {'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2',
'0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
'description': '',
'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f',
'name': 'osm_sf'}])
- def _test_get_sfp_list(self, list_port_chain, correlation, sfc_encap):
+ def _test_get_sfp_list(self, list_sfc_port_chains, correlation, sfc_encap):
# what OpenStack is assumed to return to the VIM connector
- list_port_chain.return_value = {'port_chains': [
+ list_sfc_port_chains.return_value = {'port_chains': [
{'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25',
'7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'],
'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e',
result = self.vimconn.get_sfp_list(filter_dict.copy())
# assert that VIM connector called OpenStack with the expected filter
- list_port_chain.assert_called_with(**filter_dict)
+ list_sfc_port_chains.assert_called_with(**filter_dict)
# assert that the VIM connector successfully
# translated and returned the OpenStack result
self.assertEqual(result, [
'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
'name': 'osm_sfp'}])
- @mock.patch.object(Client, 'list_port_chain')
- def test_get_sfp_list_with_sfc_encap(self, list_port_chain):
- self._test_get_sfp_list(list_port_chain, 'nsh', True)
+ @mock.patch.object(Client, 'list_sfc_port_chains')
+ def test_get_sfp_list_with_sfc_encap(self, list_sfc_port_chains):
+ self._test_get_sfp_list(list_sfc_port_chains, 'nsh', True)
- @mock.patch.object(Client, 'list_port_chain')
- def test_get_sfp_list_without_sfc_encap(self, list_port_chain):
- self._test_get_sfp_list(list_port_chain, None, False)
+ @mock.patch.object(Client, 'list_sfc_port_chains')
+ def test_get_sfp_list_without_sfc_encap(self, list_sfc_port_chains):
+ self._test_get_sfp_list(list_sfc_port_chains, None, False)
- @mock.patch.object(Client, 'list_flow_classifier')
- def test_get_classification(self, list_flow_classifier):
+ @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+ def test_get_classification(self, list_sfc_flow_classifiers):
# what OpenStack is assumed to return to the VIM connector
- list_flow_classifier.return_value = {'flow_classifiers': [
+ list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
{'source_port_range_min': 2000,
'destination_ip_prefix': '192.168.3.0/24',
'protocol': 'udp',
'22198366-d4e8-4d6b-b4d2-637d5d6cbb7d')
# assert that VIM connector called OpenStack with the expected filter
- list_flow_classifier.assert_called_with(
+ list_sfc_flow_classifiers.assert_called_with(
id='22198366-d4e8-4d6b-b4d2-637d5d6cbb7d')
# assert that VIM connector successfully returned the OpenStack result
self.assertEqual(result,
'aaab0ab0-1452-4636-bb3b-11dca833fa2b'}
})
- @mock.patch.object(Client, 'list_flow_classifier')
- def test_get_classification_many_results(self, list_flow_classifier):
+ @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+ def test_get_classification_many_results(self, list_sfc_flow_classifiers):
# what OpenStack is assumed to return to the VIM connector
- list_flow_classifier.return_value = {'flow_classifiers': [
+ list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
{'source_port_range_min': 2000,
'destination_ip_prefix': '192.168.3.0/24',
'protocol': 'udp',
'3196bafc-82dd-11e7-a205-9bf6c14b0721')
# assert the VIM connector called OpenStack with the expected filter
- list_flow_classifier.assert_called_with(
+ list_sfc_flow_classifiers.assert_called_with(
id='3196bafc-82dd-11e7-a205-9bf6c14b0721')
- @mock.patch.object(Client, 'list_flow_classifier')
- def test_get_classification_no_results(self, list_flow_classifier):
+ @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+ def test_get_classification_no_results(self, list_sfc_flow_classifiers):
# what OpenStack is assumed to return to the VIM connector
- list_flow_classifier.return_value = {'flow_classifiers': []}
+ list_sfc_flow_classifiers.return_value = {'flow_classifiers': []}
# call the VIM connector
self.assertRaises(vimconn.vimconnNotFoundException,
'3196bafc-82dd-11e7-a205-9bf6c14b0721')
# assert the VIM connector called OpenStack with the expected filter
- list_flow_classifier.assert_called_with(
+ list_sfc_flow_classifiers.assert_called_with(
id='3196bafc-82dd-11e7-a205-9bf6c14b0721')
- @mock.patch.object(Client, 'list_port_pair')
- def test_get_sfi(self, list_port_pair):
+ @mock.patch.object(Client, 'list_sfc_port_pairs')
+ def test_get_sfi(self, list_sfc_port_pairs):
# what OpenStack is assumed to return to the VIM connector
- list_port_pair.return_value = {'port_pairs': [
+ list_sfc_port_pairs.return_value = {'port_pairs': [
{'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
'description': '',
'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
result = self.vimconn.get_sfi('c121ebdd-7f2d-4213-b933-3325298a6966')
# assert the VIM connector called OpenStack with the expected filter
- list_port_pair.assert_called_with(
+ list_sfc_port_pairs.assert_called_with(
id='c121ebdd-7f2d-4213-b933-3325298a6966')
# assert the VIM connector successfully returned the OpenStack result
self.assertEqual(result,
'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
'name': 'osm_sfi1'})
- @mock.patch.object(Client, 'list_port_pair')
- def test_get_sfi_many_results(self, list_port_pair):
+ @mock.patch.object(Client, 'list_sfc_port_pairs')
+ def test_get_sfi_many_results(self, list_sfc_port_pairs):
# what OpenStack is assumed to return to the VIM connector
- list_port_pair.return_value = {'port_pairs': [
+ list_sfc_port_pairs.return_value = {'port_pairs': [
{'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
'description': '',
'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
'c0436d92-82db-11e7-8f9c-5fa535f1261f')
# assert that VIM connector called OpenStack with the expected filter
- list_port_pair.assert_called_with(
+ list_sfc_port_pairs.assert_called_with(
id='c0436d92-82db-11e7-8f9c-5fa535f1261f')
- @mock.patch.object(Client, 'list_port_pair')
- def test_get_sfi_no_results(self, list_port_pair):
+ @mock.patch.object(Client, 'list_sfc_port_pairs')
+ def test_get_sfi_no_results(self, list_sfc_port_pairs):
# what OpenStack is assumed to return to the VIM connector
- list_port_pair.return_value = {'port_pairs': []}
+ list_sfc_port_pairs.return_value = {'port_pairs': []}
# call the VIM connector
self.assertRaises(vimconn.vimconnNotFoundException,
'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
# assert that VIM connector called OpenStack with the expected filter
- list_port_pair.assert_called_with(
+ list_sfc_port_pairs.assert_called_with(
id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
- @mock.patch.object(Client, 'list_port_pair_group')
- def test_get_sf(self, list_port_pair_group):
+ @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+ def test_get_sf(self, list_sfc_port_pair_groups):
# what OpenStack is assumed to return to the VIM connector
- list_port_pair_group.return_value = {'port_pair_groups': [
+ list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
{'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
'description': '',
'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
result = self.vimconn.get_sf('b22892fc-82d9-11e7-ae85-0fea6a3b3757')
# assert that VIM connector called OpenStack with the expected filter
- list_port_pair_group.assert_called_with(
+ list_sfc_port_pair_groups.assert_called_with(
id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
# assert that VIM connector successfully returned the OpenStack result
self.assertEqual(result,
- {'instances': [
- '08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
- 'description': '',
+ {'description': '',
'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+ 'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d',
'name': 'osm_sf1'})
- @mock.patch.object(Client, 'list_port_pair_group')
- def test_get_sf_many_results(self, list_port_pair_group):
+ @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+ def test_get_sf_many_results(self, list_sfc_port_pair_groups):
# what OpenStack is assumed to return to the VIM connector
- list_port_pair_group.return_value = {'port_pair_groups': [
+ list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
{'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
'description': '',
'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
# assert that VIM connector called OpenStack with the expected filter
- list_port_pair_group.assert_called_with(
+ list_sfc_port_pair_groups.assert_called_with(
id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
- @mock.patch.object(Client, 'list_port_pair_group')
- def test_get_sf_no_results(self, list_port_pair_group):
+ @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+ def test_get_sf_no_results(self, list_sfc_port_pair_groups):
# what OpenStack is assumed to return to the VIM connector
- list_port_pair_group.return_value = {'port_pair_groups': []}
+ list_sfc_port_pair_groups.return_value = {'port_pair_groups': []}
# call the VIM connector
self.assertRaises(vimconn.vimconnNotFoundException,
'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
# assert that VIM connector called OpenStack with the expected filter
- list_port_pair_group.assert_called_with(
+ list_sfc_port_pair_groups.assert_called_with(
id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
- @mock.patch.object(Client, 'list_port_chain')
- def test_get_sfp(self, list_port_chain):
+ @mock.patch.object(Client, 'list_sfc_port_chains')
+ def test_get_sfp(self, list_sfc_port_chains):
# what OpenStack is assumed to return to the VIM connector
- list_port_chain.return_value = {'port_chains': [
+ list_sfc_port_chains.return_value = {'port_chains': [
{'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
'description': '',
result = self.vimconn.get_sfp('821bc9be-82d7-11e7-8ce3-23a08a27ab47')
# assert that VIM connector called OpenStack with the expected filter
- list_port_chain.assert_called_with(
+ list_sfc_port_chains.assert_called_with(
id='821bc9be-82d7-11e7-8ce3-23a08a27ab47')
# assert that VIM connector successfully returned the OpenStack result
self.assertEqual(result,
'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
'name': 'osm_sfp1'})
- @mock.patch.object(Client, 'list_port_chain')
- def test_get_sfp_many_results(self, list_port_chain):
+ @mock.patch.object(Client, 'list_sfc_port_chains')
+ def test_get_sfp_many_results(self, list_sfc_port_chains):
# what OpenStack is assumed to return to the VIM connector
- list_port_chain.return_value = {'port_chains': [
+ list_sfc_port_chains.return_value = {'port_chains': [
{'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
'description': '',
'5d002f38-82de-11e7-a770-f303f11ce66a')
# assert that VIM connector called OpenStack with the expected filter
- list_port_chain.assert_called_with(
+ list_sfc_port_chains.assert_called_with(
id='5d002f38-82de-11e7-a770-f303f11ce66a')
- @mock.patch.object(Client, 'list_port_chain')
- def test_get_sfp_no_results(self, list_port_chain):
+ @mock.patch.object(Client, 'list_sfc_port_chains')
+ def test_get_sfp_no_results(self, list_sfc_port_chains):
# what OpenStack is assumed to return to the VIM connector
- list_port_chain.return_value = {'port_chains': []}
+ list_sfc_port_chains.return_value = {'port_chains': []}
# call the VIM connector
self.assertRaises(vimconn.vimconnNotFoundException,
'5d002f38-82de-11e7-a770-f303f11ce66a')
# assert that VIM connector called OpenStack with the expected filter
- list_port_chain.assert_called_with(
+ list_sfc_port_chains.assert_called_with(
id='5d002f38-82de-11e7-a770-f303f11ce66a')
- @mock.patch.object(Client, 'delete_flow_classifier')
- def test_delete_classification(self, delete_flow_classifier):
+ @mock.patch.object(Client, 'delete_sfc_flow_classifier')
+ def test_delete_classification(self, delete_sfc_flow_classifier):
result = self.vimconn.delete_classification(
'638f957c-82df-11e7-b7c8-132706021464')
- delete_flow_classifier.assert_called_with(
+ delete_sfc_flow_classifier.assert_called_with(
'638f957c-82df-11e7-b7c8-132706021464')
self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
- @mock.patch.object(Client, 'delete_port_pair')
- def test_delete_sfi(self, delete_port_pair):
+ @mock.patch.object(Client, 'delete_sfc_port_pair')
+ def test_delete_sfi(self, delete_sfc_port_pair):
result = self.vimconn.delete_sfi(
'638f957c-82df-11e7-b7c8-132706021464')
- delete_port_pair.assert_called_with(
+ delete_sfc_port_pair.assert_called_with(
'638f957c-82df-11e7-b7c8-132706021464')
self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
- @mock.patch.object(Client, 'delete_port_pair_group')
- def test_delete_sf(self, delete_port_pair_group):
+ @mock.patch.object(Client, 'delete_sfc_port_pair_group')
+ def test_delete_sf(self, delete_sfc_port_pair_group):
result = self.vimconn.delete_sf('638f957c-82df-11e7-b7c8-132706021464')
- delete_port_pair_group.assert_called_with(
+ delete_sfc_port_pair_group.assert_called_with(
'638f957c-82df-11e7-b7c8-132706021464')
self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
- @mock.patch.object(Client, 'delete_port_chain')
- def test_delete_sfp(self, delete_port_chain):
+ @mock.patch.object(Client, 'delete_sfc_port_chain')
+ def test_delete_sfp(self, delete_sfc_port_chain):
result = self.vimconn.delete_sfp(
'638f957c-82df-11e7-b7c8-132706021464')
- delete_port_chain.assert_called_with(
+ delete_sfc_port_chain.assert_called_with(
'638f957c-82df-11e7-b7c8-132706021464')
self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openvim
# All Rights Reserved.
#
def new_sfi(self, task):
vim_sfi_id = None
try:
- params = task["params"]
+ dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
task_id = task["instance_action_id"] + "." + str(task["task_index"])
- depends = task.get("depends")
error_text = ""
- interfaces = task.get("depends").values()[0].get("extra").get("params")[5]
+ interfaces = task.get("depends").get(dep_id).get("extra").get("interfaces").keys()
# At the moment, every port associated with the VM will be used both as ingress and egress ports.
# Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack, only the
# first ingress and first egress ports will be used to create the SFI (Port Pair).
- port_id_list = [interfaces[0].get("vim_id")]
+ port_id_list = [interfaces[0]]
name = "sfi-%s" % task["item_id"][:8]
# By default no form of IETF SFC Encapsulation will be used
vim_sfi_id = self.vim.new_sfi(name, port_id_list, port_id_list, sfc_encap=False)
def new_sf(self, task):
vim_sf_id = None
try:
- params = task["params"]
task_id = task["instance_action_id"] + "." + str(task["task_index"])
- depends = task.get("depends")
error_text = ""
+ depending_tasks = [ "TASK-" + str(dep_id) for dep_id in task["extra"]["depends_on"]]
#sfis = task.get("depends").values()[0].get("extra").get("params")[5]
- sfis = task.get("depends").values()
+ sfis = [task.get("depends").get(dep_task) for dep_task in depending_tasks]
sfi_id_list = []
for sfi in sfis:
sfi_id_list.append(sfi.get("vim_id"))
try:
params = task["params"]
task_id = task["instance_action_id"] + "." + str(task["task_index"])
- depends = task.get("depends")
+ depending_task = "TASK-" + str(task.get("extra").get("depends_on")[0])
error_text = ""
- interfaces = task.get("depends").values()[0].get("extra").get("params")[5]
+ interfaces = task.get("depends").get(depending_task).get("vim_interfaces").keys()
# Bear in mind that different VIM connectors might support Classifications differently.
# In the case of OpenStack, only the first VNF attached to the classifier will be used
# to create the Classification(s) (the "logical source port" of the "Flow Classifier").
if '/' not in destination_ip:
destination_ip += '/32'
definition = {
- "logical_source_port": interfaces[0].get("vim_id"),
+ "logical_source_port": interfaces[0],
"protocol": ip_proto,
"source_ip_prefix": source_ip,
"destination_ip_prefix": destination_ip,
try:
params = task["params"]
task_id = task["instance_action_id"] + "." + str(task["task_index"])
- depends = task.get("depends")
+ depending_tasks = [task.get("depends").get("TASK-" + str(tsk_id)) for tsk_id in task.get("extra").get("depends_on")]
error_text = ""
- deps = task.get("depends").values()
sf_id_list = []
classification_id_list = []
- for dep in deps:
+ for dep in depending_tasks:
vim_id = dep.get("vim_id")
resource = dep.get("item")
if resource == "instance_sfs":
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
'name': (optional) name for the interface.
'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
- 'model': (optional and only have sense for type==virtual) interface model: virtio, e2000, ...
+ 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
'mac_address': (optional) mac address to assign to this interface
'ip_address': (optional) IP address to assign to this interface
#TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
name
net_id - subnet_id from AWS
vpci - (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
- model: (optional and only have sense for type==virtual) interface model: virtio, e2000, ...
+ model: (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
mac_address: (optional) mac address to assign to this interface
type: (mandatory) can be one of:
virtual, in this case always connected to a network of type 'net_type=bridge'
# -*- coding: utf-8 -*-
##
-# Copyright 2017 Telefónica Digital España S.L.U.
+# Copyright 2017 Telefonica Digital Spain S.L.U.
# This file is part of ETSI OSM
# All Rights Reserved.
#
vimconnector implements all the methods to interact with OpenNebula using the XML-RPC API.
"""
__author__ = "Jose Maria Carmona Perez,Juan Antonio Hernando Labajo, Emilio Abraham Garrido Garcia,Alberto Florez " \
- "Pages, Andres Pozo Muñoz, Santiago Perez Marin, Onlife Networks Telefonica I+D Product Innovation "
+ "Pages, Andres Pozo Munoz, Santiago Perez Marin, Onlife Networks Telefonica I+D Product Innovation "
__date__ = "$13-dec-2017 11:09:29$"
import vimconn
import requests
name:
net_id: network uuid to connect
vpci: virtual vcpi to assign
- model: interface model, virtio, e2000, ...
+ model: interface model, virtio, e1000, ...
mac_address:
use: 'data', 'bridge', 'mgmt'
type: 'virtual', 'PF', 'VF', 'VFnotShared'
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
__date__ = "$22-sep-2017 23:59:59$"
import vimconn
-import json
+# import json
import logging
import netaddr
import time
def _format_exception(self, exception):
'''Transform a keystone, nova, neutron exception into a vimconn exception'''
- if isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
- ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed
- )):
+ if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound, gl1Exceptions.HTTPNotFound)):
+ raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception))
+ elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
+ ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
+ elif isinstance(exception, (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
+ raise vimconn.vimconnException(type(exception).__name__ + ": " + str(exception))
elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
- neExceptions.NeutronException, nvExceptions.BadRequest)):
+ neExceptions.NeutronException)):
raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + str(exception))
- elif isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound)):
- raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception))
elif isinstance(exception, nvExceptions.Conflict):
raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception))
elif isinstance(exception, vimconn.vimconnException):
else:
project = self.keystone.tenants.create(tenant_name, tenant_description)
return project.id
- except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError) as e:
self._format_exception(e)
def delete_tenant(self, tenant_id):
else:
self.keystone.tenants.delete(tenant_id)
return tenant_id
- except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError) as e:
self._format_exception(e)
def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None):
retry=0
max_retries=3
name_suffix = 0
- name=flavor_data['name']
- while retry<max_retries:
- retry+=1
- try:
- self._reload_connection()
- if change_name_if_used:
- #get used names
- fl_names=[]
- fl=self.nova.flavors.list()
- for f in fl:
- fl_names.append(f.name)
- while name in fl_names:
- name_suffix += 1
- name = flavor_data['name']+"-" + str(name_suffix)
-
- ram = flavor_data.get('ram',64)
- vcpus = flavor_data.get('vcpus',1)
- numa_properties=None
-
- extended = flavor_data.get("extended")
- if extended:
- numas=extended.get("numas")
- if numas:
- numa_nodes = len(numas)
- if numa_nodes > 1:
- return -1, "Can not add flavor with more than one numa"
- numa_properties = {"hw:numa_nodes":str(numa_nodes)}
- numa_properties["hw:mem_page_size"] = "large"
- numa_properties["hw:cpu_policy"] = "dedicated"
- numa_properties["hw:numa_mempolicy"] = "strict"
- if self.vim_type == "VIO":
- numa_properties["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
- numa_properties["vmware:latency_sensitivity_level"] = "high"
- for numa in numas:
- #overwrite ram and vcpus
- #check if key 'memory' is present in numa else use ram value at flavor
- if 'memory' in numa:
- ram = numa['memory']*1024
- #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
- if 'paired-threads' in numa:
- vcpus = numa['paired-threads']*2
- #cpu_thread_policy "require" implies that the compute node must have an STM architecture
- numa_properties["hw:cpu_thread_policy"] = "require"
- numa_properties["hw:cpu_policy"] = "dedicated"
- elif 'cores' in numa:
- vcpus = numa['cores']
- # cpu_thread_policy "prefer" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
- numa_properties["hw:cpu_thread_policy"] = "isolate"
- numa_properties["hw:cpu_policy"] = "dedicated"
- elif 'threads' in numa:
- vcpus = numa['threads']
- # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
- numa_properties["hw:cpu_thread_policy"] = "prefer"
- numa_properties["hw:cpu_policy"] = "dedicated"
- # for interface in numa.get("interfaces",() ):
- # if interface["dedicated"]=="yes":
- # raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
- # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
-
- #create flavor
- new_flavor=self.nova.flavors.create(name,
- ram,
- vcpus,
- flavor_data.get('disk',0),
- is_public=flavor_data.get('is_public', True)
- )
- #add metadata
- if numa_properties:
- new_flavor.set_keys(numa_properties)
- return new_flavor.id
- except nvExceptions.Conflict as e:
- if change_name_if_used and retry < max_retries:
- continue
- self._format_exception(e)
- #except nvExceptions.BadRequest as e:
- except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
- self._format_exception(e)
+ try:
+ name=flavor_data['name']
+ while retry<max_retries:
+ retry+=1
+ try:
+ self._reload_connection()
+ if change_name_if_used:
+ #get used names
+ fl_names=[]
+ fl=self.nova.flavors.list()
+ for f in fl:
+ fl_names.append(f.name)
+ while name in fl_names:
+ name_suffix += 1
+ name = flavor_data['name']+"-" + str(name_suffix)
+
+ ram = flavor_data.get('ram',64)
+ vcpus = flavor_data.get('vcpus',1)
+ numa_properties=None
+
+ extended = flavor_data.get("extended")
+ if extended:
+ numas=extended.get("numas")
+ if numas:
+ numa_nodes = len(numas)
+ if numa_nodes > 1:
+ return -1, "Can not add flavor with more than one numa"
+ numa_properties = {"hw:numa_nodes":str(numa_nodes)}
+ numa_properties["hw:mem_page_size"] = "large"
+ numa_properties["hw:cpu_policy"] = "dedicated"
+ numa_properties["hw:numa_mempolicy"] = "strict"
+ if self.vim_type == "VIO":
+ numa_properties["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+ numa_properties["vmware:latency_sensitivity_level"] = "high"
+ for numa in numas:
+ #overwrite ram and vcpus
+ #check if key 'memory' is present in numa else use ram value at flavor
+ if 'memory' in numa:
+ ram = numa['memory']*1024
+ #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+ if 'paired-threads' in numa:
+ vcpus = numa['paired-threads']*2
+ #cpu_thread_policy "require" implies that the compute node must have an STM architecture
+ numa_properties["hw:cpu_thread_policy"] = "require"
+ numa_properties["hw:cpu_policy"] = "dedicated"
+ elif 'cores' in numa:
+ vcpus = numa['cores']
+ # cpu_thread_policy "prefer" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
+ numa_properties["hw:cpu_thread_policy"] = "isolate"
+ numa_properties["hw:cpu_policy"] = "dedicated"
+ elif 'threads' in numa:
+ vcpus = numa['threads']
+ # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+ numa_properties["hw:cpu_thread_policy"] = "prefer"
+ numa_properties["hw:cpu_policy"] = "dedicated"
+ # for interface in numa.get("interfaces",() ):
+ # if interface["dedicated"]=="yes":
+ # raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
+ # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
+
+ #create flavor
+ new_flavor=self.nova.flavors.create(name,
+ ram,
+ vcpus,
+ flavor_data.get('disk',0),
+ is_public=flavor_data.get('is_public', True)
+ )
+ #add metadata
+ if numa_properties:
+ new_flavor.set_keys(numa_properties)
+ return new_flavor.id
+ except nvExceptions.Conflict as e:
+ if change_name_if_used and retry < max_retries:
+ continue
+ self._format_exception(e)
+ #except nvExceptions.BadRequest as e:
+ except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError, KeyError) as e:
+ self._format_exception(e)
def delete_flavor(self,flavor_id):
'''Deletes a tenant flavor from openstack VIM. Returns the old flavor_id
else:
disk_format="raw"
self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
- new_image = self.glance.images.create(name=image_dict['name'])
+ if self.vim_type == "VIO":
+ container_format = "bare"
+ if 'container_format' in image_dict:
+ container_format = image_dict['container_format']
+ new_image = self.glance.images.create(name=image_dict['name'], container_format=container_format,
+ disk_format=disk_format)
+ else:
+ new_image = self.glance.images.create(name=image_dict['name'])
if image_dict['location'].startswith("http"):
# TODO there is not a method to direct download. It must be downloaded locally with requests
raise vimconn.vimconnNotImplemented("Cannot create image from URL")
#new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
# container_format="bare", data=fimage, disk_format=disk_format)
metadata_to_load = image_dict.get('metadata')
- #TODO location is a reserved word for current openstack versions. Use another word
- metadata_to_load['location'] = image_dict['location']
+ # TODO location is a reserved word for current openstack versions. fixed for VIO please check for openstack
+ if self.vim_type == "VIO":
+ metadata_to_load['upload_location'] = image_dict['location']
+ else:
+ metadata_to_load['location'] = image_dict['location']
self.glance.images.update(new_image.id, **metadata_to_load)
return new_image.id
except (nvExceptions.Conflict, ksExceptions.ClientException, nvExceptions.ClientException) as e:
self._reload_connection()
self.glance.images.delete(image_id)
return image_id
- except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e: #TODO remove
+ except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, gl1Exceptions.HTTPNotFound, ConnectionError) as e: #TODO remove
self._format_exception(e)
def get_image_id_from_path(self, path):
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
name:
net_id: network uuid to connect
vpci: virtual vcpi to assign
- model: interface model, virtio, e2000, ...
+ model: interface model, virtio, e1000, ...
mac_address:
use: 'data', 'bridge', 'mgmt'
type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
if net.get("vpci"):
net_dict["vpci"] = net["vpci"]
if net.get("model"):
- if net["model"] == "VIRTIO":
+ if net["model"] == "VIRTIO" or net["model"] == "paravirt":
net_dict["model"] = "virtio"
else:
net_dict["model"] = net["model"]
else:
net_uuid = net_uuid[3]
# create dict entry
- self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
- vdc_uuid,
- net_details.get('name')))
+ self.logger.debug("get_vcd_network_list(): Adding network {} "
+ "to a list vcd id {} network {}".format(net_uuid,
+ vdc_uuid,
+ net_details.get('name')))
filter_dict["name"] = net_details.get('name')
filter_dict["id"] = net_uuid
if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
else:
net_uuid = net_uuid[3]
# create dict entry
- self.logger.debug("Adding {} to a list vcd id {} network {}".format(net_uuid,
- vdcid,
- net_details.get('name')))
+ self.logger.debug("get_network_list(): Adding net {}"
+ " to a list vcd id {} network {}".format(net_uuid,
+ vdcid,
+ net_details.get('name')))
filter_entry["name"] = net_details.get('name')
filter_entry["id"] = net_uuid
if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
'name': (optional) name for the interface.
'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
- 'model': (optional and only have sense for type==virtual) interface model: virtio, e2000, ...
+ 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
'mac_address': (optional) mac address to assign to this interface
#TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
vmname_andid = ''.join(new_vm_name)
for net in net_list:
- if net['type'] == "SR-IOV" or net['type'] == "PCI-PASSTHROUGH":
+ if net['type'] == "PCI-PASSTHROUGH":
raise vimconn.vimconnNotSupportedException(
"Current vCD version does not support type : {}".format(net['type']))
raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
"(Failed retrieve catalog information {})".format(name, image_id))
-
# Set vCPU and Memory based on flavor.
vm_cpus = None
vm_memory = None
#If no mgmt, then the 1st NN in netlist is considered as primary net.
primary_net = None
primary_netname = None
+ primary_net_href = None
network_mode = 'bridged'
if net_list is not None and len(net_list) > 0:
for net in net_list:
try:
primary_net_id = primary_net['net_id']
+ url_list = [self.url, '/api/network/', primary_net_id]
+ primary_net_href = ''.join(url_list)
network_dict = self.get_vcd_network(network_uuid=primary_net_id)
if 'name' in network_dict:
primary_netname = network_dict['name']
<InstantiationParams>
<NetworkConfigSection>
<ovf:Info>Configuration parameters for logical networks</ovf:Info>
- <NetworkConfig networkName="None">
+ <NetworkConfig networkName="{}">
<Configuration>
- <ParentNetwork href=""/>
+ <ParentNetwork href="{}" />
<FenceMode>bridged</FenceMode>
</Configuration>
</NetworkConfig>
</SourcedItem>
<AllEULAsAccepted>false</AllEULAsAccepted>
</InstantiateVAppTemplateParams>""".format(vmname_andid,
+ primary_netname,
+ primary_net_href,
vapp_tempalte_href,
vm_href,
vm_id,
#Add PCI passthrough/SRIOV configrations
vm_obj = None
pci_devices_info = []
- sriov_net_info = []
reserve_memory = False
for net in net_list:
if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
pci_devices_info.append(net)
elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
- sriov_net_info.append(net)
+ reserve_memory = True
#Add PCI
if len(pci_devices_info) > 0:
# add NICs & connect to networks in netlist
try:
+ vdc_obj = VDC(self.client, href=vdc.get('href'))
+ vapp_resource = vdc_obj.get_vapp(vmname_andid)
+ vapp = VApp(self.client, resource=vapp_resource)
+ vapp_id = vapp_resource.get('id').split(':')[-1]
+
+ self.logger.info("Removing primary NIC: ")
+ # First remove all NICs so that NIC properties can be adjusted as needed
+ self.remove_primary_network_adapter_from_all_vms(vapp)
+
self.logger.info("Request to connect VM to a network: {}".format(net_list))
- nicIndex = 0
primary_nic_index = 0
+ nicIndex = 0
for net in net_list:
# openmano uses network id in UUID format.
# vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
- NONE (No IP addressing mode specified.)"""
if primary_netname is not None:
+ self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
#For python3
#nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
if len(nets) == 1:
self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
- vdc_obj = VDC(self.client, href=vdc.get('href'))
- vapp_resource = vdc_obj.get_vapp(vmname_andid)
- vapp = VApp(self.client, resource=vapp_resource)
- # connect network to VM - with all DHCP by default
- task = vapp.connect_org_vdc_network(nets[0].get('name'))
-
- self.client.get_task_monitor().wait_for_success(task=task)
+ if interface_net_name != primary_netname:
+ # connect network to VM - with all DHCP by default
+ self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
+ self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
- type_list = ('PF', 'PCI-PASSTHROUGH', 'VF', 'SR-IOV', 'VFnotShared')
+ type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
+ nic_type = 'VMXNET3'
if 'type' in net and net['type'] not in type_list:
# fetching nic type from vnf
if 'model' in net:
- if net['model'] is not None and net['model'].lower() == 'virtio':
- nic_type = 'VMXNET3'
+ if net['model'] is not None:
+ if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
+ nic_type = 'VMXNET3'
else:
nic_type = net['model']
else:
self.logger.info("new_vminstance(): adding network adapter "\
"to a network {}".format(nets[0].get('name')))
+ if net['type'] in ['SR-IOV', 'VF']:
+ nic_type = net['type']
self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
primary_nic_index,
nicIndex,
- net)
+ net,
+ nic_type=nic_type)
nicIndex += 1
# cloud-init for ssh-key injection
if cloud_config:
self.cloud_init(vapp,cloud_config)
- # ############# Stub code for SRIOV #################
- #Add SRIOV
-# if len(sriov_net_info) > 0:
-# self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
-# vmname_andid ))
-# sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
-# sriov_net_info,
-# vmname_andid)
-# if sriov_status:
-# self.logger.info("Added SRIOV {} to VM {}".format(
-# sriov_net_info,
-# vmname_andid)
-# )
-# reserve_memory = True
-# else:
-# self.logger.info("Fail to add SRIOV {} to VM {}".format(
-# sriov_net_info,
-# vmname_andid)
-# )
-
# If VM has PCI devices or SRIOV reserve memory for VM
if reserve_memory:
- memReserve = vm_obj.config.hardware.memoryMB
- spec = vim.vm.ConfigSpec()
- spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
- task = vm_obj.ReconfigVM_Task(spec=spec)
- if task:
- result = self.wait_for_vcenter_task(task, vcenter_conect)
- self.logger.info("Reserved memory {} MB for "
- "VM VM status: {}".format(str(memReserve), result))
- else:
- self.logger.info("Fail to reserved memory {} to VM {}".format(
- str(memReserve), str(vm_obj)))
+ self.reserve_memory_for_all_vms(vapp, memory_mb)
self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
- vapp_id = vapp_resource.get('id').split(':')[-1]
poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
if result.get('status') == 'success':
try:
vapp_name = self.get_namebyvappid(vm__vim_uuid)
- vapp_resource = vdc_obj.get_vapp(vapp_name)
- vapp = VApp(self.client, resource=vapp_resource)
if vapp_name is None:
self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
- else:
- self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+ self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+ vapp_resource = vdc_obj.get_vapp(vapp_name)
+ vapp = VApp(self.client, resource=vapp_resource)
# Delete vApp and wait for status change if task executed and vApp is None.
"VM details")
xmlroot = XmlElementTree.fromstring(response.content)
+
result = response.content.replace("\n"," ")
- hdd_mb = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result).group(1)
- vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
- cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
- vm_details['cpus'] = int(cpus) if cpus else None
+ hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
+ if hdd_match:
+ hdd_mb = hdd_match.group(1)
+ vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
+ cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
+ if cpus_match:
+ cpus = cpus_match.group(1)
+ vm_details['cpus'] = int(cpus) if cpus else None
memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
# either use client provided UUID or search for a first available
# if both are not defined we return none
if parent_network_uuid is not None:
- url_list = [self.url, '/api/admin/network/', parent_network_uuid]
+ provider_network = None
+ available_networks = None
+ add_vdc_rest_url = None
+
+ url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
add_vdc_rest_url = ''.join(url_list)
+ url_list = [self.url, '/api/admin/network/', parent_network_uuid]
+ available_networks = ''.join(url_list)
+
#Creating all networks as Direct Org VDC type networks.
#Unused in case of Underlay (data/ptp) network interface.
fence_mode="bridged"
" for VM : {}".format(exp))
raise vimconn.vimconnException(message=exp)
+
+ def reserve_memory_for_all_vms(self, vapp, memory_mb):
+ """
+ Method to reserve memory for all VMs
+ Args :
+ vapp - VApp
+ memory_mb - Memory in MB
+ Returns:
+ None
+ """
+
+ self.logger.info("Reserve memory for all VMs")
+ for vms in vapp.get_all_vms():
+ vm_id = vms.get('id').split(':')[-1]
+
+ url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
+
+ headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+ 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
+ response = self.perform_request(req_type='GET',
+ url=url_rest_call,
+ headers=headers)
+
+ if response.status_code == 403:
+ response = self.retry_rest('GET', url_rest_call)
+
+ if response.status_code != 200:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {}".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
+ "memory")
+
+ bytexml = bytes(bytearray(response.content, encoding='utf-8'))
+ contentelem = lxmlElementTree.XML(bytexml)
+ namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.iteritems() if prefix}
+ namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+
+ # Find the reservation element in the response
+ memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
+ for memelem in memelem_list:
+ memelem.text = str(memory_mb)
+
+ newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
+
+ response = self.perform_request(req_type='PUT',
+ url=url_rest_call,
+ headers=headers,
+ data=newdata)
+
+ if response.status_code == 403:
+ add_headers = {'Content-Type': headers['Content-Type']}
+ response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+
+ if response.status_code != 202:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {} ".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
+ "virtual hardware memory section")
+ else:
+ mem_task = self.get_task_from_response(response.content)
+ result = self.client.get_task_monitor().wait_for_success(task=mem_task)
+ if result.get('status') == 'success':
+ self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
+ .format(vm_id))
+ else:
+ self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
+ .format(vm_id))
+
+ def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
+ """
+ Configure VApp network config with org vdc network
+ Args :
+            vapp_id - vApp id; net_name - org vdc network name to connect
+ Returns:
+ None
+ """
+
+ self.logger.info("Connecting vapp {} to org vdc network {}".
+ format(vapp_id, net_name))
+
+ url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
+
+ headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+ 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ response = self.perform_request(req_type='GET',
+ url=url_rest_call,
+ headers=headers)
+
+ if response.status_code == 403:
+ response = self.retry_rest('GET', url_rest_call)
+
+ if response.status_code != 200:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {}".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
+ "network config section")
+
+ data = response.content
+ headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
+ net_id = self.get_network_id_by_name(net_name)
+ if not net_id:
+ raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
+ "existing network")
+
+ bytexml = bytes(bytearray(data, encoding='utf-8'))
+ newelem = lxmlElementTree.XML(bytexml)
+ namespaces = {prefix: uri for prefix, uri in newelem.nsmap.iteritems() if prefix}
+ namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
+ nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
+
+ newstr = """<NetworkConfig networkName="{}">
+ <Configuration>
+ <ParentNetwork href="{}/api/network/{}"/>
+ <FenceMode>bridged</FenceMode>
+ </Configuration>
+ </NetworkConfig>
+ """.format(net_name, self.url, net_id)
+ newcfgelem = lxmlElementTree.fromstring(newstr)
+ if nwcfglist:
+ nwcfglist[0].addnext(newcfgelem)
+
+ newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
+
+ response = self.perform_request(req_type='PUT',
+ url=url_rest_call,
+ headers=headers,
+ data=newdata)
+
+ if response.status_code == 403:
+ add_headers = {'Content-Type': headers['Content-Type']}
+ response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+
+ if response.status_code != 202:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {} ".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
+ "network config section")
+ else:
+ vapp_task = self.get_task_from_response(response.content)
+ result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
+ if result.get('status') == 'success':
+ self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
+ "network {}".format(vapp_id, net_name))
+ else:
+ self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
+ "connect to network {}".format(vapp_id, net_name))
+
+ def remove_primary_network_adapter_from_all_vms(self, vapp):
+ """
+        Method to remove the primary network adapter from all VMs of the vApp
+ Args :
+ vapp - VApp
+ Returns:
+ None
+ """
+
+ self.logger.info("Removing network adapter from all VMs")
+ for vms in vapp.get_all_vms():
+ vm_id = vms.get('id').split(':')[-1]
+
+ url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+
+ headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+ 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ response = self.perform_request(req_type='GET',
+ url=url_rest_call,
+ headers=headers)
+
+ if response.status_code == 403:
+ response = self.retry_rest('GET', url_rest_call)
+
+ if response.status_code != 200:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {}".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
+ "network connection section")
+
+ data = response.content
+ data = data.split('<Link rel="edit"')[0]
+
+ headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+
+ newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+ <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
+ xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
+ xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
+ xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
+ xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
+ xmlns:vmw="http://www.vmware.com/schema/ovf"
+ xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
+ xmlns:ns9="http://www.vmware.com/vcloud/versions"
+ href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
+ <ovf:Info>Specifies the available VM network connections</ovf:Info>
+ <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
+ <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
+ </NetworkConnectionSection>""".format(url=url_rest_call)
+ response = self.perform_request(req_type='PUT',
+ url=url_rest_call,
+ headers=headers,
+ data=newdata)
+
+ if response.status_code == 403:
+ add_headers = {'Content-Type': headers['Content-Type']}
+ response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+
+ if response.status_code != 202:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {} ".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
+ "network connection section")
+ else:
+ nic_task = self.get_task_from_response(response.content)
+ result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+ if result.get('status') == 'success':
+ self.logger.info("remove_primary_network_adapter(): VM {} conneced to "\
+ "default NIC type".format(vm_id))
+ else:
+ self.logger.error("remove_primary_network_adapter(): VM {} failed to "\
+ "connect NIC type".format(vm_id))
+
def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
"""
Method to add network adapter type to vm
None
"""
+ self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
+ format(network_name, nicIndex, nic_type))
try:
ip_address = None
floating_ip = False
data = response.content
data = data.split('<Link rel="edit"')[0]
if '<PrimaryNetworkConnectionIndex>' not in data:
+ self.logger.debug("add_network_adapter PrimaryNIC not in data")
item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
<NetworkConnection network="{}">
<NetworkConnectionIndex>{}</NetworkConnectionIndex>
data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
else:
+ self.logger.debug("add_network_adapter PrimaryNIC in data")
new_item = """<NetworkConnection network="{}">
<NetworkConnectionIndex>{}</NetworkConnectionIndex>
<IsConnected>true</IsConnected>
for vms in vapp.get_all_vms():
vm_id = vms.get('id').split(':')[-1]
-
url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
headers = {'Accept':'application/*+xml;version=' + API_VERSION,
"network connection section")
data = response.content
data = data.split('<Link rel="edit"')[0]
+ vcd_netadapter_type = nic_type
+ if nic_type in ['SR-IOV', 'VF']:
+ vcd_netadapter_type = "SRIOVETHERNETCARD"
+
if '<PrimaryNetworkConnectionIndex>' not in data:
+ self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
<NetworkConnection network="{}">
<NetworkConnectionIndex>{}</NetworkConnectionIndex>
<IpAddressAllocationMode>{}</IpAddressAllocationMode>
<NetworkAdapterType>{}</NetworkAdapterType>
</NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
- allocation_mode, nic_type)
+ allocation_mode, vcd_netadapter_type)
# Stub for ip_address feature
if ip_address:
ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
else:
+ self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
new_item = """<NetworkConnection network="{}">
<NetworkConnectionIndex>{}</NetworkConnectionIndex>
<IsConnected>true</IsConnected>
<IpAddressAllocationMode>{}</IpAddressAllocationMode>
<NetworkAdapterType>{}</NetworkAdapterType>
</NetworkConnection>""".format(network_name, nicIndex,
- allocation_mode, nic_type)
+ allocation_mode, vcd_netadapter_type)
# Stub for ip_address feature
if ip_address:
ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
Returns org and vdc object
"""
- org = Org(self.client, resource=self.client.get_org())
- vdc = org.get_vdc(self.tenant_name)
+ vdc = None
+ try:
+ org = Org(self.client, resource=self.client.get_org())
+ vdc = org.get_vdc(self.tenant_name)
+ except Exception as e:
+ # pyvcloud not giving a specific exception, Refresh nevertheless
+ self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
#Retry once, if failed by refreshing token
if vdc is None:
self.get_token()
+ org = Org(self.client, resource=self.client.get_org())
vdc = org.get_vdc(self.tenant_name)
return org, vdc
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#wait 120 sec
if [ $attempt -ge $max_attempts ]; then
echo
- echo "Can not connect to database ${db_host}:${db_port} during $max_attempts sec"
+ echo "Cannot connect to database ${db_host}:${db_port} during $max_attempts sec"
return 1
fi
attempt=$[$attempt+1]
echo "1/4 Apply config"
-configure || exit 1
+# this is not needed anymore because the environment overwrites the config file
+# configure || exit 1
echo "2/4 Wait for db up"
echo "4/4 Try to start"
-/usr/bin/openmanod -c /etc/osm/openmanod.cfg --log-file=/var/log/osm/openmano.log --create-tenant=osm
+# look for openmanod.cfg
+RO_CONFIG_FILE="/etc/osm/openmanod.cfg"
+[ -f "$RO_CONFIG_FILE" ] || RO_CONFIG_FILE=$(python -c 'import osm_ro; print(osm_ro.__path__[0])')/openmanod.cfg
+[ -f "$RO_CONFIG_FILE" ] || ! echo "configuration file 'openmanod.cfg' not found" || exit 1
+
+openmanod -c "$RO_CONFIG_FILE" --create-tenant=osm # --log-file=/var/log/osm/openmano.log
+
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
[ -z "$_DISTRO" ] && _DISTRO="Ubuntu"
+function usage(){
+ echo -e "usage: sudo -E $0 [OPTIONS]"
+ echo -e "Install last stable source code of lib-osm-openvim and the needed packages"
+ echo -e " OPTIONS"
+ echo -e " -h --help: show this help"
+ echo -e " -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
+ echo -e " -b master (main branch)"
+ echo -e " -b v2.0 (v2.0 branch)"
+ echo -e " -b tags/v1.1.0 (a specific tag)"
+ echo -e " ..."
+ echo -e " --develop: install last master version for developers"
+ echo -e " --no-install-packages: use this option to skip updating and installing the requires packages. This" \
+ "avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
+}
+while getopts ":b:h-:" o; do
+ case "${o}" in
+ b)
+ export COMMIT_ID=${OPTARG}
+ ;;
+ h)
+ usage && exit 0
+ ;;
+ -)
+ [ "${OPTARG}" == "help" ] && usage && exit 0
+ [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
+ [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
+ [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
+ echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
+ exit 1
+ ;;
+ \?)
+ echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
+ exit 1
+ ;;
+ :)
+ echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
+ exit 1
+ ;;
+ *)
+ usage >&2
+ exit 1
+ ;;
+ esac
+done
+
su $SUDO_USER -c "git -C '${BASEFOLDER}' clone ${GIT_OVIM_URL} lib-openvim" ||
! echo "Error cannot clone from '${GIT_OVIM_URL}'" >&2 || exit 1
if [[ -n $COMMIT_ID ]] ; then
make -C "${BASEFOLDER}/lib-openvim" prepare_lite
export LANG="en_US.UTF-8"
-pip2 install -e "${BASEFOLDER}/lib-openvim/build"
+pip2 install -e "${BASEFOLDER}/lib-openvim/build" || ! echo "ERROR installing lib-osm-openvim library!!!" >&2 ||
+ exit 1
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
[ -z "$NO_PACKAGES" ] && NO_PACKAGES=""
[ -z "$_DISTRO" ] && _DISTRO="Ubuntu"
+function usage(){
+ echo -e "usage: sudo -E $0 [OPTIONS]"
+ echo -e "Install last stable source code of osm-im and the needed packages"
+ echo -e " OPTIONS"
+ echo -e " -h --help: show this help"
+ echo -e " -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
+ echo -e " -b master (main branch)"
+ echo -e " -b v2.0 (v2.0 branch)"
+ echo -e " -b tags/v1.1.0 (a specific tag)"
+ echo -e " ..."
+ echo -e " --develop: install last master version for developers"
+ echo -e " --no-install-packages: use this option to skip updating and installing the requires packages. This" \
+ "avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
+}
+while getopts ":b:h-:" o; do
+ case "${o}" in
+ b)
+ export COMMIT_ID=${OPTARG}
+ ;;
+ h)
+ usage && exit 0
+ ;;
+ -)
+ [ "${OPTARG}" == "help" ] && usage && exit 0
+ [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
+ [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
+ [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
+ echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
+ exit 1
+ ;;
+ \?)
+ echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
+ exit 1
+ ;;
+ :)
+ echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
+ exit 1
+ ;;
+ *)
+ usage >&2
+ exit 1
+ ;;
+ esac
+done
su $SUDO_USER -c "git -C ${BASEFOLDER} clone ${GIT_OSMIM_URL} IM" ||
! echo "Error cannot clone from '${GIT_OSMIM_URL}'" >&2 || exit 1
-o "${BASEFOLDER}/IM/osm_im/${target}.py" "${BASEFOLDER}/IM/models/yang/${target}.yang"
done
-pip2 install -e "${BASEFOLDER}/IM" || ! echo "ERROR installing python-osm-im library!!!" >&2 || exit 1
\ No newline at end of file
+pip2 install -e "${BASEFOLDER}/IM" || ! echo "ERROR installing python-osm-im library!!!" >&2 || exit 1
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#"lib_osm_openvim",
#"osm_im",
"pycrypto",
+ "netaddr",
]
setup(name=_name,
- version_command=('git describe --match v*', 'pep440-git'),
+ version_command=('git describe --match v*', 'pep440-git-full'),
description = _description,
long_description = open('README.rst').read(),
author = _author,
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
import sys
import time
import uuid
-import json
from argparse import ArgumentParser
__author__ = "Pablo Montes, Alfonso Tierno"
vca_object = test_config["vim_conn"].connect()
logger.debug("{}".format(vca_object))
self.assertIsNotNone(vca_object)
+ elif test_config['vimtype'] == 'openstack':
+ test_config["vim_conn"]._reload_connection()
+ network_list = test_config["vim_conn"].get_network_list()
+ logger.debug("{}".format(network_list))
+ self.assertIsNotNone(network_list)
class test_vimconn_new_network(test_base):
network_name = None
version = item['ip-profile']['ip-version']
dhcp_count = item['ip-profile']['dhcp']['count']
dhcp_enabled = item['ip-profile']['dhcp']['enabled']
+ dhcp_start_address = item['ip-profile']['dhcp']['start-address']
+ subnet_address = item['ip-profile']['subnet-address']
+
self.__class__.network_name = _get_random_string(20)
ip_profile = {'dhcp_count': dhcp_count,
'dhcp_enabled': dhcp_enabled,
- 'ip_version': version
+ 'dhcp_start_address': dhcp_start_address,
+ 'ip_version': version,
+ 'subnet_address': subnet_address
}
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
self.__class__.test_index,
# refresh net status
net_dict = test_config["vim_conn"].refresh_nets_status([unknown_net_id])
- self.assertEqual(net_dict, {})
+ if test_config['vimtype'] == 'openstack':
+ self.assertEqual(net_dict[unknown_net_id]['status'], 'DELETED')
+ else:
+ # TODO : Fix vmware connector to return status DELETED as per vimconn.py
+ self.assertEqual(net_dict, {})
class test_vimconn_get_network_list(test_base):
network_name = None
inspect.currentframe().f_code.co_name)
self.__class__.test_index += 1
- network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
+ if test_config['vimtype'] == 'openstack':
+ network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
+ else:
+ network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
# find network from list by it's name
new_network_list = test_config["vim_conn"].get_network_list({'name': network_name})
inspect.currentframe().f_code.co_name)
self.__class__.test_index += 1
- network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
+ if test_config['vimtype'] == 'openstack':
+ network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
+ else:
+ network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
# find network from list by it's shared value
new_network_list = test_config["vim_conn"].get_network_list({'shared':Shared,
'name':network_name})
self.__class__.test_index += 1
tenant_list = test_config["vim_conn"].get_tenant_list()
- network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
+ if test_config['vimtype'] == 'openstack':
+ network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
+ else:
+ network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
for tenant_item in tenant_list:
if test_config['tenant'] == tenant_item.get('name'):
self.__class__.test_index += 1
status = 'ACTIVE'
- network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
+ if test_config['vimtype'] == 'openstack':
+ network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
+ else:
+ network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
# find network from list by it's status
new_network_list = test_config["vim_conn"].get_network_list({'status':status,
with self.assertRaises(Exception) as context:
test_config["vim_conn"].delete_network(Non_exist_id)
- self.assertEqual((context.exception).http_code, 400)
+ self.assertEqual((context.exception).http_code, 404)
class test_vimconn_get_flavor(test_base):
vcpus = item['vcpus']
disk = item['disk']
- flavor_data = {'ram': ram,
+ flavor_data = {
+ 'name' : _get_random_string(20),
+ 'ram': ram,
'vcpus': vcpus,
'disk': disk
- }
+ }
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
self.__class__.test_index,
flavor_id = None
def test_000_new_flavor(self):
- flavor_data = {'ram': 1024, 'vpcus': 1, 'disk': 10}
+ flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vpcus': 1, 'disk': 10}
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
self.__class__.test_index,
# create new flavor
self.__class__.flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
- self.assertEqual(type(self.__class__.flavor_id),str)
- self.assertIsInstance(uuid.UUID(self.__class__.flavor_id),uuid.UUID)
+ self.assertIsInstance(self.__class__.flavor_id, (str, unicode))
+ self.assertIsInstance(uuid.UUID(self.__class__.flavor_id), uuid.UUID)
def test_010_delete_flavor(self):
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
image_path = test_config['image_path']
if image_path:
- self.__class__.image_id = test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : image_path })
+ self.__class__.image_id = test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : image_path, 'metadata': {'upload_location':None} })
time.sleep(20)
- self.assertEqual(type(self.__class__.image_id),str)
- self.assertIsInstance(uuid.UUID(self.__class__.image_id),uuid.UUID)
+
+ self.assertIsInstance(self.__class__.image_id, (str, unicode))
+ self.assertIsInstance(uuid.UUID(self.__class__.image_id), uuid.UUID)
else:
self.skipTest("Skipping test as image file not present at RO container")
self.__class__.test_index += 1
with self.assertRaises(Exception) as context:
- test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path })
+ test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path})
self.assertEqual((context.exception).http_code, 400)
self.__class__.test_index += 1
image_id = test_config["vim_conn"].delete_image(self.__class__.image_id)
- self.assertEqual(type(image_id),str)
+
+ self.assertIsInstance(image_id, (str, unicode))
def test_030_delete_image_negative(self):
Non_exist_image_id = str(uuid.uuid4())
if 'name' in item:
self.__class__.image_name = item['name']
self.__class__.image_id = item['id']
- self.assertEqual(type(self.__class__.image_name),str)
- self.assertEqual(type(self.__class__.image_id),str)
+ self.assertIsInstance(self.__class__.image_name, (str, unicode))
+ self.assertIsInstance(self.__class__.image_id, (str, unicode))
def test_010_get_image_list_by_name(self):
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
image_list = test_config["vim_conn"].get_image_list({'name': self.__class__.image_name})
for item in image_list:
- self.assertEqual(type(item['id']), str)
+ self.assertIsInstance(item['id'], (str, unicode))
+ self.assertIsInstance(item['name'], (str, unicode))
self.assertEqual(item['id'], self.__class__.image_id)
- self.assertEqual(type(item['name']), str)
self.assertEqual(item['name'], self.__class__.image_name)
def test_020_get_image_list_by_id(self):
filter_image_list = test_config["vim_conn"].get_image_list({'id': self.__class__.image_id})
for item1 in filter_image_list:
- self.assertEqual(type(item1.get('id')), str)
- self.assertEqual(item1.get('id'), self.__class__.image_id)
- self.assertEqual(type(item1.get('name')), str)
- self.assertEqual(item1.get('name'), self.__class__.image_name)
+ self.assertIsInstance(item1['id'], (str, unicode))
+ self.assertIsInstance(item1['name'], (str, unicode))
+ self.assertEqual(item1['id'], self.__class__.image_id)
+ self.assertEqual(item1['name'], self.__class__.image_name)
def test_030_get_image_list_negative(self):
Non_exist_image_id = uuid.uuid4()
self.__class__.network_id = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
net_type=self.__class__.net_type)
+ # find image name and image id
+ if test_config['image_name']:
+ image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
+ if len(image_list) == 0:
+ raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
+ else:
+ self.__class__.image_id = image_list[0]['id']
+ else:
+ image_list = test_config['vim_conn'].get_image_list()
+ if len(image_list) == 0:
+ raise Exception("Not found any image at VIM")
+ else:
+ self.__class__.image_id = image_list[0]['id']
def tearDown(self):
test_base.tearDown(self)
vpci = "0000:00:11.0"
name = "eth0"
- flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+ flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
# create new flavor
flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
- # find image name and image id
- if test_config['image_name']:
- image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
- if len(image_list) == 0:
- raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
- else:
- self.__class__.image_id = image_list[0]['id']
- else:
- image_list = test_config['vim_conn'].get_image_list()
- if len(image_list) == 0:
- raise Exception("Not found any image at VIM")
- else:
- self.__class__.image_id = image_list[0]['id']
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
self.__class__.test_index,
net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
- self.__class__.instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
+ self.__class__.instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
- self.assertEqual(type(self.__class__.instance_id),str)
+ self.assertIsInstance(self.__class__.instance_id, (str, unicode))
def test_010_new_vminstance_by_model(self):
- flavor_data = {'ram': 1024, 'vcpus': 2, 'disk': 10}
+ flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
model_name = 'e1000'
name = 'eth0'
net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'model': model_name, 'type': 'virtual', 'net_id': self.__class__.network_id}]
- instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
- flavor_id=flavor_id,
- net_list=net_list)
- self.assertEqual(type(instance_id),str)
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id,flavor_id=flavor_id,net_list=net_list)
+
+ self.assertIsInstance(instance_id, (str, unicode))
+
# Deleting created vm instance
logger.info("Deleting created vm intance")
test_config["vim_conn"].delete_vminstance(instance_id)
time.sleep(10)
def test_020_new_vminstance_by_net_use(self):
- flavor_data = {'ram': 1024, 'vcpus': 2, 'disk': 10}
+ flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
net_use = 'data'
name = 'eth0'
net_list = [{'use': net_use, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
- instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id,disk_list=None,
flavor_id=flavor_id,
net_list=net_list)
- self.assertEqual(type(instance_id),str)
+ self.assertIsInstance(instance_id, (str, unicode))
+
# Deleting created vm instance
logger.info("Deleting created vm intance")
test_config["vim_conn"].delete_vminstance(instance_id)
time.sleep(10)
def test_030_new_vminstance_by_net_type(self):
- flavor_data = {'ram': 1024, 'vcpus': 2, 'disk': 10}
+ flavor_data = {'name':_get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
_type = 'VF'
name = 'eth0'
inspect.currentframe().f_code.co_name)
self.__class__.test_index += 1
- net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': _type, 'net_id': self.__class__.network_id}]
+ if test_config['vimtype'] == 'vmware':
+ net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': _type, 'net_id': self.__class__.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
+ flavor_id=flavor_id,
+ net_list=net_list)
+ self.assertEqual(type(instance_id),str)
+
+ if test_config['vimtype'] == 'openstack':
+ # create network of type data
+ network_name = _get_random_string(20)
+ net_type = 'data'
+
+ network_id = test_config["vim_conn"].new_network(net_name=network_name,
+ net_type=net_type)
+ net_list = [{'use': net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': _type, 'net_id': network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
+ image_id=self.__class__.image_id, disk_list=None,
+ flavor_id=flavor_id,
+ net_list=net_list)
+
+ self.assertEqual(type(instance_id), unicode)
+
+ # delete created network
+ result = test_config["vim_conn"].delete_network(network_id)
+ if result:
+ logger.info("Network id {} sucessfully deleted".format(network_id))
+ else:
+ logger.info("Failed to delete network id {}".format(network_id))
- instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
- flavor_id=flavor_id,
- net_list=net_list)
- self.assertEqual(type(instance_id),str)
# Deleting created vm instance
logger.info("Deleting created vm intance")
test_config["vim_conn"].delete_vminstance(instance_id)
time.sleep(10)
def test_040_new_vminstance_by_cloud_config(self):
- flavor_data = {'ram': 1024, 'vcpus': 2, 'disk': 10}
+ flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
name = 'eth0'
user_name = 'test_user'
net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
- instance_id, _ = test_config["vim_conn"].new_vminstance(name='Cloud_vm', image_id=self.__class__.image_id,
- flavor_id=flavor_id,
- net_list=net_list,
- cloud_config=cloud_data)
- self.assertEqual(type(instance_id),str)
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Cloud_vm', description='', start=False,
+ image_id=self.__class__.image_id, flavor_id=flavor_id,net_list=net_list,cloud_config=cloud_data)
+
+ self.assertIsInstance(instance_id, (str, unicode))
+
# Deleting created vm instance
logger.info("Deleting created vm intance")
test_config["vim_conn"].delete_vminstance(instance_id)
time.sleep(10)
def test_050_new_vminstance_by_disk_list(self):
- flavor_data = {'ram': 1024, 'vcpus': 2, 'disk': 10}
+ flavor_data = {'name':_get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
name = 'eth0'
- device_data = [{'image_id': self.__class__.image_id, 'size': '5'}]
+ device_data = [{'image_id': self.__class__.image_id, 'size': '10'}]
# create new flavor
flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
- instance_id, _ = test_config["vim_conn"].new_vminstance(name='VM_test1', image_id=self.__class__.image_id,
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='VM_test1', description='', start=False, image_id=self.__class__.image_id,
flavor_id=flavor_id,
net_list=net_list,
disk_list=device_data)
- self.assertEqual(type(instance_id),str)
+
+ self.assertIsInstance(instance_id, (str, unicode))
# Deleting created vm instance
logger.info("Deleting created vm intance")
test_config["vim_conn"].delete_vminstance(instance_id)
net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
with self.assertRaises(Exception) as context:
- test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=unknown_image_id,
+ test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=unknown_image_id,
flavor_id=unknown_flavor_id,
net_list=net_list)
- self.assertEqual((context.exception).http_code, 404)
+
+ self.assertIn((context.exception).http_code, (400, 404))
+
def test_070_get_vminstance(self):
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
self.__class__.test_index,
inspect.currentframe().f_code.co_name)
self.__class__.test_index += 1
- vm_list = []
- vm_list.append(self.__class__.instance_id)
- # refresh vm status
- vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
- for attr in vm_info[self.__class__.instance_id]:
- if attr == 'status':
- self.assertEqual(vm_info[self.__class__.instance_id][attr], 'ACTIVE')
- if attr == 'interfaces':
- self.assertEqual(type(vm_info[self.__class__.instance_id][attr]), list)
+ if test_config['vimtype'] == 'vmware':
+ vm_list = []
+ vm_list.append(self.__class__.instance_id)
+
+ # refresh vm status
+ vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
+ for attr in vm_info[self.__class__.instance_id]:
+ if attr == 'status':
+ self.assertEqual(vm_info[self.__class__.instance_id][attr], 'ACTIVE')
+ if attr == 'interfaces':
+ self.assertEqual(type(vm_info[self.__class__.instance_id][attr]), list)
+
+ if test_config['vimtype'] == 'openstack':
+ vpci = "0000:00:11.0"
+ name = "eth0"
+
+ flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+ # create new vm instance
+ net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
+
+ time.sleep(30)
+ vm_list = []
+ vm_list.append(instance_id)
+
+ # refresh vm status
+ vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
+ for attr in vm_info[instance_id]:
+ if attr == 'status':
+ self.assertEqual(vm_info[instance_id][attr], 'ACTIVE')
+ if attr == 'interfaces':
+ self.assertEqual(type(vm_info[instance_id][attr]), list)
+
+ #Deleting created vm instance
+ logger.info("Deleting created vm intance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
def test_100_refresh_vms_status_negative(self):
unknown_id = str(uuid.uuid4())
self.__class__.test_index += 1
vm_dict = test_config["vim_conn"].refresh_vms_status([unknown_id])
- self.assertEqual(vm_dict, {})
+
+ if test_config['vimtype'] == 'vmware':
+ self.assertEqual(vm_dict,{})
+
+ if test_config['vimtype'] == 'openstack':
+ self.assertEqual(vm_dict[unknown_id]['status'], 'DELETED')
def test_110_action_vminstance(self):
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
inspect.currentframe().f_code.co_name)
self.__class__.test_index += 1
- action_list = ['shutdown','start','shutoff','rebuild','pause','resume']
- # various action on vminstace
- for action in action_list:
- instance_id = test_config["vim_conn"].action_vminstance(self.__class__.instance_id,
- { action: None})
- self.assertEqual(instance_id, self.__class__.instance_id)
+ if test_config['vimtype'] == 'vmware':
+ action_list = ['shutdown', 'start', 'shutoff', 'rebuild', 'pause', 'resume']
+ # various action on vminstace
+ for action in action_list:
+ instance_id = test_config["vim_conn"].action_vminstance(self.__class__.instance_id,
+ {action: None})
+ self.assertEqual(instance_id, self.__class__.instance_id)
+
+ if test_config['vimtype'] == 'openstack':
+ # create new vm instance
+ vpci = "0000:00:11.0"
+ name = "eth0"
+
+ flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+ new_instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
+
+ action_list = ['shutdown','start','shutoff','rebuild','start','pause','start']
+
+ # various action on vminstace
+ for action in action_list:
+ # sleep for sometime till status is changed
+ time.sleep(25)
+ instance_id = test_config["vim_conn"].action_vminstance(new_instance_id,
+ { action: None})
+
+ self.assertTrue(instance_id is None)
+
+ # Deleting created vm instance
+ logger.info("Deleting created vm intance")
+ test_config["vim_conn"].delete_vminstance(new_instance_id)
+ time.sleep(10)
def test_120_action_vminstance_negative(self):
non_exist_id = str(uuid.uuid4())
with self.assertRaises(Exception) as context:
test_config["vim_conn"].action_vminstance(non_exist_id, { action: None})
- self.assertEqual((context.exception).http_code, 400)
+ self.assertEqual((context.exception).http_code, 404)
+
def test_130_delete_vminstance(self):
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
test_config["vim_conn"].delete_vminstance(self.__class__.instance_id)
time.sleep(10)
+ def test_140_new_vminstance_sriov(self):
+ logger.info("Testing creation of sriov vm instance using {}".format(test_config['sriov_net_name']))
+ flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+ name = 'eth0'
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ sriov_net_name = test_config['sriov_net_name']
+ new_network_list = test_config["vim_conn"].get_network_list({'name': sriov_net_name})
+ for list_item in new_network_list:
+ self.assertEqual(sriov_net_name, list_item.get('name'))
+ self.__class__.sriov_network_id = list_item.get('id')
+
+ net_list = [{'use': 'data', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'VF', 'net_id': self.__class__.sriov_network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_sriov_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
+
+ self.assertIsInstance(instance_id, (str, unicode))
+
+        logger.info("Waiting for created sriov-vm instance")
+ time.sleep(10)
+ # Deleting created vm instance
+        logger.info("Deleting created sriov-vm instance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
class test_vimconn_get_tenant_list(test_base):
tenant_id = None
for item in tenant_list:
if test_config['tenant'] == item['name']:
self.__class__.tenant_id = item['id']
- self.assertEqual(type(item['name']), str)
- self.assertEqual(type(item['id']), str)
+ self.assertIsInstance(item['name'], (str, unicode))
+ self.assertIsInstance(item['id'], (str, unicode))
def test_010_get_tenant_list_by_id(self):
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
filter_tenant_list = test_config["vim_conn"].get_tenant_list({'id': self.__class__.tenant_id})
for item in filter_tenant_list:
- self.assertEqual(type(item['id']), str)
+ self.assertIsInstance(item['id'], (str, unicode))
self.assertEqual(item['id'], self.__class__.tenant_id)
def test_020_get_tenant_list_by_name(self):
filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant']})
for item in filter_tenant_list:
- self.assertEqual(type(item['name']), str)
+ self.assertIsInstance(item['name'], (str, unicode))
self.assertEqual(item['name'], test_config['tenant'])
def test_030_get_tenant_list_by_name_and_id(self):
'id': self.__class__.tenant_id})
for item in filter_tenant_list:
- self.assertEqual(type(item['name']), str)
- self.assertEqual(type(item['id']), str)
+ self.assertIsInstance(item['name'], (str, unicode))
+ self.assertIsInstance(item['id'], (str, unicode))
self.assertEqual(item['name'], test_config['tenant'])
self.assertEqual(item['id'], self.__class__.tenant_id)
self.assertEqual(filter_tenant_list, [])
+
class test_vimconn_new_tenant(test_base):
tenant_id = None
inspect.currentframe().f_code.co_name)
self.__class__.test_index += 1
- self.__class__.tenant_id = test_config["vim_conn"].new_tenant(tenant_name)
+ self.__class__.tenant_id = test_config["vim_conn"].new_tenant(tenant_name, "")
time.sleep(15)
- self.assertEqual(type(self.__class__.tenant_id), str)
+ self.assertIsInstance(self.__class__.tenant_id, (str, unicode))
+
def test_010_new_tenant_negative(self):
Invalid_tenant_name = 10121
self.__class__.test_index += 1
with self.assertRaises(Exception) as context:
- test_config["vim_conn"].new_tenant(Invalid_tenant_name)
+ test_config["vim_conn"].new_tenant(Invalid_tenant_name, "")
self.assertEqual((context.exception).http_code, 400)
+
def test_020_delete_tenant(self):
self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
self.__class__.test_index,
self.__class__.test_index += 1
tenant_id = test_config["vim_conn"].delete_tenant(self.__class__.tenant_id)
- self.assertEqual(type(tenant_id), str)
+
+ self.assertIsInstance(tenant_id, (str, unicode))
def test_030_delete_tenant_negative(self):
Non_exist_tenant_name = 'Test_30_tenant'
self.assertEqual((context.exception).http_code, 404)
+def get_image_id():
+ if test_config['image_name']:
+ image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
+ if len(image_list) == 0:
+ raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
+ else:
+ image_id = image_list[0]['id']
+ else:
+ image_list = test_config['vim_conn'].get_image_list()
+ if len(image_list) == 0:
+ raise Exception("Not found any image at VIM")
+ else:
+ image_id = image_list[0]['id']
+ return image_id
+
+
+class test_vimconn_vminstance_by_ip_address(test_base):
+ network_name = None
+ network_id = None
+
+ def setUp(self):
+ # create network
+ self.network_name = _get_random_string(20)
+
+ self.network_id = test_config["vim_conn"].new_network(net_name=self.network_name,
+ net_type='bridge')
+
+ def tearDown(self):
+ test_base.tearDown(self)
+ # Deleting created network
+ result = test_config["vim_conn"].delete_network(self.network_id)
+ if result:
+            logger.info("Network id {} successfully deleted".format(self.network_id))
+ else:
+ logger.info("Failed to delete network id {}".format(self.network_id))
+
+
+ def test_000_vminstance_by_ip_address(self):
+ """
+ This test case will deploy VM with provided IP address
+        Pre-requisite: the provided IP address should be from the IP pool range used for network creation
+ """
+ name = "eth0"
+ # provide ip address
+ ip_address = ''
+
+ flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ # find image id
+ image_id = get_image_id()
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
+ 'net_id': self.network_id, 'ip_address': ip_address}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+ flavor_id=flavor_id, net_list=net_list)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+ logger.info("Deleting created vm instance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+ def test_010_vminstance_by_ip_address_negative(self):
+ name = "eth1"
+ # IP address not from subnet range
+ invalid_ip_address = '10.10.12.1'
+
+ flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ # find image name and image id
+ image_id = get_image_id()
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
+ 'net_id': self.network_id, 'ip_address': invalid_ip_address}]
+
+ with self.assertRaises(Exception) as context:
+ test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+ flavor_id=flavor_id,
+ net_list=net_list)
+ self.assertEqual((context.exception).http_code, 400)
+
+ def test_020_vminstance_by_floating_ip(self):
+ name = "eth1"
+ flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ # find image name and image id
+ image_id = get_image_id()
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': 'bridge', 'name': name, 'floating_ip': True, 'port_security': True, 'type': 'virtual',
+ 'net_id': self.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+ flavor_id=flavor_id, net_list=net_list)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+ logger.info("Deleting created vm instance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+ def test_030_vminstance_by_mac_address(self):
+ name = "eth1"
+ mac_address = "74:54:2f:21:da:8c"
+ flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ # find image name and image id
+ image_id = get_image_id()
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
+ 'net_id': self.network_id,'mac_address': mac_address}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+ flavor_id=flavor_id, net_list=net_list)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+ logger.info("Deleting created vm instance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+class test_vimconn_vminstance_by_adding_10_nics(test_base):
+ network_name = None
+ net_ids = []
+
+ def setUp(self):
+ # create network
+        self.net_ids = []  # reset per test instance so ids do not accumulate on the class attribute
+ for i in range(10):
+ self.network_name = _get_random_string(20)
+ network_id = test_config["vim_conn"].new_network(net_name=self.network_name,
+ net_type='bridge')
+ self.net_ids.append(network_id)
+
+ def tearDown(self):
+ test_base.tearDown(self)
+ # Deleting created network
+ for net_id in self.net_ids:
+ result = test_config["vim_conn"].delete_network(net_id)
+ if result:
+                logger.info("Network id {} successfully deleted".format(net_id))
+ else:
+ logger.info("Failed to delete network id {}".format(net_id))
+
+ def test_000_vminstance_by_adding_10_nics(self):
+ flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ # find image name and image id
+ image_id = get_image_id()
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = []
+ c = 1
+ for net_id in self.net_ids:
+ name = "eth{}".format(c)
+ net_list.append({'use': 'bridge', 'name': name, 'floating_ip': False,
+ 'port_security': True, 'type': 'virtual', 'net_id': net_id})
+ c = c+1
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+ flavor_id=flavor_id, net_list=net_list)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+ logger.info("Deleting created vm instance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+
+class test_vimconn_vminstance_by_existing_disk(test_base):
+ network_name = None
+ network_id = None
+
+ def setUp(self):
+ # create network
+ self.network_name = _get_random_string(20)
+ self.network_id = test_config["vim_conn"].new_network(net_name=self.network_name,
+ net_type='bridge')
+
+ def tearDown(self):
+ test_base.tearDown(self)
+ # Deleting created network
+ result = test_config["vim_conn"].delete_network(self.network_id)
+ if result:
+            logger.info("Network id {} successfully deleted".format(self.network_id))
+ else:
+ logger.info("Failed to delete network id {}".format(self.network_id))
+
+
+ def test_000_vminstance_by_existing_disk(self):
+ """ This testcase will add existing disk only if given catalog/image is free
+ means not used by any other VM
+ """
+
+ flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+ name = "eth10"
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ # find image name and image id
+ image_id = get_image_id()
+ cirros_image = test_config["vim_conn"].get_image_list({'name': 'cirros'})
+ disk_list = [{'image_id': cirros_image[0]['id'],'size': 5}]
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': 'virtual', 'net_id': self.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+ flavor_id=flavor_id, net_list=net_list,disk_list=disk_list)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+ logger.info("Deleting created vm instance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+ def test_010_vminstance_by_new_disk(self):
+ flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+ name = "eth10"
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ # find image name and image id
+ image_id = get_image_id()
+ disk_list = [{'size': '5'}]
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': 'virtual', 'net_id': self.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+ flavor_id=flavor_id, net_list=net_list,disk_list=disk_list)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+ logger.info("Deleting created vm instance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+ def test_020_vminstance_by_CDROM(self):
+ """ This testcase will insert media file only if provided catalog
+ has pre-created ISO media file into vCD
+ """
+ flavor_data ={'ram': 1024, 'vcpus': 1, 'disk': 10}
+ name = "eth10"
+ image_list = test_config["vim_conn"].get_image_list({'name':'Ubuntu'})
+ disk_list = [{'image_id':image_list[0]['id'],'device_type':'cdrom'}]
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ # find image name and image id
+ image_id = get_image_id()
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': 'virtual', 'net_id': self.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+ flavor_id=flavor_id, net_list=net_list,disk_list=disk_list )
+
+        self.assertIsInstance(instance_id, (str, unicode))
+ logger.info("Deleting created vm instance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+
+class test_vimconn_vminstance_by_affinity_anti_affinity(test_base):
+ network_name = None
+ network_id = None
+
+ def setUp(self):
+ # create network
+ self.network_name = _get_random_string(20)
+ self.network_id = test_config["vim_conn"].new_network(net_name=self.network_name,
+ net_type='bridge')
+
+ def tearDown(self):
+ test_base.tearDown(self)
+ # Deleting created network
+ result = test_config["vim_conn"].delete_network(self.network_id)
+ if result:
+            logger.info("Network id {} successfully deleted".format(self.network_id))
+ else:
+ logger.info("Failed to delete network id {}".format(self.network_id))
+
+ def test_000_vminstance_by_affinity_anti_affinity(self):
+ """ This testcase will deploy VM into provided HOSTGROUP in VIM config
+        Pre-requisites: User has created Host Groups in vCenter with respective Hosts to be used
+ While creating VIM account user has to pass the Host Group names in availability_zone list
+ """
+ flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+ name = "eth10"
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ # find image name and image id
+ image_id = get_image_id()
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': 'virtual', 'net_id': self.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+ flavor_id=flavor_id, net_list=net_list,availability_zone_index=1,
+ availability_zone_list=['HG_174','HG_175'])
+
+        self.assertIsInstance(instance_id, (str, unicode))
+ time.sleep(10)
+ logger.info("Deleting created vm instance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+
+class test_vimconn_vminstance_by_numa_affinity(test_base):
+ network_name = None
+ network_id = None
+
+ def setUp(self):
+ # create network
+ self.network_name = _get_random_string(20)
+ self.network_id = test_config["vim_conn"].new_network(net_name=self.network_name,
+ net_type='bridge')
+
+ def tearDown(self):
+ test_base.tearDown(self)
+ # Deleting created network
+ result = test_config["vim_conn"].delete_network(self.network_id)
+ if result:
+            logger.info("Network id {} successfully deleted".format(self.network_id))
+ else:
+ logger.info("Failed to delete network id {}".format(self.network_id))
+
+ def test_000_vminstance_by_numa_affinity(self):
+ flavor_data = {'extended': {'numas': [{'paired-threads-id': [['1', '3'], ['2', '4']],
+                                   'paired-threads': 2, 'memory': 1}]},
+ 'ram': 1024, 'vcpus': 1, 'disk': 10}
+ name = "eth10"
+
+ # create new flavor
+ flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+ # find image name and image id
+ image_id = get_image_id()
+
+ self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ self.__class__.test_index,
+ inspect.currentframe().f_code.co_name)
+ self.__class__.test_index += 1
+
+ net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
+ 'type': 'virtual', 'net_id': self.network_id}]
+
+ instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+ flavor_id=flavor_id, net_list=net_list)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+ logger.info("Deleting created vm instance")
+ test_config["vim_conn"].delete_vminstance(instance_id)
+ time.sleep(10)
+
+
'''
IMPORTANT NOTE
The following unittest class does not have the 'test_' on purpose. This test is the one used for the
tenant_name = args.tenant_name
test_config['tenant'] = tenant_name
- config_params = json.loads(args.config_param)
+    config_params = yaml.safe_load(args.config_param)
org_name = config_params.get('orgname')
org_user = config_params.get('user')
org_passwd = config_params.get('passwd')
vim_url = args.endpoint_url
test_config['image_path'] = args.image_path
test_config['image_name'] = args.image_name
+ test_config['sriov_net_name'] = args.sriov_net_name
# vmware connector obj
test_config['vim_conn'] = vim.vimconnector(name=org_name, tenant_name=tenant_name, user=org_user,passwd=org_passwd, url=vim_url, config=config_params)
import vimconn_aws as vim
elif args.vimtype == "openstack":
import vimconn_openstack as vim
+
+ test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"
+
+ tenant_name = args.tenant_name
+ test_config['tenant'] = tenant_name
+    config_params = yaml.safe_load(args.config_param)
+ os_user = config_params.get('user')
+ os_passwd = config_params.get('passwd')
+ vim_url = args.endpoint_url
+ test_config['image_path'] = args.image_path
+ test_config['image_name'] = args.image_name
+ test_config['sriov_net_name'] = args.sriov_net_name
+
+ # openstack connector obj
+ vim_persistent_info = {}
+ test_config['vim_conn'] = vim.vimconnector(
+ uuid="test-uuid-1", name="VIO-openstack",
+ tenant_id=None, tenant_name=tenant_name,
+ url=vim_url, url_admin=None,
+ user=os_user, passwd=os_passwd,
+ config=config_params, persistent_info=vim_persistent_info
+ )
+ test_config['vim_conn'].debug = "true"
+
elif args.vimtype == "openvim":
import vimconn_openvim as vim
else:
if args.list_tests:
tests_names = []
for cls in clsmembers:
- if cls[0].startswith('test_vimconnector'):
+ if cls[0].startswith('test_vimconn'):
tests_names.append(cls[0])
msg = "The 'vim' set tests are:\n\t" + ', '.join(sorted(tests_names))
# include all tests
for cls in clsmembers:
# We exclude 'test_VIM_tenant_operations' unless it is specifically requested by the user
- if cls[0].startswith('test_vimconnector'):
+ if cls[0].startswith('test_vimconn'):
code_based_tests.append(cls[1])
logger.debug("tests to be executed: {}".format(code_based_tests))
vimconn_parser.add_argument('-n', '--image-name', dest='image_name', help="Provide image name for test")
# TODO add optional arguments for vimconn tests
# vimconn_parser.add_argument("-i", '--image-name', dest='image_name', help='<HELP>'))
+ vimconn_parser.add_argument('-s', '--sriov-net-name', dest='sriov_net_name', help="Provide SRIOV network name for test")
# Datacenter test set
# -------------------
#!/bin/bash
##
-# Copyright 2017 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of OSM
# All Rights Reserved.
#
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
#!/bin/bash
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#